1 /*- 2 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB 3 * 4 * Copyright (c) 2015 - 2022 Intel Corporation 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenFabrics.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 33 */ 34 /*$FreeBSD$*/ 35 36 #include "osdep.h" 37 #include "irdma_hmc.h" 38 #include "irdma_defs.h" 39 #include "irdma_type.h" 40 #include "irdma_ws.h" 41 #include "irdma_protos.h" 42 43 /** 44 * irdma_qp_from_entry - Given entry, get to the qp structure 45 * @entry: Points to list of qp structure 46 */ 47 static struct irdma_sc_qp * 48 irdma_qp_from_entry(struct list_head *entry) 49 { 50 if (!entry) 51 return NULL; 52 53 return (struct irdma_sc_qp *)((char *)entry - 54 offsetof(struct irdma_sc_qp, list)); 55 } 56 57 /** 58 * irdma_get_qp_from_list - get next qp from a list 59 * @head: Listhead of qp's 60 * @qp: current qp 61 */ 62 struct irdma_sc_qp * 63 irdma_get_qp_from_list(struct list_head *head, 64 struct irdma_sc_qp *qp) 65 { 66 struct list_head *lastentry; 67 struct list_head *entry = NULL; 68 69 if (list_empty(head)) 70 return NULL; 71 72 if (!qp) { 73 entry = (head)->next; 74 } else { 75 lastentry = &qp->list; 76 entry = (lastentry)->next; 77 if (entry == head) 78 return NULL; 79 } 80 81 return irdma_qp_from_entry(entry); 82 } 83 84 /** 85 * irdma_sc_suspend_resume_qps - suspend/resume all qp's on VSI 86 * @vsi: the VSI struct pointer 87 * @op: Set to IRDMA_OP_RESUME or IRDMA_OP_SUSPEND 88 */ 89 void 90 irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 op) 91 { 92 struct irdma_sc_qp *qp = NULL; 93 u8 i; 94 95 for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) { 96 mutex_lock(&vsi->qos[i].qos_mutex); 97 qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp); 98 while (qp) { 99 if (op == IRDMA_OP_RESUME) { 100 if (!qp->dev->ws_add(vsi, i)) { 101 qp->qs_handle = 102 vsi->qos[qp->user_pri].qs_handle; 103 irdma_cqp_qp_suspend_resume(qp, op); 104 } else { 105 irdma_cqp_qp_suspend_resume(qp, op); 106 irdma_modify_qp_to_err(qp); 107 } 108 } else if (op == IRDMA_OP_SUSPEND) { 109 /* issue cqp suspend command */ 110 if (!irdma_cqp_qp_suspend_resume(qp, op)) 111 atomic_inc(&vsi->qp_suspend_reqs); 112 } 113 qp = irdma_get_qp_from_list(&vsi->qos[i].qplist, qp); 114 } 115 mutex_unlock(&vsi->qos[i].qos_mutex); 116 } 117 } 118 119 static void 120 
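/**
 * irdma_set_qos_info - populate VSI QoS settings from l2 parameters
 * @vsi: the VSI struct pointer
 * @l2p: l2 parameters
 *
 * Descriptive note (not in the original source): copies the relative
 * bandwidth, priority type, DSCP mode/map and the per-user-priority
 * traffic class, relative bandwidth and qs_handle information from
 * @l2p into @vsi.
 */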
irdma_set_qos_info(struct irdma_sc_vsi *vsi, struct irdma_l2params *l2p) 121 { 122 u8 i; 123 124 vsi->qos_rel_bw = l2p->vsi_rel_bw; 125 vsi->qos_prio_type = l2p->vsi_prio_type; 126 vsi->dscp_mode = l2p->dscp_mode; 127 if (l2p->dscp_mode) { 128 irdma_memcpy(vsi->dscp_map, l2p->dscp_map, sizeof(vsi->dscp_map)); 129 for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) 130 l2p->up2tc[i] = i; 131 } 132 for (i = 0; i < IRDMA_MAX_TRAFFIC_CLASS; i++) 133 vsi->tc_print_warning[i] = true; 134 for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) { 135 if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) 136 vsi->qos[i].qs_handle = l2p->qs_handle_list[i]; 137 if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_2) 138 irdma_init_config_check(&vsi->cfg_check[i], 139 l2p->up2tc[i], 140 l2p->qs_handle_list[i]); 141 vsi->qos[i].traffic_class = l2p->up2tc[i]; 142 vsi->qos[i].rel_bw = 143 l2p->tc_info[vsi->qos[i].traffic_class].rel_bw; 144 vsi->qos[i].prio_type = 145 l2p->tc_info[vsi->qos[i].traffic_class].prio_type; 146 vsi->qos[i].valid = false; 147 } 148 } 149 150 /** 151 * irdma_change_l2params - given the new l2 parameters, change all qp 152 * @vsi: RDMA VSI pointer 153 * @l2params: New parameters from l2 154 */ 155 void 156 irdma_change_l2params(struct irdma_sc_vsi *vsi, 157 struct irdma_l2params *l2params) 158 { 159 if (l2params->mtu_changed) { 160 vsi->mtu = l2params->mtu; 161 if (vsi->ieq) 162 irdma_reinitialize_ieq(vsi); 163 } 164 165 if (!l2params->tc_changed) 166 return; 167 168 vsi->tc_change_pending = false; 169 irdma_set_qos_info(vsi, l2params); 170 irdma_sc_suspend_resume_qps(vsi, IRDMA_OP_RESUME); 171 } 172 173 /** 174 * irdma_qp_rem_qos - remove qp from qos lists during destroy qp 175 * @qp: qp to be removed from qos 176 */ 177 void 178 irdma_qp_rem_qos(struct irdma_sc_qp *qp) 179 { 180 struct irdma_sc_vsi *vsi = qp->vsi; 181 182 irdma_debug(qp->dev, IRDMA_DEBUG_DCB, 183 "DCB: Remove qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n", 184 qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle, qp->on_qoslist); 185 mutex_lock(&vsi->qos[qp->user_pri].qos_mutex); 186 if (qp->on_qoslist) { 187 qp->on_qoslist = false; 188 list_del(&qp->list); 189 } 190 mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex); 191 } 192 193 /** 194 * irdma_qp_add_qos - called during setctx for qp to be added to qos 195 * @qp: qp to be added to qos 196 */ 197 void 198 irdma_qp_add_qos(struct irdma_sc_qp *qp) 199 { 200 struct irdma_sc_vsi *vsi = qp->vsi; 201 202 irdma_debug(qp->dev, IRDMA_DEBUG_DCB, 203 "DCB: Add qp[%d] UP[%d] qset[%d] on_qoslist[%d]\n", 204 qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle, qp->on_qoslist); 205 mutex_lock(&vsi->qos[qp->user_pri].qos_mutex); 206 if (!qp->on_qoslist) { 207 list_add(&qp->list, &vsi->qos[qp->user_pri].qplist); 208 qp->on_qoslist = true; 209 qp->qs_handle = vsi->qos[qp->user_pri].qs_handle; 210 } 211 mutex_unlock(&vsi->qos[qp->user_pri].qos_mutex); 212 } 213 214 /** 215 * irdma_sc_pd_init - initialize sc pd struct 216 * @dev: sc device struct 217 * @pd: sc pd ptr 218 * @pd_id: pd_id for allocated pd 219 * @abi_ver: User/Kernel ABI version 220 */ 221 void 222 irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id, 223 int abi_ver) 224 { 225 pd->pd_id = pd_id; 226 pd->abi_ver = abi_ver; 227 pd->dev = dev; 228 } 229 230 /** 231 * irdma_sc_add_arp_cache_entry - cqp wqe add arp cache entry 232 * @cqp: struct for cqp hw 233 * @info: arp entry information 234 * @scratch: u64 saved to be used during cqp completion 235 * @post_sq: flag for cqp db to ring 236 */ 237 static int 238 
irdma_sc_add_arp_cache_entry(struct irdma_sc_cqp *cqp,
			     struct irdma_add_arp_cache_entry_info *info,
			     u64 scratch, bool post_sq)
{
	__le64 *wqe;
	u64 temp, hdr;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;
	set_64bit_val(wqe, IRDMA_BYTE_8, info->reach_max);

	temp = info->mac_addr[5] | LS_64_1(info->mac_addr[4], 8) |
	    LS_64_1(info->mac_addr[3], 16) | LS_64_1(info->mac_addr[2], 24) |
	    LS_64_1(info->mac_addr[1], 32) | LS_64_1(info->mac_addr[0], 40);
	set_64bit_val(wqe, IRDMA_BYTE_16, temp);

	hdr = info->arp_index |
	    LS_64(IRDMA_CQP_OP_MANAGE_ARP, IRDMA_CQPSQ_OPCODE) |
	    LS_64((info->permanent ? 1 : 0), IRDMA_CQPSQ_MAT_PERMANENT) |
	    LS_64(1, IRDMA_CQPSQ_MAT_ENTRYVALID) |
	    LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "ARP_CACHE_ENTRY WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}

/**
 * irdma_sc_del_arp_cache_entry - delete arp cache entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @arp_index: arp index to delete arp entry
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_del_arp_cache_entry(struct irdma_sc_cqp *cqp, u64 scratch,
			     u16 arp_index, bool post_sq)
{
	__le64 *wqe;
	u64 hdr;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	hdr = arp_index | LS_64(IRDMA_CQP_OP_MANAGE_ARP, IRDMA_CQPSQ_OPCODE) |
	    LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "ARP_CACHE_DEL_ENTRY WQE",
			wqe, IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}

/**
 * irdma_sc_manage_apbvt_entry - for adding and deleting apbvt entries
 * @cqp: struct for cqp hw
 * @info: info for apbvt entry to add or delete
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_manage_apbvt_entry(struct irdma_sc_cqp *cqp,
			    struct irdma_apbvt_info *info,
			    u64 scratch, bool post_sq)
{
	__le64 *wqe;
	u64 hdr;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	set_64bit_val(wqe, IRDMA_BYTE_16, info->port);

	hdr = LS_64(IRDMA_CQP_OP_MANAGE_APBVT, IRDMA_CQPSQ_OPCODE) |
	    LS_64(info->add, IRDMA_CQPSQ_MAPT_ADDPORT) |
	    LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);
	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_APBVT WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);
	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);

	return 0;
}

/**
 * irdma_sc_manage_qhash_table_entry - manage quad hash entries
 * @cqp: struct for cqp hw
 * @info: info for quad hash to manage
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 *
 * This is called before connection establishment is started.
348 * For passive connections, when listener is created, it will 349 * call with entry type of IRDMA_QHASH_TYPE_TCP_SYN with local 350 * ip address and tcp port. When SYN is received (passive 351 * connections) or sent (active connections), this routine is 352 * called with entry type of IRDMA_QHASH_TYPE_TCP_ESTABLISHED 353 * and quad is passed in info. 354 * 355 * When iwarp connection is done and its state moves to RTS, the 356 * quad hash entry in the hardware will point to iwarp's qp 357 * number and requires no calls from the driver. 358 */ 359 static int 360 irdma_sc_manage_qhash_table_entry(struct irdma_sc_cqp *cqp, 361 struct irdma_qhash_table_info *info, 362 u64 scratch, bool post_sq) 363 { 364 __le64 *wqe; 365 u64 qw1 = 0; 366 u64 qw2 = 0; 367 u64 temp; 368 struct irdma_sc_vsi *vsi = info->vsi; 369 370 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 371 if (!wqe) 372 return -ENOSPC; 373 temp = info->mac_addr[5] | LS_64_1(info->mac_addr[4], 8) | 374 LS_64_1(info->mac_addr[3], 16) | LS_64_1(info->mac_addr[2], 24) | 375 LS_64_1(info->mac_addr[1], 32) | LS_64_1(info->mac_addr[0], 40); 376 set_64bit_val(wqe, IRDMA_BYTE_0, temp); 377 378 qw1 = LS_64(info->qp_num, IRDMA_CQPSQ_QHASH_QPN) | 379 LS_64(info->dest_port, IRDMA_CQPSQ_QHASH_DEST_PORT); 380 if (info->ipv4_valid) { 381 set_64bit_val(wqe, IRDMA_BYTE_48, 382 LS_64(info->dest_ip[0], IRDMA_CQPSQ_QHASH_ADDR3)); 383 } else { 384 set_64bit_val(wqe, IRDMA_BYTE_56, 385 LS_64(info->dest_ip[0], IRDMA_CQPSQ_QHASH_ADDR0) | 386 LS_64(info->dest_ip[1], IRDMA_CQPSQ_QHASH_ADDR1)); 387 388 set_64bit_val(wqe, IRDMA_BYTE_48, 389 LS_64(info->dest_ip[2], IRDMA_CQPSQ_QHASH_ADDR2) | 390 LS_64(info->dest_ip[3], IRDMA_CQPSQ_QHASH_ADDR3)); 391 } 392 qw2 = LS_64(vsi->qos[info->user_pri].qs_handle, 393 IRDMA_CQPSQ_QHASH_QS_HANDLE); 394 if (info->vlan_valid) 395 qw2 |= LS_64(info->vlan_id, IRDMA_CQPSQ_QHASH_VLANID); 396 set_64bit_val(wqe, IRDMA_BYTE_16, qw2); 397 if (info->entry_type == IRDMA_QHASH_TYPE_TCP_ESTABLISHED) { 398 qw1 |= LS_64(info->src_port, IRDMA_CQPSQ_QHASH_SRC_PORT); 399 if (!info->ipv4_valid) { 400 set_64bit_val(wqe, IRDMA_BYTE_40, 401 LS_64(info->src_ip[0], IRDMA_CQPSQ_QHASH_ADDR0) | 402 LS_64(info->src_ip[1], IRDMA_CQPSQ_QHASH_ADDR1)); 403 set_64bit_val(wqe, IRDMA_BYTE_32, 404 LS_64(info->src_ip[2], IRDMA_CQPSQ_QHASH_ADDR2) | 405 LS_64(info->src_ip[3], IRDMA_CQPSQ_QHASH_ADDR3)); 406 } else { 407 set_64bit_val(wqe, IRDMA_BYTE_32, 408 LS_64(info->src_ip[0], IRDMA_CQPSQ_QHASH_ADDR3)); 409 } 410 } 411 412 set_64bit_val(wqe, IRDMA_BYTE_8, qw1); 413 temp = LS_64(cqp->polarity, IRDMA_CQPSQ_QHASH_WQEVALID) | 414 LS_64(IRDMA_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY, 415 IRDMA_CQPSQ_QHASH_OPCODE) | 416 LS_64(info->manage, IRDMA_CQPSQ_QHASH_MANAGE) | 417 LS_64(info->ipv4_valid, IRDMA_CQPSQ_QHASH_IPV4VALID) | 418 LS_64(info->vlan_valid, IRDMA_CQPSQ_QHASH_VLANVALID) | 419 LS_64(info->entry_type, IRDMA_CQPSQ_QHASH_ENTRYTYPE); 420 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 421 422 set_64bit_val(wqe, IRDMA_BYTE_24, temp); 423 424 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_QHASH WQE", wqe, 425 IRDMA_CQP_WQE_SIZE * 8); 426 if (post_sq) 427 irdma_sc_cqp_post_sq(cqp); 428 429 return 0; 430 } 431 432 /** 433 * irdma_sc_qp_init - initialize qp 434 * @qp: sc qp 435 * @info: initialization qp info 436 */ 437 int 438 irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info) 439 { 440 int ret_code; 441 u32 pble_obj_cnt; 442 u16 wqe_size; 443 444 if (info->qp_uk_init_info.max_sq_frag_cnt > 445 
info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags || 446 info->qp_uk_init_info.max_rq_frag_cnt > 447 info->pd->dev->hw_attrs.uk_attrs.max_hw_wq_frags) 448 return -EINVAL; 449 450 qp->dev = info->pd->dev; 451 qp->vsi = info->vsi; 452 qp->ieq_qp = info->vsi->exception_lan_q; 453 qp->sq_pa = info->sq_pa; 454 qp->rq_pa = info->rq_pa; 455 qp->hw_host_ctx_pa = info->host_ctx_pa; 456 qp->q2_pa = info->q2_pa; 457 qp->shadow_area_pa = info->shadow_area_pa; 458 qp->q2_buf = info->q2; 459 qp->pd = info->pd; 460 qp->hw_host_ctx = info->host_ctx; 461 info->qp_uk_init_info.wqe_alloc_db = qp->pd->dev->wqe_alloc_db; 462 ret_code = irdma_uk_qp_init(&qp->qp_uk, &info->qp_uk_init_info); 463 if (ret_code) 464 return ret_code; 465 466 qp->virtual_map = info->virtual_map; 467 pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt; 468 469 if ((info->virtual_map && info->sq_pa >= pble_obj_cnt) || 470 (info->virtual_map && info->rq_pa >= pble_obj_cnt)) 471 return -EINVAL; 472 473 qp->llp_stream_handle = (void *)(-1); 474 qp->qp_uk.force_fence = true; 475 qp->hw_sq_size = irdma_get_encoded_wqe_size(qp->qp_uk.sq_ring.size, 476 IRDMA_QUEUE_TYPE_SQ_RQ); 477 irdma_debug(qp->dev, IRDMA_DEBUG_WQE, 478 "hw_sq_size[%04d] sq_ring.size[%04d]\n", qp->hw_sq_size, 479 qp->qp_uk.sq_ring.size); 480 if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) 481 wqe_size = IRDMA_WQE_SIZE_128; 482 else 483 ret_code = irdma_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt, 484 &wqe_size); 485 if (ret_code) 486 return ret_code; 487 488 qp->hw_rq_size = 489 irdma_get_encoded_wqe_size(qp->qp_uk.rq_size * 490 (wqe_size / IRDMA_QP_WQE_MIN_SIZE), 491 IRDMA_QUEUE_TYPE_SQ_RQ); 492 irdma_debug(qp->dev, IRDMA_DEBUG_WQE, 493 "hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n", 494 qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size); 495 496 qp->sq_tph_val = info->sq_tph_val; 497 qp->rq_tph_val = info->rq_tph_val; 498 qp->sq_tph_en = info->sq_tph_en; 499 qp->rq_tph_en = info->rq_tph_en; 500 qp->rcv_tph_en = info->rcv_tph_en; 501 qp->xmit_tph_en = info->xmit_tph_en; 502 qp->qp_uk.first_sq_wq = info->qp_uk_init_info.first_sq_wq; 503 qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle; 504 505 return 0; 506 } 507 508 /** 509 * irdma_sc_qp_create - create qp 510 * @qp: sc qp 511 * @info: qp create info 512 * @scratch: u64 saved to be used during cqp completion 513 * @post_sq: flag for cqp db to ring 514 */ 515 int 516 irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info, 517 u64 scratch, bool post_sq) 518 { 519 struct irdma_sc_cqp *cqp; 520 __le64 *wqe; 521 u64 hdr; 522 523 cqp = qp->dev->cqp; 524 if (qp->qp_uk.qp_id < cqp->dev->hw_attrs.min_hw_qp_id || 525 qp->qp_uk.qp_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt - 1)) 526 return -EINVAL; 527 528 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 529 if (!wqe) 530 return -ENOSPC; 531 532 set_64bit_val(wqe, IRDMA_BYTE_16, qp->hw_host_ctx_pa); 533 set_64bit_val(wqe, IRDMA_BYTE_40, qp->shadow_area_pa); 534 535 hdr = qp->qp_uk.qp_id | 536 LS_64(IRDMA_CQP_OP_CREATE_QP, IRDMA_CQPSQ_OPCODE) | 537 LS_64((info->ord_valid ? 
1 : 0), IRDMA_CQPSQ_QP_ORDVALID) | 538 LS_64(info->tcp_ctx_valid, IRDMA_CQPSQ_QP_TOECTXVALID) | 539 LS_64(info->mac_valid, IRDMA_CQPSQ_QP_MACVALID) | 540 LS_64(qp->qp_uk.qp_type, IRDMA_CQPSQ_QP_QPTYPE) | 541 LS_64(qp->virtual_map, IRDMA_CQPSQ_QP_VQ) | 542 LS_64(info->force_lpb, IRDMA_CQPSQ_QP_FORCELOOPBACK) | 543 LS_64(info->cq_num_valid, IRDMA_CQPSQ_QP_CQNUMVALID) | 544 LS_64(info->arp_cache_idx_valid, IRDMA_CQPSQ_QP_ARPTABIDXVALID) | 545 LS_64(info->next_iwarp_state, IRDMA_CQPSQ_QP_NEXTIWSTATE) | 546 LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID); 547 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 548 549 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 550 551 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QP_CREATE WQE", wqe, 552 IRDMA_CQP_WQE_SIZE * 8); 553 if (post_sq) 554 irdma_sc_cqp_post_sq(cqp); 555 556 return 0; 557 } 558 559 /** 560 * irdma_sc_qp_modify - modify qp cqp wqe 561 * @qp: sc qp 562 * @info: modify qp info 563 * @scratch: u64 saved to be used during cqp completion 564 * @post_sq: flag for cqp db to ring 565 */ 566 int 567 irdma_sc_qp_modify(struct irdma_sc_qp *qp, struct irdma_modify_qp_info *info, 568 u64 scratch, bool post_sq) 569 { 570 __le64 *wqe; 571 struct irdma_sc_cqp *cqp; 572 u64 hdr; 573 u8 term_actions = 0; 574 u8 term_len = 0; 575 576 cqp = qp->dev->cqp; 577 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 578 if (!wqe) 579 return -ENOSPC; 580 581 if (info->next_iwarp_state == IRDMA_QP_STATE_TERMINATE) { 582 if (info->dont_send_fin) 583 term_actions += IRDMAQP_TERM_SEND_TERM_ONLY; 584 if (info->dont_send_term) 585 term_actions += IRDMAQP_TERM_SEND_FIN_ONLY; 586 if (term_actions == IRDMAQP_TERM_SEND_TERM_AND_FIN || 587 term_actions == IRDMAQP_TERM_SEND_TERM_ONLY) 588 term_len = info->termlen; 589 } 590 591 set_64bit_val(wqe, IRDMA_BYTE_8, 592 LS_64(info->new_mss, IRDMA_CQPSQ_QP_NEWMSS) | 593 LS_64(term_len, IRDMA_CQPSQ_QP_TERMLEN)); 594 set_64bit_val(wqe, IRDMA_BYTE_16, qp->hw_host_ctx_pa); 595 set_64bit_val(wqe, IRDMA_BYTE_40, qp->shadow_area_pa); 596 597 hdr = qp->qp_uk.qp_id | 598 LS_64(IRDMA_CQP_OP_MODIFY_QP, IRDMA_CQPSQ_OPCODE) | 599 LS_64(info->ord_valid, IRDMA_CQPSQ_QP_ORDVALID) | 600 LS_64(info->tcp_ctx_valid, IRDMA_CQPSQ_QP_TOECTXVALID) | 601 LS_64(info->cached_var_valid, IRDMA_CQPSQ_QP_CACHEDVARVALID) | 602 LS_64(qp->virtual_map, IRDMA_CQPSQ_QP_VQ) | 603 LS_64(info->force_lpb, IRDMA_CQPSQ_QP_FORCELOOPBACK) | 604 LS_64(info->cq_num_valid, IRDMA_CQPSQ_QP_CQNUMVALID) | 605 LS_64(info->mac_valid, IRDMA_CQPSQ_QP_MACVALID) | 606 LS_64(qp->qp_uk.qp_type, IRDMA_CQPSQ_QP_QPTYPE) | 607 LS_64(info->mss_change, IRDMA_CQPSQ_QP_MSSCHANGE) | 608 LS_64(info->remove_hash_idx, IRDMA_CQPSQ_QP_REMOVEHASHENTRY) | 609 LS_64(term_actions, IRDMA_CQPSQ_QP_TERMACT) | 610 LS_64(info->reset_tcp_conn, IRDMA_CQPSQ_QP_RESETCON) | 611 LS_64(info->arp_cache_idx_valid, IRDMA_CQPSQ_QP_ARPTABIDXVALID) | 612 LS_64(info->next_iwarp_state, IRDMA_CQPSQ_QP_NEXTIWSTATE) | 613 LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID); 614 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 615 616 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 617 618 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QP_MODIFY WQE", wqe, 619 IRDMA_CQP_WQE_SIZE * 8); 620 if (post_sq) 621 irdma_sc_cqp_post_sq(cqp); 622 623 return 0; 624 } 625 626 /** 627 * irdma_sc_qp_destroy - cqp destroy qp 628 * @qp: sc qp 629 * @scratch: u64 saved to be used during cqp completion 630 * @remove_hash_idx: flag if to remove hash idx 631 * @ignore_mw_bnd: memory window bind flag 632 * @post_sq: flag for cqp db to ring 633 */ 
634 int 635 irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch, 636 bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq) 637 { 638 __le64 *wqe; 639 struct irdma_sc_cqp *cqp; 640 u64 hdr; 641 642 cqp = qp->dev->cqp; 643 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 644 if (!wqe) 645 return -ENOSPC; 646 647 set_64bit_val(wqe, IRDMA_BYTE_16, qp->hw_host_ctx_pa); 648 set_64bit_val(wqe, IRDMA_BYTE_40, qp->shadow_area_pa); 649 650 hdr = qp->qp_uk.qp_id | 651 LS_64(IRDMA_CQP_OP_DESTROY_QP, IRDMA_CQPSQ_OPCODE) | 652 LS_64(qp->qp_uk.qp_type, IRDMA_CQPSQ_QP_QPTYPE) | 653 LS_64(ignore_mw_bnd, IRDMA_CQPSQ_QP_IGNOREMWBOUND) | 654 LS_64(remove_hash_idx, IRDMA_CQPSQ_QP_REMOVEHASHENTRY) | 655 LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID); 656 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 657 658 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 659 660 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QP_DESTROY WQE", wqe, 661 IRDMA_CQP_WQE_SIZE * 8); 662 if (post_sq) 663 irdma_sc_cqp_post_sq(cqp); 664 665 return 0; 666 } 667 668 /** 669 * irdma_sc_get_encoded_ird_size - 670 * @ird_size: IRD size 671 * The ird from the connection is rounded to a supported HW setting and then encoded 672 * for ird_size field of qp_ctx. Consumers are expected to provide valid ird size based 673 * on hardware attributes. IRD size defaults to a value of 4 in case of invalid input 674 */ 675 static u8 irdma_sc_get_encoded_ird_size(u16 ird_size) { 676 switch (ird_size ? 677 roundup_pow_of_two(2 * ird_size) : 4) { 678 case 256: 679 return IRDMA_IRD_HW_SIZE_256; 680 case 128: 681 return IRDMA_IRD_HW_SIZE_128; 682 case 64: 683 case 32: 684 return IRDMA_IRD_HW_SIZE_64; 685 case 16: 686 case 8: 687 return IRDMA_IRD_HW_SIZE_16; 688 case 4: 689 default: 690 break; 691 } 692 693 return IRDMA_IRD_HW_SIZE_4; 694 } 695 696 /** 697 * irdma_sc_qp_setctx_roce - set qp's context 698 * @qp: sc qp 699 * @qp_ctx: context ptr 700 * @info: ctx info 701 */ 702 void 703 irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 * qp_ctx, 704 struct irdma_qp_host_ctx_info *info) 705 { 706 struct irdma_roce_offload_info *roce_info; 707 struct irdma_udp_offload_info *udp; 708 u8 push_mode_en; 709 u32 push_idx; 710 u64 mac; 711 712 roce_info = info->roce_info; 713 udp = info->udp_info; 714 715 mac = LS_64_1(roce_info->mac_addr[5], 16) | 716 LS_64_1(roce_info->mac_addr[4], 24) | 717 LS_64_1(roce_info->mac_addr[3], 32) | 718 LS_64_1(roce_info->mac_addr[2], 40) | 719 LS_64_1(roce_info->mac_addr[1], 48) | 720 LS_64_1(roce_info->mac_addr[0], 56); 721 722 qp->user_pri = info->user_pri; 723 if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) { 724 push_mode_en = 0; 725 push_idx = 0; 726 } else { 727 push_mode_en = 1; 728 push_idx = qp->push_idx; 729 } 730 set_64bit_val(qp_ctx, IRDMA_BYTE_0, 731 LS_64(qp->qp_uk.rq_wqe_size, IRDMAQPC_RQWQESIZE) | 732 LS_64(qp->rcv_tph_en, IRDMAQPC_RCVTPHEN) | 733 LS_64(qp->xmit_tph_en, IRDMAQPC_XMITTPHEN) | 734 LS_64(qp->rq_tph_en, IRDMAQPC_RQTPHEN) | 735 LS_64(qp->sq_tph_en, IRDMAQPC_SQTPHEN) | 736 LS_64(push_idx, IRDMAQPC_PPIDX) | 737 LS_64(push_mode_en, IRDMAQPC_PMENA) | 738 LS_64(roce_info->pd_id >> 16, IRDMAQPC_PDIDXHI) | 739 LS_64(roce_info->dctcp_en, IRDMAQPC_DC_TCP_EN) | 740 LS_64(roce_info->err_rq_idx_valid, IRDMAQPC_ERR_RQ_IDX_VALID) | 741 LS_64(roce_info->is_qp1, IRDMAQPC_ISQP1) | 742 LS_64(roce_info->roce_tver, IRDMAQPC_ROCE_TVER) | 743 LS_64(udp->ipv4, IRDMAQPC_IPV4) | 744 LS_64(udp->insert_vlan_tag, IRDMAQPC_INSERTVLANTAG)); 745 set_64bit_val(qp_ctx, IRDMA_BYTE_8, qp->sq_pa); 746 set_64bit_val(qp_ctx, 
IRDMA_BYTE_16, qp->rq_pa); 747 if (roce_info->dcqcn_en || roce_info->dctcp_en) { 748 udp->tos &= ~ECN_CODE_PT_MASK; 749 udp->tos |= ECN_CODE_PT_VAL; 750 } 751 752 set_64bit_val(qp_ctx, IRDMA_BYTE_24, 753 LS_64(qp->hw_rq_size, IRDMAQPC_RQSIZE) | 754 LS_64(qp->hw_sq_size, IRDMAQPC_SQSIZE) | 755 LS_64(udp->ttl, IRDMAQPC_TTL) | LS_64(udp->tos, IRDMAQPC_TOS) | 756 LS_64(udp->src_port, IRDMAQPC_SRCPORTNUM) | 757 LS_64(udp->dst_port, IRDMAQPC_DESTPORTNUM)); 758 set_64bit_val(qp_ctx, IRDMA_BYTE_32, 759 LS_64(udp->dest_ip_addr[2], IRDMAQPC_DESTIPADDR2) | 760 LS_64(udp->dest_ip_addr[3], IRDMAQPC_DESTIPADDR3)); 761 set_64bit_val(qp_ctx, IRDMA_BYTE_40, 762 LS_64(udp->dest_ip_addr[0], IRDMAQPC_DESTIPADDR0) | 763 LS_64(udp->dest_ip_addr[1], IRDMAQPC_DESTIPADDR1)); 764 set_64bit_val(qp_ctx, IRDMA_BYTE_48, 765 LS_64(udp->snd_mss, IRDMAQPC_SNDMSS) | 766 LS_64(udp->vlan_tag, IRDMAQPC_VLANTAG) | 767 LS_64(udp->arp_idx, IRDMAQPC_ARPIDX)); 768 set_64bit_val(qp_ctx, IRDMA_BYTE_56, 769 LS_64(roce_info->p_key, IRDMAQPC_PKEY) | 770 LS_64(roce_info->pd_id, IRDMAQPC_PDIDX) | 771 LS_64(roce_info->ack_credits, IRDMAQPC_ACKCREDITS) | 772 LS_64(udp->flow_label, IRDMAQPC_FLOWLABEL)); 773 set_64bit_val(qp_ctx, IRDMA_BYTE_64, 774 LS_64(roce_info->qkey, IRDMAQPC_QKEY) | 775 LS_64(roce_info->dest_qp, IRDMAQPC_DESTQP)); 776 set_64bit_val(qp_ctx, IRDMA_BYTE_80, 777 LS_64(udp->psn_nxt, IRDMAQPC_PSNNXT) | 778 LS_64(udp->lsn, IRDMAQPC_LSN)); 779 set_64bit_val(qp_ctx, IRDMA_BYTE_88, LS_64(udp->epsn, IRDMAQPC_EPSN)); 780 set_64bit_val(qp_ctx, IRDMA_BYTE_96, 781 LS_64(udp->psn_max, IRDMAQPC_PSNMAX) | 782 LS_64(udp->psn_una, IRDMAQPC_PSNUNA)); 783 set_64bit_val(qp_ctx, IRDMA_BYTE_112, 784 LS_64(udp->cwnd, IRDMAQPC_CWNDROCE)); 785 set_64bit_val(qp_ctx, IRDMA_BYTE_128, 786 LS_64(roce_info->err_rq_idx, IRDMAQPC_ERR_RQ_IDX) | 787 LS_64(udp->rnr_nak_thresh, IRDMAQPC_RNRNAK_THRESH) | 788 LS_64(udp->rexmit_thresh, IRDMAQPC_REXMIT_THRESH) | 789 LS_64(roce_info->rtomin, IRDMAQPC_RTOMIN)); 790 set_64bit_val(qp_ctx, IRDMA_BYTE_136, 791 LS_64(info->send_cq_num, IRDMAQPC_TXCQNUM) | 792 LS_64(info->rcv_cq_num, IRDMAQPC_RXCQNUM)); 793 set_64bit_val(qp_ctx, IRDMA_BYTE_144, 794 LS_64(info->stats_idx, IRDMAQPC_STAT_INDEX)); 795 set_64bit_val(qp_ctx, IRDMA_BYTE_152, mac); 796 set_64bit_val(qp_ctx, IRDMA_BYTE_160, 797 LS_64(roce_info->ord_size, IRDMAQPC_ORDSIZE) | 798 LS_64(irdma_sc_get_encoded_ird_size(roce_info->ird_size), IRDMAQPC_IRDSIZE) | 799 LS_64(roce_info->wr_rdresp_en, IRDMAQPC_WRRDRSPOK) | 800 LS_64(roce_info->rd_en, IRDMAQPC_RDOK) | 801 LS_64(info->stats_idx_valid, IRDMAQPC_USESTATSINSTANCE) | 802 LS_64(roce_info->bind_en, IRDMAQPC_BINDEN) | 803 LS_64(roce_info->fast_reg_en, IRDMAQPC_FASTREGEN) | 804 LS_64(roce_info->dcqcn_en, IRDMAQPC_DCQCNENABLE) | 805 LS_64(roce_info->rcv_no_icrc, IRDMAQPC_RCVNOICRC) | 806 LS_64(roce_info->fw_cc_enable, IRDMAQPC_FW_CC_ENABLE) | 807 LS_64(roce_info->udprivcq_en, IRDMAQPC_UDPRIVCQENABLE) | 808 LS_64(roce_info->priv_mode_en, IRDMAQPC_PRIVEN) | 809 LS_64(roce_info->timely_en, IRDMAQPC_TIMELYENABLE)); 810 set_64bit_val(qp_ctx, IRDMA_BYTE_168, 811 LS_64(info->qp_compl_ctx, IRDMAQPC_QPCOMPCTX)); 812 set_64bit_val(qp_ctx, IRDMA_BYTE_176, 813 LS_64(qp->sq_tph_val, IRDMAQPC_SQTPHVAL) | 814 LS_64(qp->rq_tph_val, IRDMAQPC_RQTPHVAL) | 815 LS_64(qp->qs_handle, IRDMAQPC_QSHANDLE)); 816 set_64bit_val(qp_ctx, IRDMA_BYTE_184, 817 LS_64(udp->local_ipaddr[3], IRDMAQPC_LOCAL_IPADDR3) | 818 LS_64(udp->local_ipaddr[2], IRDMAQPC_LOCAL_IPADDR2)); 819 set_64bit_val(qp_ctx, IRDMA_BYTE_192, 820 LS_64(udp->local_ipaddr[1], 
IRDMAQPC_LOCAL_IPADDR1) |
		      LS_64(udp->local_ipaddr[0], IRDMAQPC_LOCAL_IPADDR0));
	set_64bit_val(qp_ctx, IRDMA_BYTE_200,
		      LS_64(roce_info->t_high, IRDMAQPC_THIGH) |
		      LS_64(roce_info->t_low, IRDMAQPC_TLOW));
	set_64bit_val(qp_ctx, IRDMA_BYTE_208,
		      LS_64(info->rem_endpoint_idx, IRDMAQPC_REMENDPOINTIDX));

	irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "QP_HOST CTX WQE", qp_ctx,
			IRDMA_QP_CTX_SIZE);
}

/**
 * irdma_sc_alloc_local_mac_entry - allocate a mac entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_alloc_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
			       bool post_sq)
{
	__le64 *wqe;
	u64 hdr;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;

	hdr = LS_64(IRDMA_CQP_OP_ALLOCATE_LOC_MAC_TABLE_ENTRY,
		    IRDMA_CQPSQ_OPCODE) |
	    LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);

	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, hdr);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "ALLOCATE_LOCAL_MAC WQE",
			wqe, IRDMA_CQP_WQE_SIZE * 8);

	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * irdma_sc_add_local_mac_entry - add mac entry
 * @cqp: struct for cqp hw
 * @info: mac addr info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_add_local_mac_entry(struct irdma_sc_cqp *cqp,
			     struct irdma_local_mac_entry_info *info,
			     u64 scratch, bool post_sq)
{
	__le64 *wqe;
	u64 temp, header;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;
	temp = info->mac_addr[5] | LS_64_1(info->mac_addr[4], 8) |
	    LS_64_1(info->mac_addr[3], 16) | LS_64_1(info->mac_addr[2], 24) |
	    LS_64_1(info->mac_addr[1], 32) | LS_64_1(info->mac_addr[0], 40);

	set_64bit_val(wqe, IRDMA_BYTE_32, temp);

	header = LS_64(info->entry_idx, IRDMA_CQPSQ_MLM_TABLEIDX) |
	    LS_64(IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE, IRDMA_CQPSQ_OPCODE) |
	    LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID);

	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, header);

	irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "ADD_LOCAL_MAC WQE", wqe,
			IRDMA_CQP_WQE_SIZE * 8);

	if (post_sq)
		irdma_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * irdma_sc_del_local_mac_entry - cqp wqe to delete local mac
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @entry_idx: index of mac entry
 * @ignore_ref_count: to force mac address delete
 * @post_sq: flag for cqp db to ring
 */
static int
irdma_sc_del_local_mac_entry(struct irdma_sc_cqp *cqp, u64 scratch,
			     u16 entry_idx, u8 ignore_ref_count,
			     bool post_sq)
{
	__le64 *wqe;
	u64 header;

	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return -ENOSPC;
	header = LS_64(entry_idx, IRDMA_CQPSQ_MLM_TABLEIDX) |
	    LS_64(IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE, IRDMA_CQPSQ_OPCODE) |
	    LS_64(1, IRDMA_CQPSQ_MLM_FREEENTRY) |
	    LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID) |
	    LS_64(ignore_ref_count, IRDMA_CQPSQ_MLM_IGNORE_REF_CNT);

	irdma_wmb();	/* make sure WQE is written before valid bit is set */

	set_64bit_val(wqe, IRDMA_BYTE_24, header);

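	/*
	 * Descriptive note (not in the original source): the CQP WQE
	 * builders in this file all follow the same pattern seen here,
	 * namely the payload quadwords are written first, irdma_wmb()
	 * orders those stores, and only then is the header quadword at
	 * byte 24 written with the valid bit set to cqp->polarity, so
	 * hardware never observes a partially built WQE.
	 */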
irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "DEL_LOCAL_MAC_IPADDR WQE", 933 wqe, IRDMA_CQP_WQE_SIZE * 8); 934 935 if (post_sq) 936 irdma_sc_cqp_post_sq(cqp); 937 return 0; 938 } 939 940 /** 941 * irdma_sc_qp_setctx - set qp's context 942 * @qp: sc qp 943 * @qp_ctx: context ptr 944 * @info: ctx info 945 */ 946 void 947 irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 * qp_ctx, 948 struct irdma_qp_host_ctx_info *info) 949 { 950 struct irdma_iwarp_offload_info *iw; 951 struct irdma_tcp_offload_info *tcp; 952 struct irdma_sc_dev *dev; 953 u8 push_mode_en; 954 u32 push_idx; 955 u64 qw0, qw3, qw7 = 0, qw16 = 0; 956 u64 mac = 0; 957 958 iw = info->iwarp_info; 959 tcp = info->tcp_info; 960 dev = qp->dev; 961 if (iw->rcv_mark_en) { 962 qp->pfpdu.marker_len = 4; 963 qp->pfpdu.rcv_start_seq = tcp->rcv_nxt; 964 } 965 qp->user_pri = info->user_pri; 966 if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX) { 967 push_mode_en = 0; 968 push_idx = 0; 969 } else { 970 push_mode_en = 1; 971 push_idx = qp->push_idx; 972 } 973 qw0 = LS_64(qp->qp_uk.rq_wqe_size, IRDMAQPC_RQWQESIZE) | 974 LS_64(qp->rcv_tph_en, IRDMAQPC_RCVTPHEN) | 975 LS_64(qp->xmit_tph_en, IRDMAQPC_XMITTPHEN) | 976 LS_64(qp->rq_tph_en, IRDMAQPC_RQTPHEN) | 977 LS_64(qp->sq_tph_en, IRDMAQPC_SQTPHEN) | 978 LS_64(push_idx, IRDMAQPC_PPIDX) | 979 LS_64(push_mode_en, IRDMAQPC_PMENA); 980 981 set_64bit_val(qp_ctx, IRDMA_BYTE_8, qp->sq_pa); 982 set_64bit_val(qp_ctx, IRDMA_BYTE_16, qp->rq_pa); 983 984 qw3 = LS_64(qp->hw_rq_size, IRDMAQPC_RQSIZE) | 985 LS_64(qp->hw_sq_size, IRDMAQPC_SQSIZE); 986 if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) 987 qw3 |= LS_64(qp->src_mac_addr_idx, IRDMAQPC_GEN1_SRCMACADDRIDX); 988 set_64bit_val(qp_ctx, IRDMA_BYTE_136, 989 LS_64(info->send_cq_num, IRDMAQPC_TXCQNUM) | 990 LS_64(info->rcv_cq_num, IRDMAQPC_RXCQNUM)); 991 set_64bit_val(qp_ctx, IRDMA_BYTE_168, 992 LS_64(info->qp_compl_ctx, IRDMAQPC_QPCOMPCTX)); 993 set_64bit_val(qp_ctx, IRDMA_BYTE_176, 994 LS_64(qp->sq_tph_val, IRDMAQPC_SQTPHVAL) | 995 LS_64(qp->rq_tph_val, IRDMAQPC_RQTPHVAL) | 996 LS_64(qp->qs_handle, IRDMAQPC_QSHANDLE) | 997 LS_64(qp->ieq_qp, IRDMAQPC_EXCEPTION_LAN_QUEUE)); 998 if (info->iwarp_info_valid) { 999 qw0 |= LS_64(iw->ddp_ver, IRDMAQPC_DDP_VER) | 1000 LS_64(iw->rdmap_ver, IRDMAQPC_RDMAP_VER) | 1001 LS_64(iw->dctcp_en, IRDMAQPC_DC_TCP_EN) | 1002 LS_64(iw->ecn_en, IRDMAQPC_ECN_EN) | 1003 LS_64(iw->ib_rd_en, IRDMAQPC_IBRDENABLE) | 1004 LS_64(iw->pd_id >> 16, IRDMAQPC_PDIDXHI) | 1005 LS_64(iw->err_rq_idx_valid, IRDMAQPC_ERR_RQ_IDX_VALID); 1006 qw7 |= LS_64(iw->pd_id, IRDMAQPC_PDIDX); 1007 qw16 |= LS_64(iw->err_rq_idx, IRDMAQPC_ERR_RQ_IDX) | 1008 LS_64(iw->rtomin, IRDMAQPC_RTOMIN); 1009 set_64bit_val(qp_ctx, IRDMA_BYTE_144, 1010 LS_64(qp->q2_pa >> 8, IRDMAQPC_Q2ADDR) | 1011 LS_64(info->stats_idx, IRDMAQPC_STAT_INDEX)); 1012 1013 if (dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) { 1014 mac = LS_64_1(iw->mac_addr[5], 16) | 1015 LS_64_1(iw->mac_addr[4], 24) | 1016 LS_64_1(iw->mac_addr[3], 32) | 1017 LS_64_1(iw->mac_addr[2], 40) | 1018 LS_64_1(iw->mac_addr[1], 48) | 1019 LS_64_1(iw->mac_addr[0], 56); 1020 } 1021 1022 set_64bit_val(qp_ctx, IRDMA_BYTE_152, 1023 mac | LS_64(iw->last_byte_sent, IRDMAQPC_LASTBYTESENT)); 1024 set_64bit_val(qp_ctx, IRDMA_BYTE_160, 1025 LS_64(iw->ord_size, IRDMAQPC_ORDSIZE) | 1026 LS_64(irdma_sc_get_encoded_ird_size(iw->ird_size), IRDMAQPC_IRDSIZE) | 1027 LS_64(iw->wr_rdresp_en, IRDMAQPC_WRRDRSPOK) | 1028 LS_64(iw->rd_en, IRDMAQPC_RDOK) | 1029 LS_64(iw->snd_mark_en, IRDMAQPC_SNDMARKERS) | 1030 LS_64(iw->bind_en, IRDMAQPC_BINDEN) | 
1031 LS_64(iw->fast_reg_en, IRDMAQPC_FASTREGEN) | 1032 LS_64(iw->priv_mode_en, IRDMAQPC_PRIVEN) | 1033 LS_64(info->stats_idx_valid, IRDMAQPC_USESTATSINSTANCE) | 1034 LS_64(1, IRDMAQPC_IWARPMODE) | 1035 LS_64(iw->rcv_mark_en, IRDMAQPC_RCVMARKERS) | 1036 LS_64(iw->align_hdrs, IRDMAQPC_ALIGNHDRS) | 1037 LS_64(iw->rcv_no_mpa_crc, IRDMAQPC_RCVNOMPACRC) | 1038 LS_64(iw->rcv_mark_offset, IRDMAQPC_RCVMARKOFFSET) | 1039 LS_64(iw->snd_mark_offset, IRDMAQPC_SNDMARKOFFSET) | 1040 LS_64(iw->timely_en, IRDMAQPC_TIMELYENABLE)); 1041 } 1042 if (info->tcp_info_valid) { 1043 qw0 |= LS_64(tcp->ipv4, IRDMAQPC_IPV4) | 1044 LS_64(tcp->no_nagle, IRDMAQPC_NONAGLE) | 1045 LS_64(tcp->insert_vlan_tag, IRDMAQPC_INSERTVLANTAG) | 1046 LS_64(tcp->time_stamp, IRDMAQPC_TIMESTAMP) | 1047 LS_64(tcp->cwnd_inc_limit, IRDMAQPC_LIMIT) | 1048 LS_64(tcp->drop_ooo_seg, IRDMAQPC_DROPOOOSEG) | 1049 LS_64(tcp->dup_ack_thresh, IRDMAQPC_DUPACK_THRESH); 1050 1051 if (iw->ecn_en || iw->dctcp_en) { 1052 tcp->tos &= ~ECN_CODE_PT_MASK; 1053 tcp->tos |= ECN_CODE_PT_VAL; 1054 } 1055 1056 qw3 |= LS_64(tcp->ttl, IRDMAQPC_TTL) | 1057 LS_64(tcp->avoid_stretch_ack, IRDMAQPC_AVOIDSTRETCHACK) | 1058 LS_64(tcp->tos, IRDMAQPC_TOS) | 1059 LS_64(tcp->src_port, IRDMAQPC_SRCPORTNUM) | 1060 LS_64(tcp->dst_port, IRDMAQPC_DESTPORTNUM); 1061 if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) { 1062 qw3 |= LS_64(tcp->src_mac_addr_idx, 1063 IRDMAQPC_GEN1_SRCMACADDRIDX); 1064 1065 qp->src_mac_addr_idx = tcp->src_mac_addr_idx; 1066 } 1067 set_64bit_val(qp_ctx, IRDMA_BYTE_32, 1068 LS_64(tcp->dest_ip_addr[2], IRDMAQPC_DESTIPADDR2) | 1069 LS_64(tcp->dest_ip_addr[3], IRDMAQPC_DESTIPADDR3)); 1070 set_64bit_val(qp_ctx, IRDMA_BYTE_40, 1071 LS_64(tcp->dest_ip_addr[0], IRDMAQPC_DESTIPADDR0) | 1072 LS_64(tcp->dest_ip_addr[1], IRDMAQPC_DESTIPADDR1)); 1073 set_64bit_val(qp_ctx, IRDMA_BYTE_48, 1074 LS_64(tcp->snd_mss, IRDMAQPC_SNDMSS) | 1075 LS_64(tcp->syn_rst_handling, IRDMAQPC_SYN_RST_HANDLING) | 1076 LS_64(tcp->vlan_tag, IRDMAQPC_VLANTAG) | 1077 LS_64(tcp->arp_idx, IRDMAQPC_ARPIDX)); 1078 qw7 |= LS_64(tcp->flow_label, IRDMAQPC_FLOWLABEL) | 1079 LS_64(tcp->wscale, IRDMAQPC_WSCALE) | 1080 LS_64(tcp->ignore_tcp_opt, IRDMAQPC_IGNORE_TCP_OPT) | 1081 LS_64(tcp->ignore_tcp_uns_opt, 1082 IRDMAQPC_IGNORE_TCP_UNS_OPT) | 1083 LS_64(tcp->tcp_state, IRDMAQPC_TCPSTATE) | 1084 LS_64(tcp->rcv_wscale, IRDMAQPC_RCVSCALE) | 1085 LS_64(tcp->snd_wscale, IRDMAQPC_SNDSCALE); 1086 set_64bit_val(qp_ctx, IRDMA_BYTE_72, 1087 LS_64(tcp->time_stamp_recent, IRDMAQPC_TIMESTAMP_RECENT) | 1088 LS_64(tcp->time_stamp_age, IRDMAQPC_TIMESTAMP_AGE)); 1089 set_64bit_val(qp_ctx, IRDMA_BYTE_80, 1090 LS_64(tcp->snd_nxt, IRDMAQPC_SNDNXT) | 1091 LS_64(tcp->snd_wnd, IRDMAQPC_SNDWND)); 1092 set_64bit_val(qp_ctx, IRDMA_BYTE_88, 1093 LS_64(tcp->rcv_nxt, IRDMAQPC_RCVNXT) | 1094 LS_64(tcp->rcv_wnd, IRDMAQPC_RCVWND)); 1095 set_64bit_val(qp_ctx, IRDMA_BYTE_96, 1096 LS_64(tcp->snd_max, IRDMAQPC_SNDMAX) | 1097 LS_64(tcp->snd_una, IRDMAQPC_SNDUNA)); 1098 set_64bit_val(qp_ctx, IRDMA_BYTE_104, 1099 LS_64(tcp->srtt, IRDMAQPC_SRTT) | 1100 LS_64(tcp->rtt_var, IRDMAQPC_RTTVAR)); 1101 set_64bit_val(qp_ctx, IRDMA_BYTE_112, 1102 LS_64(tcp->ss_thresh, IRDMAQPC_SSTHRESH) | 1103 LS_64(tcp->cwnd, IRDMAQPC_CWND)); 1104 set_64bit_val(qp_ctx, IRDMA_BYTE_120, 1105 LS_64(tcp->snd_wl1, IRDMAQPC_SNDWL1) | 1106 LS_64(tcp->snd_wl2, IRDMAQPC_SNDWL2)); 1107 qw16 |= LS_64(tcp->max_snd_window, IRDMAQPC_MAXSNDWND) | 1108 LS_64(tcp->rexmit_thresh, IRDMAQPC_REXMIT_THRESH); 1109 set_64bit_val(qp_ctx, IRDMA_BYTE_184, 1110 LS_64(tcp->local_ipaddr[3], 
IRDMAQPC_LOCAL_IPADDR3) | 1111 LS_64(tcp->local_ipaddr[2], IRDMAQPC_LOCAL_IPADDR2)); 1112 set_64bit_val(qp_ctx, IRDMA_BYTE_192, 1113 LS_64(tcp->local_ipaddr[1], IRDMAQPC_LOCAL_IPADDR1) | 1114 LS_64(tcp->local_ipaddr[0], IRDMAQPC_LOCAL_IPADDR0)); 1115 set_64bit_val(qp_ctx, IRDMA_BYTE_200, 1116 LS_64(iw->t_high, IRDMAQPC_THIGH) | 1117 LS_64(iw->t_low, IRDMAQPC_TLOW)); 1118 set_64bit_val(qp_ctx, IRDMA_BYTE_208, 1119 LS_64(info->rem_endpoint_idx, IRDMAQPC_REMENDPOINTIDX)); 1120 } 1121 1122 set_64bit_val(qp_ctx, IRDMA_BYTE_0, qw0); 1123 set_64bit_val(qp_ctx, IRDMA_BYTE_24, qw3); 1124 set_64bit_val(qp_ctx, IRDMA_BYTE_56, qw7); 1125 set_64bit_val(qp_ctx, IRDMA_BYTE_128, qw16); 1126 1127 irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "QP_HOST CTX", qp_ctx, 1128 IRDMA_QP_CTX_SIZE); 1129 } 1130 1131 /** 1132 * irdma_sc_alloc_stag - mr stag alloc 1133 * @dev: sc device struct 1134 * @info: stag info 1135 * @scratch: u64 saved to be used during cqp completion 1136 * @post_sq: flag for cqp db to ring 1137 */ 1138 static int 1139 irdma_sc_alloc_stag(struct irdma_sc_dev *dev, 1140 struct irdma_allocate_stag_info *info, 1141 u64 scratch, bool post_sq) 1142 { 1143 __le64 *wqe; 1144 struct irdma_sc_cqp *cqp; 1145 u64 hdr; 1146 enum irdma_page_size page_size; 1147 1148 if (info->page_size == 0x40000000) 1149 page_size = IRDMA_PAGE_SIZE_1G; 1150 else if (info->page_size == 0x200000) 1151 page_size = IRDMA_PAGE_SIZE_2M; 1152 else 1153 page_size = IRDMA_PAGE_SIZE_4K; 1154 1155 cqp = dev->cqp; 1156 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 1157 if (!wqe) 1158 return -ENOSPC; 1159 1160 set_64bit_val(wqe, IRDMA_BYTE_8, 1161 FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID) | 1162 LS_64(info->total_len, IRDMA_CQPSQ_STAG_STAGLEN)); 1163 set_64bit_val(wqe, IRDMA_BYTE_16, 1164 LS_64(info->stag_idx, IRDMA_CQPSQ_STAG_IDX)); 1165 set_64bit_val(wqe, IRDMA_BYTE_40, 1166 LS_64(info->hmc_fcn_index, IRDMA_CQPSQ_STAG_HMCFNIDX)); 1167 1168 if (info->chunk_size) 1169 set_64bit_val(wqe, IRDMA_BYTE_48, 1170 LS_64(info->first_pm_pbl_idx, IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX)); 1171 1172 hdr = LS_64(IRDMA_CQP_OP_ALLOC_STAG, IRDMA_CQPSQ_OPCODE) | 1173 LS_64(1, IRDMA_CQPSQ_STAG_MR) | 1174 LS_64(info->access_rights, IRDMA_CQPSQ_STAG_ARIGHTS) | 1175 LS_64(info->chunk_size, IRDMA_CQPSQ_STAG_LPBLSIZE) | 1176 LS_64(page_size, IRDMA_CQPSQ_STAG_HPAGESIZE) | 1177 LS_64(info->remote_access, IRDMA_CQPSQ_STAG_REMACCENABLED) | 1178 LS_64(info->use_hmc_fcn_index, IRDMA_CQPSQ_STAG_USEHMCFNIDX) | 1179 LS_64(info->use_pf_rid, IRDMA_CQPSQ_STAG_USEPFRID) | 1180 LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID); 1181 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 1182 1183 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 1184 1185 irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "ALLOC_STAG WQE", wqe, 1186 IRDMA_CQP_WQE_SIZE * 8); 1187 if (post_sq) 1188 irdma_sc_cqp_post_sq(cqp); 1189 1190 return 0; 1191 } 1192 1193 /** 1194 * irdma_sc_mr_reg_non_shared - non-shared mr registration 1195 * @dev: sc device struct 1196 * @info: mr info 1197 * @scratch: u64 saved to be used during cqp completion 1198 * @post_sq: flag for cqp db to ring 1199 */ 1200 static int 1201 irdma_sc_mr_reg_non_shared(struct irdma_sc_dev *dev, 1202 struct irdma_reg_ns_stag_info *info, 1203 u64 scratch, bool post_sq) 1204 { 1205 __le64 *wqe; 1206 u64 fbo; 1207 struct irdma_sc_cqp *cqp; 1208 u64 hdr; 1209 u32 pble_obj_cnt; 1210 bool remote_access; 1211 u8 addr_type; 1212 enum irdma_page_size page_size; 1213 1214 if (info->page_size == 0x40000000) 1215 page_size = IRDMA_PAGE_SIZE_1G; 1216 else 
if (info->page_size == 0x200000) 1217 page_size = IRDMA_PAGE_SIZE_2M; 1218 else if (info->page_size == 0x1000) 1219 page_size = IRDMA_PAGE_SIZE_4K; 1220 else 1221 return -EINVAL; 1222 1223 if (info->access_rights & (IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY | 1224 IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY)) 1225 remote_access = true; 1226 else 1227 remote_access = false; 1228 1229 pble_obj_cnt = dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt; 1230 if (info->chunk_size && info->first_pm_pbl_index >= pble_obj_cnt) 1231 return -EINVAL; 1232 1233 cqp = dev->cqp; 1234 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 1235 if (!wqe) 1236 return -ENOSPC; 1237 fbo = info->va & (info->page_size - 1); 1238 1239 set_64bit_val(wqe, IRDMA_BYTE_0, 1240 (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED ? 1241 info->va : fbo)); 1242 set_64bit_val(wqe, IRDMA_BYTE_8, 1243 LS_64(info->total_len, IRDMA_CQPSQ_STAG_STAGLEN) | 1244 FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID)); 1245 set_64bit_val(wqe, IRDMA_BYTE_16, 1246 LS_64(info->stag_key, IRDMA_CQPSQ_STAG_KEY) | 1247 LS_64(info->stag_idx, IRDMA_CQPSQ_STAG_IDX)); 1248 if (!info->chunk_size) 1249 set_64bit_val(wqe, IRDMA_BYTE_32, info->reg_addr_pa); 1250 else 1251 set_64bit_val(wqe, IRDMA_BYTE_48, 1252 LS_64(info->first_pm_pbl_index, IRDMA_CQPSQ_STAG_FIRSTPMPBLIDX)); 1253 1254 set_64bit_val(wqe, IRDMA_BYTE_40, info->hmc_fcn_index); 1255 1256 addr_type = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ? 1 : 0; 1257 hdr = LS_64(IRDMA_CQP_OP_REG_MR, IRDMA_CQPSQ_OPCODE) | 1258 LS_64(1, IRDMA_CQPSQ_STAG_MR) | 1259 LS_64(info->chunk_size, IRDMA_CQPSQ_STAG_LPBLSIZE) | 1260 LS_64(page_size, IRDMA_CQPSQ_STAG_HPAGESIZE) | 1261 LS_64(info->access_rights, IRDMA_CQPSQ_STAG_ARIGHTS) | 1262 LS_64(remote_access, IRDMA_CQPSQ_STAG_REMACCENABLED) | 1263 LS_64(addr_type, IRDMA_CQPSQ_STAG_VABASEDTO) | 1264 LS_64(info->use_hmc_fcn_index, IRDMA_CQPSQ_STAG_USEHMCFNIDX) | 1265 LS_64(info->use_pf_rid, IRDMA_CQPSQ_STAG_USEPFRID) | 1266 LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID); 1267 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 1268 1269 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 1270 1271 irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "MR_REG_NS WQE", wqe, 1272 IRDMA_CQP_WQE_SIZE * 8); 1273 if (post_sq) 1274 irdma_sc_cqp_post_sq(cqp); 1275 1276 return 0; 1277 } 1278 1279 /** 1280 * irdma_sc_dealloc_stag - deallocate stag 1281 * @dev: sc device struct 1282 * @info: dealloc stag info 1283 * @scratch: u64 saved to be used during cqp completion 1284 * @post_sq: flag for cqp db to ring 1285 */ 1286 static int 1287 irdma_sc_dealloc_stag(struct irdma_sc_dev *dev, 1288 struct irdma_dealloc_stag_info *info, 1289 u64 scratch, bool post_sq) 1290 { 1291 u64 hdr; 1292 __le64 *wqe; 1293 struct irdma_sc_cqp *cqp; 1294 1295 cqp = dev->cqp; 1296 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 1297 if (!wqe) 1298 return -ENOSPC; 1299 1300 set_64bit_val(wqe, IRDMA_BYTE_8, 1301 FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID)); 1302 set_64bit_val(wqe, IRDMA_BYTE_16, 1303 LS_64(info->stag_idx, IRDMA_CQPSQ_STAG_IDX)); 1304 1305 hdr = LS_64(IRDMA_CQP_OP_DEALLOC_STAG, IRDMA_CQPSQ_OPCODE) | 1306 LS_64(info->mr, IRDMA_CQPSQ_STAG_MR) | 1307 LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID); 1308 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 1309 1310 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 1311 1312 irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "DEALLOC_STAG WQE", wqe, 1313 IRDMA_CQP_WQE_SIZE * 8); 1314 if (post_sq) 1315 irdma_sc_cqp_post_sq(cqp); 1316 1317 return 0; 1318 } 1319 1320 /** 1321 * 
irdma_sc_mw_alloc - mw allocate 1322 * @dev: sc device struct 1323 * @info: memory window allocation information 1324 * @scratch: u64 saved to be used during cqp completion 1325 * @post_sq: flag for cqp db to ring 1326 */ 1327 static int 1328 irdma_sc_mw_alloc(struct irdma_sc_dev *dev, 1329 struct irdma_mw_alloc_info *info, u64 scratch, 1330 bool post_sq) 1331 { 1332 u64 hdr; 1333 struct irdma_sc_cqp *cqp; 1334 __le64 *wqe; 1335 1336 cqp = dev->cqp; 1337 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 1338 if (!wqe) 1339 return -ENOSPC; 1340 1341 set_64bit_val(wqe, IRDMA_BYTE_8, 1342 FLD_LS_64(dev, info->pd_id, IRDMA_CQPSQ_STAG_PDID)); 1343 set_64bit_val(wqe, IRDMA_BYTE_16, 1344 LS_64(info->mw_stag_index, IRDMA_CQPSQ_STAG_IDX)); 1345 1346 hdr = LS_64(IRDMA_CQP_OP_ALLOC_STAG, IRDMA_CQPSQ_OPCODE) | 1347 LS_64(info->mw_wide, IRDMA_CQPSQ_STAG_MWTYPE) | 1348 LS_64(info->mw1_bind_dont_vldt_key, 1349 IRDMA_CQPSQ_STAG_MW1_BIND_DONT_VLDT_KEY) | 1350 LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID); 1351 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 1352 1353 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 1354 1355 irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "MW_ALLOC WQE", wqe, 1356 IRDMA_CQP_WQE_SIZE * 8); 1357 if (post_sq) 1358 irdma_sc_cqp_post_sq(cqp); 1359 1360 return 0; 1361 } 1362 1363 /** 1364 * irdma_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp 1365 * @qp: sc qp struct 1366 * @info: fast mr info 1367 * @post_sq: flag for cqp db to ring 1368 */ 1369 int 1370 irdma_sc_mr_fast_register(struct irdma_sc_qp *qp, 1371 struct irdma_fast_reg_stag_info *info, 1372 bool post_sq) 1373 { 1374 u64 temp, hdr; 1375 __le64 *wqe; 1376 u32 wqe_idx; 1377 enum irdma_page_size page_size; 1378 struct irdma_post_sq_info sq_info = {0}; 1379 1380 if (info->page_size == 0x40000000) 1381 page_size = IRDMA_PAGE_SIZE_1G; 1382 else if (info->page_size == 0x200000) 1383 page_size = IRDMA_PAGE_SIZE_2M; 1384 else 1385 page_size = IRDMA_PAGE_SIZE_4K; 1386 1387 sq_info.wr_id = info->wr_id; 1388 sq_info.signaled = info->signaled; 1389 sq_info.push_wqe = info->push_wqe; 1390 1391 wqe = irdma_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, 1392 IRDMA_QP_WQE_MIN_QUANTA, 0, &sq_info); 1393 if (!wqe) 1394 return -ENOSPC; 1395 1396 irdma_clr_wqes(&qp->qp_uk, wqe_idx); 1397 1398 qp->qp_uk.sq_wrtrk_array[wqe_idx].signaled = info->signaled; 1399 irdma_debug(qp->dev, IRDMA_DEBUG_MR, 1400 "wr_id[%llxh] wqe_idx[%04d] location[%p]\n", (unsigned long long)info->wr_id, 1401 wqe_idx, &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid); 1402 1403 temp = (info->addr_type == IRDMA_ADDR_TYPE_VA_BASED) ? 1404 (uintptr_t)info->va : info->fbo; 1405 set_64bit_val(wqe, IRDMA_BYTE_0, temp); 1406 1407 temp = RS_64(info->first_pm_pbl_index >> 16, IRDMAQPSQ_FIRSTPMPBLIDXHI); 1408 set_64bit_val(wqe, IRDMA_BYTE_8, 1409 LS_64(temp, IRDMAQPSQ_FIRSTPMPBLIDXHI) | 1410 LS_64(info->reg_addr_pa >> IRDMAQPSQ_PBLADDR_S, IRDMAQPSQ_PBLADDR)); 1411 set_64bit_val(wqe, IRDMA_BYTE_16, 1412 info->total_len | 1413 LS_64(info->first_pm_pbl_index, IRDMAQPSQ_FIRSTPMPBLIDXLO)); 1414 1415 hdr = LS_64(info->stag_key, IRDMAQPSQ_STAGKEY) | 1416 LS_64(info->stag_idx, IRDMAQPSQ_STAGINDEX) | 1417 LS_64(IRDMAQP_OP_FAST_REGISTER, IRDMAQPSQ_OPCODE) | 1418 LS_64(info->chunk_size, IRDMAQPSQ_LPBLSIZE) | 1419 LS_64(page_size, IRDMAQPSQ_HPAGESIZE) | 1420 LS_64(info->access_rights, IRDMAQPSQ_STAGRIGHTS) | 1421 LS_64(info->addr_type, IRDMAQPSQ_VABASEDTO) | 1422 LS_64((sq_info.push_wqe ? 
1 : 0), IRDMAQPSQ_PUSHWQE) | 1423 LS_64(info->read_fence, IRDMAQPSQ_READFENCE) | 1424 LS_64(info->local_fence, IRDMAQPSQ_LOCALFENCE) | 1425 LS_64(info->signaled, IRDMAQPSQ_SIGCOMPL) | 1426 LS_64(qp->qp_uk.swqe_polarity, IRDMAQPSQ_VALID); 1427 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 1428 1429 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 1430 1431 irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "FAST_REG WQE", wqe, 1432 IRDMA_QP_WQE_MIN_SIZE); 1433 if (sq_info.push_wqe) { 1434 irdma_qp_push_wqe(&qp->qp_uk, wqe, IRDMA_QP_WQE_MIN_QUANTA, 1435 wqe_idx, post_sq); 1436 } else { 1437 if (post_sq) 1438 irdma_uk_qp_post_wr(&qp->qp_uk); 1439 } 1440 1441 return 0; 1442 } 1443 1444 /** 1445 * irdma_sc_gen_rts_ae - request AE generated after RTS 1446 * @qp: sc qp struct 1447 */ 1448 static void 1449 irdma_sc_gen_rts_ae(struct irdma_sc_qp *qp) 1450 { 1451 __le64 *wqe; 1452 u64 hdr; 1453 struct irdma_qp_uk *qp_uk; 1454 1455 qp_uk = &qp->qp_uk; 1456 1457 wqe = qp_uk->sq_base[1].elem; 1458 1459 hdr = LS_64(IRDMAQP_OP_NOP, IRDMAQPSQ_OPCODE) | 1460 LS_64(1, IRDMAQPSQ_LOCALFENCE) | 1461 LS_64(qp->qp_uk.swqe_polarity, IRDMAQPSQ_VALID); 1462 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 1463 1464 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 1465 irdma_debug_buf(qp->dev, IRDMA_DEBUG_QP, "NOP W/LOCAL FENCE WQE", wqe, 1466 IRDMA_QP_WQE_MIN_SIZE); 1467 1468 wqe = qp_uk->sq_base[2].elem; 1469 hdr = LS_64(IRDMAQP_OP_GEN_RTS_AE, IRDMAQPSQ_OPCODE) | 1470 LS_64(qp->qp_uk.swqe_polarity, IRDMAQPSQ_VALID); 1471 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 1472 1473 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 1474 irdma_debug_buf(qp->dev, IRDMA_DEBUG_QP, "CONN EST WQE", wqe, 1475 IRDMA_QP_WQE_MIN_SIZE); 1476 } 1477 1478 /** 1479 * irdma_sc_send_lsmm - send last streaming mode message 1480 * @qp: sc qp struct 1481 * @lsmm_buf: buffer with lsmm message 1482 * @size: size of lsmm buffer 1483 * @stag: stag of lsmm buffer 1484 */ 1485 int 1486 irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size, 1487 irdma_stag stag) 1488 { 1489 __le64 *wqe; 1490 u64 hdr; 1491 struct irdma_qp_uk *qp_uk; 1492 1493 qp_uk = &qp->qp_uk; 1494 wqe = qp_uk->sq_base->elem; 1495 1496 set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)lsmm_buf); 1497 if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) { 1498 set_64bit_val(wqe, IRDMA_BYTE_8, 1499 LS_64(size, IRDMAQPSQ_GEN1_FRAG_LEN) | 1500 LS_64(stag, IRDMAQPSQ_GEN1_FRAG_STAG)); 1501 } else { 1502 set_64bit_val(wqe, IRDMA_BYTE_8, 1503 LS_64(size, IRDMAQPSQ_FRAG_LEN) | 1504 LS_64(stag, IRDMAQPSQ_FRAG_STAG) | 1505 LS_64(qp->qp_uk.swqe_polarity, IRDMAQPSQ_VALID)); 1506 } 1507 set_64bit_val(wqe, IRDMA_BYTE_16, 0); 1508 1509 hdr = LS_64(IRDMAQP_OP_RDMA_SEND, IRDMAQPSQ_OPCODE) | 1510 LS_64(1, IRDMAQPSQ_STREAMMODE) | 1511 LS_64(1, IRDMAQPSQ_WAITFORRCVPDU) | 1512 LS_64(qp->qp_uk.swqe_polarity, IRDMAQPSQ_VALID); 1513 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 1514 1515 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 1516 1517 irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "SEND_LSMM WQE", wqe, 1518 IRDMA_QP_WQE_MIN_SIZE); 1519 1520 if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE) 1521 irdma_sc_gen_rts_ae(qp); 1522 1523 return 0; 1524 } 1525 1526 /** 1527 * irdma_sc_send_lsmm_nostag - for privilege qp 1528 * @qp: sc qp struct 1529 * @lsmm_buf: buffer with lsmm message 1530 * @size: size of lsmm buffer 1531 */ 1532 int 1533 irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size) 1534 { 1535 __le64 *wqe; 1536 
u64 hdr; 1537 struct irdma_qp_uk *qp_uk; 1538 1539 qp_uk = &qp->qp_uk; 1540 wqe = qp_uk->sq_base->elem; 1541 1542 set_64bit_val(wqe, IRDMA_BYTE_0, (uintptr_t)lsmm_buf); 1543 1544 if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) 1545 set_64bit_val(wqe, IRDMA_BYTE_8, 1546 LS_64(size, IRDMAQPSQ_GEN1_FRAG_LEN)); 1547 else 1548 set_64bit_val(wqe, IRDMA_BYTE_8, 1549 LS_64(size, IRDMAQPSQ_FRAG_LEN) | 1550 LS_64(qp->qp_uk.swqe_polarity, IRDMAQPSQ_VALID)); 1551 set_64bit_val(wqe, IRDMA_BYTE_16, 0); 1552 1553 hdr = LS_64(IRDMAQP_OP_RDMA_SEND, IRDMAQPSQ_OPCODE) | 1554 LS_64(1, IRDMAQPSQ_STREAMMODE) | 1555 LS_64(1, IRDMAQPSQ_WAITFORRCVPDU) | 1556 LS_64(qp->qp_uk.swqe_polarity, IRDMAQPSQ_VALID); 1557 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 1558 1559 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 1560 1561 irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "SEND_LSMM_NOSTAG WQE", wqe, 1562 IRDMA_QP_WQE_MIN_SIZE); 1563 1564 return 0; 1565 } 1566 1567 /** 1568 * irdma_sc_send_rtt - send last read0 or write0 1569 * @qp: sc qp struct 1570 * @read: Do read0 or write0 1571 */ 1572 int 1573 irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read) 1574 { 1575 __le64 *wqe; 1576 u64 hdr; 1577 struct irdma_qp_uk *qp_uk; 1578 1579 qp_uk = &qp->qp_uk; 1580 wqe = qp_uk->sq_base->elem; 1581 1582 set_64bit_val(wqe, IRDMA_BYTE_0, 0); 1583 set_64bit_val(wqe, IRDMA_BYTE_16, 0); 1584 if (read) { 1585 if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) { 1586 set_64bit_val(wqe, IRDMA_BYTE_8, 1587 LS_64(0xabcd, IRDMAQPSQ_GEN1_FRAG_STAG)); 1588 } else { 1589 set_64bit_val(wqe, IRDMA_BYTE_8, 1590 (u64)0xabcd | LS_64(qp->qp_uk.swqe_polarity, 1591 IRDMAQPSQ_VALID)); 1592 } 1593 hdr = LS_64(0x1234, IRDMAQPSQ_REMSTAG) | 1594 LS_64(IRDMAQP_OP_RDMA_READ, IRDMAQPSQ_OPCODE) | 1595 LS_64(qp->qp_uk.swqe_polarity, IRDMAQPSQ_VALID); 1596 1597 } else { 1598 if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1) { 1599 set_64bit_val(wqe, IRDMA_BYTE_8, 0); 1600 } else { 1601 set_64bit_val(wqe, IRDMA_BYTE_8, 1602 LS_64(qp->qp_uk.swqe_polarity, 1603 IRDMAQPSQ_VALID)); 1604 } 1605 hdr = LS_64(IRDMAQP_OP_RDMA_WRITE, IRDMAQPSQ_OPCODE) | 1606 LS_64(qp->qp_uk.swqe_polarity, IRDMAQPSQ_VALID); 1607 } 1608 1609 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 1610 1611 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 1612 1613 irdma_debug_buf(qp->dev, IRDMA_DEBUG_WQE, "RTR WQE", wqe, 1614 IRDMA_QP_WQE_MIN_SIZE); 1615 1616 if (qp->dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE) 1617 irdma_sc_gen_rts_ae(qp); 1618 1619 return 0; 1620 } 1621 1622 /** 1623 * irdma_iwarp_opcode - determine if incoming is rdma layer 1624 * @info: aeq info for the packet 1625 * @pkt: packet for error 1626 */ 1627 static u32 irdma_iwarp_opcode(struct irdma_aeqe_info *info, u8 *pkt){ 1628 BE16 *mpa; 1629 u32 opcode = 0xffffffff; 1630 1631 if (info->q2_data_written) { 1632 mpa = (BE16 *) pkt; 1633 opcode = IRDMA_NTOHS(mpa[1]) & 0xf; 1634 } 1635 1636 return opcode; 1637 } 1638 1639 /** 1640 * irdma_locate_mpa - return pointer to mpa in the pkt 1641 * @pkt: packet with data 1642 */ 1643 static u8 *irdma_locate_mpa(u8 *pkt) { 1644 /* skip over ethernet header */ 1645 pkt += IRDMA_MAC_HLEN; 1646 1647 /* Skip over IP and TCP headers */ 1648 pkt += 4 * (pkt[0] & 0x0f); 1649 pkt += 4 * ((pkt[12] >> 4) & 0x0f); 1650 1651 return pkt; 1652 } 1653 1654 /** 1655 * irdma_bld_termhdr_ctrl - setup terminate hdr control fields 1656 * @qp: sc qp ptr for pkt 1657 * @hdr: term hdr 1658 * @opcode: flush opcode for termhdr 1659 * @layer_etype: error layer + error type 1660 * @err: 
error code in the header
 */
static void
irdma_bld_termhdr_ctrl(struct irdma_sc_qp *qp,
		       struct irdma_terminate_hdr *hdr,
		       enum irdma_flush_opcode opcode,
		       u8 layer_etype, u8 err)
{
	qp->flush_code = opcode;
	hdr->layer_etype = layer_etype;
	hdr->error_code = err;
}

/**
 * irdma_bld_termhdr_ddp_rdma - setup ddp and rdma hdrs in terminate hdr
 * @pkt: ptr to mpa in offending pkt
 * @hdr: term hdr
 * @copy_len: offending pkt length to be copied to term hdr
 * @is_tagged: DDP tagged or untagged
 */
static void
irdma_bld_termhdr_ddp_rdma(u8 *pkt, struct irdma_terminate_hdr *hdr,
			   int *copy_len, u8 *is_tagged)
{
	u16 ddp_seg_len;

	ddp_seg_len = IRDMA_NTOHS(*(BE16 *) pkt);
	if (ddp_seg_len) {
		*copy_len = 2;
		hdr->hdrct = DDP_LEN_FLAG;
		if (pkt[2] & 0x80) {
			*is_tagged = 1;
			if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
				*copy_len += TERM_DDP_LEN_TAGGED;
				hdr->hdrct |= DDP_HDR_FLAG;
			}
		} else {
			if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
				*copy_len += TERM_DDP_LEN_UNTAGGED;
				hdr->hdrct |= DDP_HDR_FLAG;
			}
			if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN) &&
			    ((pkt[3] & RDMA_OPCODE_M) == RDMA_READ_REQ_OPCODE)) {
				*copy_len += TERM_RDMA_LEN;
				hdr->hdrct |= RDMA_HDR_FLAG;
			}
		}
	}
}

/**
 * irdma_bld_terminate_hdr - build terminate message header
 * @qp: qp associated with received terminate AE
 * @info: the struct containing AE information
 */
static int
irdma_bld_terminate_hdr(struct irdma_sc_qp *qp,
			struct irdma_aeqe_info *info)
{
	u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
	int copy_len = 0;
	u8 is_tagged = 0;
	u32 opcode;
	struct irdma_terminate_hdr *termhdr;

	termhdr = (struct irdma_terminate_hdr *)qp->q2_buf;
	memset(termhdr, 0, Q2_BAD_FRAME_OFFSET);

	if (info->q2_data_written) {
		pkt = irdma_locate_mpa(pkt);
		irdma_bld_termhdr_ddp_rdma(pkt, termhdr, &copy_len, &is_tagged);
	}

	opcode = irdma_iwarp_opcode(info, pkt);
	qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC;
	qp->sq_flush_code = info->sq;
	qp->rq_flush_code = info->rq;

	switch (info->ae_id) {
	case IRDMA_AE_AMP_UNALLOCATED_STAG:
		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
		if (opcode == IRDMA_OP_TYPE_RDMA_WRITE)
			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
					       (LAYER_DDP << 4) | DDP_TAGGED_BUF,
					       DDP_TAGGED_INV_STAG);
		else
			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
					       RDMAP_INV_STAG);
		break;
	case IRDMA_AE_AMP_BOUNDS_VIOLATION:
		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
		if (info->q2_data_written)
			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
					       (LAYER_DDP << 4) | DDP_TAGGED_BUF,
					       DDP_TAGGED_BOUNDS);
		else
			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
					       RDMAP_INV_BOUNDS);
		break;
	case IRDMA_AE_AMP_BAD_PD:
		switch (opcode) {
		case IRDMA_OP_TYPE_RDMA_WRITE:
			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_PROT_ERR,
					       (LAYER_DDP << 4) | DDP_TAGGED_BUF,
					       DDP_TAGGED_UNASSOC_STAG);
			break;
		case IRDMA_OP_TYPE_SEND_INV:
		case IRDMA_OP_TYPE_SEND_SOL_INV:
			irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					       (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT,
RDMAP_CANT_INV_STAG); 1773 break; 1774 default: 1775 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, 1776 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, 1777 RDMAP_UNASSOC_STAG); 1778 } 1779 break; 1780 case IRDMA_AE_AMP_INVALID_STAG: 1781 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; 1782 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, 1783 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, 1784 RDMAP_INV_STAG); 1785 break; 1786 case IRDMA_AE_AMP_BAD_QP: 1787 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_QP_OP_ERR, 1788 (LAYER_DDP << 4) | DDP_UNTAGGED_BUF, 1789 DDP_UNTAGGED_INV_QN); 1790 break; 1791 case IRDMA_AE_AMP_BAD_STAG_KEY: 1792 case IRDMA_AE_AMP_BAD_STAG_INDEX: 1793 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; 1794 switch (opcode) { 1795 case IRDMA_OP_TYPE_SEND_INV: 1796 case IRDMA_OP_TYPE_SEND_SOL_INV: 1797 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR, 1798 (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, 1799 RDMAP_CANT_INV_STAG); 1800 break; 1801 default: 1802 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, 1803 (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, 1804 RDMAP_INV_STAG); 1805 } 1806 break; 1807 case IRDMA_AE_AMP_RIGHTS_VIOLATION: 1808 case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS: 1809 case IRDMA_AE_PRIV_OPERATION_DENIED: 1810 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; 1811 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, 1812 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, 1813 RDMAP_ACCESS); 1814 break; 1815 case IRDMA_AE_AMP_TO_WRAP: 1816 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; 1817 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_ACCESS_ERR, 1818 (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, 1819 RDMAP_TO_WRAP); 1820 break; 1821 case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR: 1822 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR, 1823 (LAYER_MPA << 4) | DDP_LLP, MPA_CRC); 1824 break; 1825 case IRDMA_AE_LLP_SEGMENT_TOO_SMALL: 1826 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR, 1827 (LAYER_DDP << 4) | DDP_CATASTROPHIC, 1828 DDP_CATASTROPHIC_LOCAL); 1829 break; 1830 case IRDMA_AE_LCE_QP_CATASTROPHIC: 1831 case IRDMA_AE_DDP_NO_L_BIT: 1832 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR, 1833 (LAYER_DDP << 4) | DDP_CATASTROPHIC, 1834 DDP_CATASTROPHIC_LOCAL); 1835 break; 1836 case IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN: 1837 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR, 1838 (LAYER_DDP << 4) | DDP_UNTAGGED_BUF, 1839 DDP_UNTAGGED_INV_MSN_RANGE); 1840 break; 1841 case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: 1842 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; 1843 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_LOC_LEN_ERR, 1844 (LAYER_DDP << 4) | DDP_UNTAGGED_BUF, 1845 DDP_UNTAGGED_INV_TOO_LONG); 1846 break; 1847 case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION: 1848 if (is_tagged) 1849 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR, 1850 (LAYER_DDP << 4) | DDP_TAGGED_BUF, 1851 DDP_TAGGED_INV_DDP_VER); 1852 else 1853 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR, 1854 (LAYER_DDP << 4) | DDP_UNTAGGED_BUF, 1855 DDP_UNTAGGED_INV_DDP_VER); 1856 break; 1857 case IRDMA_AE_DDP_UBE_INVALID_MO: 1858 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR, 1859 (LAYER_DDP << 4) | DDP_UNTAGGED_BUF, 1860 DDP_UNTAGGED_INV_MO); 1861 break; 1862 case IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE: 1863 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_REM_OP_ERR, 1864 (LAYER_DDP << 4) | DDP_UNTAGGED_BUF, 1865 DDP_UNTAGGED_INV_MSN_NO_BUF); 1866 break; 1867 case IRDMA_AE_DDP_UBE_INVALID_QN: 1868 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR, 1869 (LAYER_DDP 
<< 4) | DDP_UNTAGGED_BUF, 1870 DDP_UNTAGGED_INV_QN); 1871 break; 1872 case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION: 1873 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_GENERAL_ERR, 1874 (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, 1875 RDMAP_INV_RDMAP_VER); 1876 break; 1877 default: 1878 irdma_bld_termhdr_ctrl(qp, termhdr, FLUSH_FATAL_ERR, 1879 (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, 1880 RDMAP_UNSPECIFIED); 1881 break; 1882 } 1883 1884 if (copy_len) 1885 irdma_memcpy(termhdr + 1, pkt, copy_len); 1886 1887 return sizeof(struct irdma_terminate_hdr) + copy_len; 1888 } 1889 1890 /** 1891 * irdma_terminate_send_fin() - Send fin for terminate message 1892 * @qp: qp associated with received terminate AE 1893 */ 1894 void 1895 irdma_terminate_send_fin(struct irdma_sc_qp *qp) 1896 { 1897 irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE, 1898 IRDMAQP_TERM_SEND_FIN_ONLY, 0); 1899 } 1900 1901 /** 1902 * irdma_terminate_connection() - Bad AE and send terminate to remote QP 1903 * @qp: qp associated with received terminate AE 1904 * @info: the struct containing AE information 1905 */ 1906 void 1907 irdma_terminate_connection(struct irdma_sc_qp *qp, 1908 struct irdma_aeqe_info *info) 1909 { 1910 u8 termlen = 0; 1911 1912 if (qp->term_flags & IRDMA_TERM_SENT) 1913 return; 1914 1915 termlen = irdma_bld_terminate_hdr(qp, info); 1916 irdma_terminate_start_timer(qp); 1917 qp->term_flags |= IRDMA_TERM_SENT; 1918 irdma_term_modify_qp(qp, IRDMA_QP_STATE_TERMINATE, 1919 IRDMAQP_TERM_SEND_TERM_ONLY, termlen); 1920 } 1921 1922 /** 1923 * irdma_terminate_received - handle terminate received AE 1924 * @qp: qp associated with received terminate AE 1925 * @info: the struct containing AE information 1926 */ 1927 void 1928 irdma_terminate_received(struct irdma_sc_qp *qp, 1929 struct irdma_aeqe_info *info) 1930 { 1931 u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET; 1932 BE32 *mpa; 1933 u8 ddp_ctl; 1934 u8 rdma_ctl; 1935 u16 aeq_id = 0; 1936 struct irdma_terminate_hdr *termhdr; 1937 1938 mpa = (BE32 *) irdma_locate_mpa(pkt); 1939 if (info->q2_data_written) { 1940 /* did not validate the frame - do it now */ 1941 ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff; 1942 rdma_ctl = ntohl(mpa[0]) & 0xff; 1943 if ((ddp_ctl & 0xc0) != 0x40) 1944 aeq_id = IRDMA_AE_LCE_QP_CATASTROPHIC; 1945 else if ((ddp_ctl & 0x03) != 1) 1946 aeq_id = IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION; 1947 else if (ntohl(mpa[2]) != 2) 1948 aeq_id = IRDMA_AE_DDP_UBE_INVALID_QN; 1949 else if (ntohl(mpa[3]) != 1) 1950 aeq_id = IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN; 1951 else if (ntohl(mpa[4]) != 0) 1952 aeq_id = IRDMA_AE_DDP_UBE_INVALID_MO; 1953 else if ((rdma_ctl & 0xc0) != 0x40) 1954 aeq_id = IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION; 1955 1956 info->ae_id = aeq_id; 1957 if (info->ae_id) { 1958 /* Bad terminate recvd - send back a terminate */ 1959 irdma_terminate_connection(qp, info); 1960 return; 1961 } 1962 } 1963 1964 qp->term_flags |= IRDMA_TERM_RCVD; 1965 qp->event_type = IRDMA_QP_EVENT_CATASTROPHIC; 1966 termhdr = (struct irdma_terminate_hdr *)&mpa[5]; 1967 if (termhdr->layer_etype == RDMAP_REMOTE_PROT || 1968 termhdr->layer_etype == RDMAP_REMOTE_OP) { 1969 irdma_terminate_done(qp, 0); 1970 } else { 1971 irdma_terminate_start_timer(qp); 1972 irdma_terminate_send_fin(qp); 1973 } 1974 } 1975 1976 static int 1977 irdma_null_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri) 1978 { 1979 return 0; 1980 } 1981 1982 static void 1983 irdma_null_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri) 1984 { 1985 /* do nothing */ 1986 } 1987 1988 static void 1989 irdma_null_ws_reset(struct irdma_sc_vsi *vsi)
1990 { 1991 /* do nothing */ 1992 } 1993 1994 /** 1995 * irdma_sc_vsi_init - Init the vsi structure 1996 * @vsi: pointer to vsi structure to initialize 1997 * @info: the info used to initialize the vsi struct 1998 */ 1999 void 2000 irdma_sc_vsi_init(struct irdma_sc_vsi *vsi, 2001 struct irdma_vsi_init_info *info) 2002 { 2003 u8 i; 2004 2005 vsi->dev = info->dev; 2006 vsi->back_vsi = info->back_vsi; 2007 vsi->register_qset = info->register_qset; 2008 vsi->unregister_qset = info->unregister_qset; 2009 vsi->mtu = info->params->mtu; 2010 vsi->exception_lan_q = info->exception_lan_q; 2011 vsi->vsi_idx = info->pf_data_vsi_num; 2012 if (vsi->dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) 2013 vsi->fcn_id = info->dev->hmc_fn_id; 2014 2015 irdma_set_qos_info(vsi, info->params); 2016 for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) { 2017 mutex_init(&vsi->qos[i].qos_mutex); 2018 INIT_LIST_HEAD(&vsi->qos[i].qplist); 2019 } 2020 if (vsi->register_qset) { 2021 vsi->dev->ws_add = irdma_ws_add; 2022 vsi->dev->ws_remove = irdma_ws_remove; 2023 vsi->dev->ws_reset = irdma_ws_reset; 2024 } else { 2025 vsi->dev->ws_add = irdma_null_ws_add; 2026 vsi->dev->ws_remove = irdma_null_ws_remove; 2027 vsi->dev->ws_reset = irdma_null_ws_reset; 2028 } 2029 } 2030 2031 /** 2032 * irdma_get_fcn_id - Return the function id 2033 * @vsi: pointer to the vsi 2034 */ 2035 static u8 irdma_get_fcn_id(struct irdma_sc_vsi *vsi){ 2036 struct irdma_stats_inst_info stats_info = {0}; 2037 struct irdma_sc_dev *dev = vsi->dev; 2038 u8 fcn_id = IRDMA_INVALID_FCN_ID; 2039 u8 start_idx, max_stats, i; 2040 2041 if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) { 2042 if (!irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_ALLOCATE, 2043 &stats_info)) 2044 return stats_info.stats_idx; 2045 } 2046 2047 start_idx = 1; 2048 max_stats = 16; 2049 for (i = start_idx; i < max_stats; i++) 2050 if (!dev->fcn_id_array[i]) { 2051 fcn_id = i; 2052 dev->fcn_id_array[i] = true; 2053 break; 2054 } 2055 2056 return fcn_id; 2057 } 2058 2059 /** 2060 * irdma_vsi_stats_init - Initialize the vsi statistics 2061 * @vsi: pointer to the vsi structure 2062 * @info: The info structure used for initialization 2063 */ 2064 int 2065 irdma_vsi_stats_init(struct irdma_sc_vsi *vsi, 2066 struct irdma_vsi_stats_info *info) 2067 { 2068 u8 fcn_id = info->fcn_id; 2069 struct irdma_dma_mem *stats_buff_mem; 2070 2071 vsi->pestat = info->pestat; 2072 vsi->pestat->hw = vsi->dev->hw; 2073 vsi->pestat->vsi = vsi; 2074 stats_buff_mem = &vsi->pestat->gather_info.stats_buff_mem; 2075 stats_buff_mem->size = IRDMA_GATHER_STATS_BUF_SIZE * 2; 2076 stats_buff_mem->va = irdma_allocate_dma_mem(vsi->pestat->hw, 2077 stats_buff_mem, 2078 stats_buff_mem->size, 1); 2079 if (!stats_buff_mem->va) 2080 return -ENOMEM; 2081 2082 vsi->pestat->gather_info.gather_stats_va = stats_buff_mem->va; 2083 vsi->pestat->gather_info.last_gather_stats_va = 2084 (void *)((uintptr_t)stats_buff_mem->va + 2085 IRDMA_GATHER_STATS_BUF_SIZE); 2086 2087 irdma_hw_stats_start_timer(vsi); 2088 if (info->alloc_fcn_id) 2089 fcn_id = irdma_get_fcn_id(vsi); 2090 if (fcn_id == IRDMA_INVALID_FCN_ID) 2091 goto stats_error; 2092 2093 vsi->stats_fcn_id_alloc = info->alloc_fcn_id; 2094 vsi->fcn_id = fcn_id; 2095 if (info->alloc_fcn_id) { 2096 vsi->pestat->gather_info.use_stats_inst = true; 2097 vsi->pestat->gather_info.stats_inst_index = fcn_id; 2098 } 2099 2100 return 0; 2101 2102 stats_error: 2103 irdma_free_dma_mem(vsi->pestat->hw, stats_buff_mem); 2104 2105 return -EIO; 2106 } 2107 2108 /** 2109 * irdma_vsi_stats_free - Free the vsi stats 
2110 * @vsi: pointer to the vsi structure 2111 */ 2112 void 2113 irdma_vsi_stats_free(struct irdma_sc_vsi *vsi) 2114 { 2115 struct irdma_stats_inst_info stats_info = {0}; 2116 u8 fcn_id = vsi->fcn_id; 2117 struct irdma_sc_dev *dev = vsi->dev; 2118 2119 if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) { 2120 if (vsi->stats_fcn_id_alloc) { 2121 stats_info.stats_idx = vsi->fcn_id; 2122 irdma_cqp_stats_inst_cmd(vsi, IRDMA_OP_STATS_FREE, 2123 &stats_info); 2124 } 2125 } else { 2126 if (vsi->stats_fcn_id_alloc && 2127 fcn_id < vsi->dev->hw_attrs.max_stat_inst) 2128 vsi->dev->fcn_id_array[fcn_id] = false; 2129 } 2130 2131 if (!vsi->pestat) 2132 return; 2133 irdma_hw_stats_stop_timer(vsi); 2134 irdma_free_dma_mem(vsi->pestat->hw, 2135 &vsi->pestat->gather_info.stats_buff_mem); 2136 } 2137 2138 /** 2139 * irdma_get_encoded_wqe_size - given wq size, returns hardware encoded size 2140 * @wqsize: size of the wq (sq, rq) to be encoded 2141 * @queue_type: queue type selected for the calculation algorithm 2142 */ 2143 u8 2144 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type) 2145 { 2146 u8 encoded_size = 0; 2147 2148 /* 2149 * cqp sq's hw coded value starts from 1 for size of 4 while it starts from 0 for qp's wq's. 2150 */ 2151 if (queue_type == IRDMA_QUEUE_TYPE_CQP) 2152 encoded_size = 1; 2153 wqsize >>= 2; 2154 while (wqsize >>= 1) 2155 encoded_size++; 2156 2157 return encoded_size; 2158 } 2159 2160 /** 2161 * irdma_sc_gather_stats - collect the statistics 2162 * @cqp: struct for cqp hw 2163 * @info: gather stats info structure 2164 * @scratch: u64 saved to be used during cqp completion 2165 */ 2166 static int 2167 irdma_sc_gather_stats(struct irdma_sc_cqp *cqp, 2168 struct irdma_stats_gather_info *info, 2169 u64 scratch) 2170 { 2171 __le64 *wqe; 2172 u64 temp; 2173 2174 if (info->stats_buff_mem.size < IRDMA_GATHER_STATS_BUF_SIZE) 2175 return -ENOSPC; 2176 2177 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2178 if (!wqe) 2179 return -ENOSPC; 2180 2181 set_64bit_val(wqe, IRDMA_BYTE_40, 2182 LS_64(info->hmc_fcn_index, IRDMA_CQPSQ_STATS_HMC_FCN_INDEX)); 2183 set_64bit_val(wqe, IRDMA_BYTE_32, info->stats_buff_mem.pa); 2184 2185 temp = LS_64(cqp->polarity, IRDMA_CQPSQ_STATS_WQEVALID) | 2186 LS_64(info->use_stats_inst, IRDMA_CQPSQ_STATS_USE_INST) | 2187 LS_64(info->stats_inst_index, IRDMA_CQPSQ_STATS_INST_INDEX) | 2188 LS_64(info->use_hmc_fcn_index, 2189 IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX) | 2190 LS_64(IRDMA_CQP_OP_GATHER_STATS, IRDMA_CQPSQ_STATS_OP); 2191 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2192 2193 set_64bit_val(wqe, IRDMA_BYTE_24, temp); 2194 2195 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_STATS, "GATHER_STATS WQE", wqe, 2196 IRDMA_CQP_WQE_SIZE * 8); 2197 2198 irdma_sc_cqp_post_sq(cqp); 2199 irdma_debug(cqp->dev, IRDMA_DEBUG_STATS, 2200 "CQP SQ head 0x%x tail 0x%x size 0x%x\n", cqp->sq_ring.head, 2201 cqp->sq_ring.tail, cqp->sq_ring.size); 2202 2203 return 0; 2204 } 2205 2206 /** 2207 * irdma_sc_manage_stats_inst - allocate or free stats instance 2208 * @cqp: struct for cqp hw 2209 * @info: stats info structure 2210 * @alloc: alloc vs.
delete flag 2211 * @scratch: u64 saved to be used during cqp completion 2212 */ 2213 static int 2214 irdma_sc_manage_stats_inst(struct irdma_sc_cqp *cqp, 2215 struct irdma_stats_inst_info *info, 2216 bool alloc, u64 scratch) 2217 { 2218 __le64 *wqe; 2219 u64 temp; 2220 2221 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2222 if (!wqe) 2223 return -ENOSPC; 2224 2225 set_64bit_val(wqe, IRDMA_BYTE_40, 2226 LS_64(info->hmc_fn_id, IRDMA_CQPSQ_STATS_HMC_FCN_INDEX)); 2227 temp = LS_64(cqp->polarity, IRDMA_CQPSQ_STATS_WQEVALID) | 2228 LS_64(alloc, IRDMA_CQPSQ_STATS_ALLOC_INST) | 2229 LS_64(info->use_hmc_fcn_index, IRDMA_CQPSQ_STATS_USE_HMC_FCN_INDEX) | 2230 LS_64(info->stats_idx, IRDMA_CQPSQ_STATS_INST_INDEX) | 2231 LS_64(IRDMA_CQP_OP_MANAGE_STATS, IRDMA_CQPSQ_STATS_OP); 2232 2233 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2234 2235 set_64bit_val(wqe, IRDMA_BYTE_24, temp); 2236 2237 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_STATS WQE", wqe, 2238 IRDMA_CQP_WQE_SIZE * 8); 2239 2240 irdma_sc_cqp_post_sq(cqp); 2241 return 0; 2242 } 2243 2244 /** 2245 * irdma_sc_set_up_map - set the up map table 2246 * @cqp: struct for cqp hw 2247 * @info: User priority map info 2248 * @scratch: u64 saved to be used during cqp completion 2249 */ 2250 static int 2251 irdma_sc_set_up_map(struct irdma_sc_cqp *cqp, 2252 struct irdma_up_info *info, u64 scratch) 2253 { 2254 __le64 *wqe; 2255 u64 temp; 2256 2257 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2258 if (!wqe) 2259 return -ENOSPC; 2260 2261 temp = info->map[0] | LS_64_1(info->map[1], 8) | 2262 LS_64_1(info->map[2], 16) | LS_64_1(info->map[3], 24) | 2263 LS_64_1(info->map[4], 32) | LS_64_1(info->map[5], 40) | 2264 LS_64_1(info->map[6], 48) | LS_64_1(info->map[7], 56); 2265 2266 set_64bit_val(wqe, IRDMA_BYTE_0, temp); 2267 set_64bit_val(wqe, IRDMA_BYTE_40, 2268 LS_64(info->cnp_up_override, IRDMA_CQPSQ_UP_CNPOVERRIDE) | 2269 LS_64(info->hmc_fcn_idx, IRDMA_CQPSQ_UP_HMCFCNIDX)); 2270 2271 temp = LS_64(cqp->polarity, IRDMA_CQPSQ_UP_WQEVALID) | 2272 LS_64(info->use_vlan, IRDMA_CQPSQ_UP_USEVLAN) | 2273 LS_64(info->use_cnp_up_override, IRDMA_CQPSQ_UP_USEOVERRIDE) | 2274 LS_64(IRDMA_CQP_OP_UP_MAP, IRDMA_CQPSQ_UP_OP); 2275 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2276 2277 set_64bit_val(wqe, IRDMA_BYTE_24, temp); 2278 2279 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "UPMAP WQE", wqe, 2280 IRDMA_CQP_WQE_SIZE * 8); 2281 irdma_sc_cqp_post_sq(cqp); 2282 2283 return 0; 2284 } 2285 2286 /** 2287 * irdma_sc_manage_ws_node - create/modify/destroy WS node 2288 * @cqp: struct for cqp hw 2289 * @info: node info structure 2290 * @node_op: 0 for add 1 for modify, 2 for delete 2291 * @scratch: u64 saved to be used during cqp completion 2292 */ 2293 static int 2294 irdma_sc_manage_ws_node(struct irdma_sc_cqp *cqp, 2295 struct irdma_ws_node_info *info, 2296 enum irdma_ws_node_op node_op, u64 scratch) 2297 { 2298 __le64 *wqe; 2299 u64 temp = 0; 2300 2301 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2302 if (!wqe) 2303 return -ENOSPC; 2304 2305 set_64bit_val(wqe, IRDMA_BYTE_32, 2306 LS_64(info->vsi, IRDMA_CQPSQ_WS_VSI) | 2307 LS_64(info->weight, IRDMA_CQPSQ_WS_WEIGHT)); 2308 2309 temp = LS_64(cqp->polarity, IRDMA_CQPSQ_WS_WQEVALID) | 2310 LS_64(node_op, IRDMA_CQPSQ_WS_NODEOP) | 2311 LS_64(info->enable, IRDMA_CQPSQ_WS_ENABLENODE) | 2312 LS_64(info->type_leaf, IRDMA_CQPSQ_WS_NODETYPE) | 2313 LS_64(info->prio_type, IRDMA_CQPSQ_WS_PRIOTYPE) | 2314 LS_64(info->tc, IRDMA_CQPSQ_WS_TC) | 2315 
LS_64(IRDMA_CQP_OP_WORK_SCHED_NODE, IRDMA_CQPSQ_WS_OP) | 2316 LS_64(info->parent_id, IRDMA_CQPSQ_WS_PARENTID) | 2317 LS_64(info->id, IRDMA_CQPSQ_WS_NODEID); 2318 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2319 2320 set_64bit_val(wqe, IRDMA_BYTE_24, temp); 2321 2322 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_WS WQE", wqe, 2323 IRDMA_CQP_WQE_SIZE * 8); 2324 irdma_sc_cqp_post_sq(cqp); 2325 2326 return 0; 2327 } 2328 2329 /** 2330 * irdma_sc_qp_flush_wqes - flush qp's wqe 2331 * @qp: sc qp 2332 * @info: flush information 2333 * @scratch: u64 saved to be used during cqp completion 2334 * @post_sq: flag for cqp db to ring 2335 */ 2336 int 2337 irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp, 2338 struct irdma_qp_flush_info *info, u64 scratch, 2339 bool post_sq) 2340 { 2341 u64 temp = 0; 2342 __le64 *wqe; 2343 struct irdma_sc_cqp *cqp; 2344 u64 hdr; 2345 bool flush_sq = false, flush_rq = false; 2346 2347 if (info->rq && !qp->flush_rq) 2348 flush_rq = true; 2349 if (info->sq && !qp->flush_sq) 2350 flush_sq = true; 2351 qp->flush_sq |= flush_sq; 2352 qp->flush_rq |= flush_rq; 2353 2354 if (!flush_sq && !flush_rq) { 2355 irdma_debug(qp->dev, IRDMA_DEBUG_CQP, 2356 "Additional flush request ignored for qp %x\n", qp->qp_uk.qp_id); 2357 return -EALREADY; 2358 } 2359 2360 cqp = qp->pd->dev->cqp; 2361 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2362 if (!wqe) 2363 return -ENOSPC; 2364 2365 if (info->userflushcode) { 2366 if (flush_rq) 2367 temp |= LS_64(info->rq_minor_code, IRDMA_CQPSQ_FWQE_RQMNERR) | 2368 LS_64(info->rq_major_code, IRDMA_CQPSQ_FWQE_RQMJERR); 2369 if (flush_sq) 2370 temp |= LS_64(info->sq_minor_code, IRDMA_CQPSQ_FWQE_SQMNERR) | 2371 LS_64(info->sq_major_code, IRDMA_CQPSQ_FWQE_SQMJERR); 2372 } 2373 set_64bit_val(wqe, IRDMA_BYTE_16, temp); 2374 2375 temp = (info->generate_ae) ?
2376 info->ae_code | LS_64(info->ae_src, IRDMA_CQPSQ_FWQE_AESOURCE) : 0; 2377 set_64bit_val(wqe, IRDMA_BYTE_8, temp); 2378 2379 hdr = qp->qp_uk.qp_id | 2380 LS_64(IRDMA_CQP_OP_FLUSH_WQES, IRDMA_CQPSQ_OPCODE) | 2381 LS_64(info->generate_ae, IRDMA_CQPSQ_FWQE_GENERATE_AE) | 2382 LS_64(info->userflushcode, IRDMA_CQPSQ_FWQE_USERFLCODE) | 2383 LS_64(flush_sq, IRDMA_CQPSQ_FWQE_FLUSHSQ) | 2384 LS_64(flush_rq, IRDMA_CQPSQ_FWQE_FLUSHRQ) | 2385 LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID); 2386 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2387 2388 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 2389 2390 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QP_FLUSH WQE", wqe, 2391 IRDMA_CQP_WQE_SIZE * 8); 2392 if (post_sq) 2393 irdma_sc_cqp_post_sq(cqp); 2394 2395 return 0; 2396 } 2397 2398 /** 2399 * irdma_sc_gen_ae - generate AE, uses flush WQE CQP OP 2400 * @qp: sc qp 2401 * @info: gen ae information 2402 * @scratch: u64 saved to be used during cqp completion 2403 * @post_sq: flag for cqp db to ring 2404 */ 2405 static int 2406 irdma_sc_gen_ae(struct irdma_sc_qp *qp, 2407 struct irdma_gen_ae_info *info, u64 scratch, 2408 bool post_sq) 2409 { 2410 u64 temp; 2411 __le64 *wqe; 2412 struct irdma_sc_cqp *cqp; 2413 u64 hdr; 2414 2415 cqp = qp->pd->dev->cqp; 2416 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2417 if (!wqe) 2418 return -ENOSPC; 2419 2420 temp = info->ae_code | LS_64(info->ae_src, IRDMA_CQPSQ_FWQE_AESOURCE); 2421 set_64bit_val(wqe, IRDMA_BYTE_8, temp); 2422 2423 hdr = qp->qp_uk.qp_id | LS_64(IRDMA_CQP_OP_GEN_AE, IRDMA_CQPSQ_OPCODE) | 2424 LS_64(1, IRDMA_CQPSQ_FWQE_GENERATE_AE) | 2425 LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID); 2426 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2427 2428 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 2429 2430 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "GEN_AE WQE", wqe, 2431 IRDMA_CQP_WQE_SIZE * 8); 2432 if (post_sq) 2433 irdma_sc_cqp_post_sq(cqp); 2434 2435 return 0; 2436 } 2437 2438 /*** irdma_sc_qp_upload_context - upload qp's context 2439 * @dev: sc device struct 2440 * @info: upload context info ptr for return 2441 * @scratch: u64 saved to be used during cqp completion 2442 * @post_sq: flag for cqp db to ring 2443 */ 2444 static int 2445 irdma_sc_qp_upload_context(struct irdma_sc_dev *dev, 2446 struct irdma_upload_context_info *info, 2447 u64 scratch, bool post_sq) 2448 { 2449 __le64 *wqe; 2450 struct irdma_sc_cqp *cqp; 2451 u64 hdr; 2452 2453 cqp = dev->cqp; 2454 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2455 if (!wqe) 2456 return -ENOSPC; 2457 2458 set_64bit_val(wqe, IRDMA_BYTE_16, info->buf_pa); 2459 2460 hdr = LS_64(info->qp_id, IRDMA_CQPSQ_UCTX_QPID) | 2461 LS_64(IRDMA_CQP_OP_UPLOAD_CONTEXT, IRDMA_CQPSQ_OPCODE) | 2462 LS_64(info->qp_type, IRDMA_CQPSQ_UCTX_QPTYPE) | 2463 LS_64(info->raw_format, IRDMA_CQPSQ_UCTX_RAWFORMAT) | 2464 LS_64(info->freeze_qp, IRDMA_CQPSQ_UCTX_FREEZEQP) | 2465 LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID); 2466 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2467 2468 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 2469 2470 irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "QP_UPLOAD_CTX WQE", wqe, 2471 IRDMA_CQP_WQE_SIZE * 8); 2472 if (post_sq) 2473 irdma_sc_cqp_post_sq(cqp); 2474 2475 return 0; 2476 } 2477 2478 /** 2479 * irdma_sc_manage_push_page - Handle push page 2480 * @cqp: struct for cqp hw 2481 * @info: push page info 2482 * @scratch: u64 saved to be used during cqp completion 2483 * @post_sq: flag for cqp db to ring 2484 */ 2485 static int 2486 irdma_sc_manage_push_page(struct 
irdma_sc_cqp *cqp, 2487 struct irdma_cqp_manage_push_page_info *info, 2488 u64 scratch, bool post_sq) 2489 { 2490 __le64 *wqe; 2491 u64 hdr; 2492 2493 if (info->free_page && 2494 info->push_idx >= cqp->dev->hw_attrs.max_hw_device_pages) 2495 return -EINVAL; 2496 2497 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2498 if (!wqe) 2499 return -ENOSPC; 2500 2501 set_64bit_val(wqe, IRDMA_BYTE_16, info->qs_handle); 2502 hdr = LS_64(info->push_idx, IRDMA_CQPSQ_MPP_PPIDX) | 2503 LS_64(info->push_page_type, IRDMA_CQPSQ_MPP_PPTYPE) | 2504 LS_64(IRDMA_CQP_OP_MANAGE_PUSH_PAGES, IRDMA_CQPSQ_OPCODE) | 2505 LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID) | 2506 LS_64(info->free_page, IRDMA_CQPSQ_MPP_FREE_PAGE); 2507 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2508 2509 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 2510 2511 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "MANAGE_PUSH_PAGES WQE", wqe, 2512 IRDMA_CQP_WQE_SIZE * 8); 2513 if (post_sq) 2514 irdma_sc_cqp_post_sq(cqp); 2515 2516 return 0; 2517 } 2518 2519 /** 2520 * irdma_sc_suspend_qp - suspend qp for param change 2521 * @cqp: struct for cqp hw 2522 * @qp: sc qp struct 2523 * @scratch: u64 saved to be used during cqp completion 2524 */ 2525 static int 2526 irdma_sc_suspend_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp, 2527 u64 scratch) 2528 { 2529 u64 hdr; 2530 __le64 *wqe; 2531 2532 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2533 if (!wqe) 2534 return -ENOSPC; 2535 2536 hdr = LS_64(qp->qp_uk.qp_id, IRDMA_CQPSQ_SUSPENDQP_QPID) | 2537 LS_64(IRDMA_CQP_OP_SUSPEND_QP, IRDMA_CQPSQ_OPCODE) | 2538 LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID); 2539 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2540 2541 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 2542 2543 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "SUSPEND_QP WQE", wqe, 2544 IRDMA_CQP_WQE_SIZE * 8); 2545 irdma_sc_cqp_post_sq(cqp); 2546 2547 return 0; 2548 } 2549 2550 /** 2551 * irdma_sc_resume_qp - resume qp after suspend 2552 * @cqp: struct for cqp hw 2553 * @qp: sc qp struct 2554 * @scratch: u64 saved to be used during cqp completion 2555 */ 2556 static int 2557 irdma_sc_resume_qp(struct irdma_sc_cqp *cqp, struct irdma_sc_qp *qp, 2558 u64 scratch) 2559 { 2560 u64 hdr; 2561 __le64 *wqe; 2562 2563 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2564 if (!wqe) 2565 return -ENOSPC; 2566 2567 set_64bit_val(wqe, IRDMA_BYTE_16, 2568 LS_64(qp->qs_handle, IRDMA_CQPSQ_RESUMEQP_QSHANDLE)); 2569 2570 hdr = LS_64(qp->qp_uk.qp_id, IRDMA_CQPSQ_RESUMEQP_QPID) | 2571 LS_64(IRDMA_CQP_OP_RESUME_QP, IRDMA_CQPSQ_OPCODE) | 2572 LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID); 2573 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2574 2575 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 2576 2577 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "RESUME_QP WQE", wqe, 2578 IRDMA_CQP_WQE_SIZE * 8); 2579 irdma_sc_cqp_post_sq(cqp); 2580 2581 return 0; 2582 } 2583 2584 /** 2585 * irdma_sc_cq_ack - acknowledge completion q 2586 * @cq: cq struct 2587 */ 2588 static inline void 2589 irdma_sc_cq_ack(struct irdma_sc_cq *cq) 2590 { 2591 db_wr32(cq->cq_uk.cq_id, cq->cq_uk.cq_ack_db); 2592 } 2593 2594 /** 2595 * irdma_sc_cq_init - initialize completion q 2596 * @cq: cq struct 2597 * @info: cq initialization info 2598 */ 2599 int 2600 irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info) 2601 { 2602 int ret_code; 2603 u32 pble_obj_cnt; 2604 2605 pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt; 2606 if (info->virtual_map && info->first_pm_pbl_idx >= 
pble_obj_cnt) 2607 return -EINVAL; 2608 2609 cq->cq_pa = info->cq_base_pa; 2610 cq->dev = info->dev; 2611 cq->ceq_id = info->ceq_id; 2612 info->cq_uk_init_info.cqe_alloc_db = cq->dev->cq_arm_db; 2613 info->cq_uk_init_info.cq_ack_db = cq->dev->cq_ack_db; 2614 ret_code = irdma_uk_cq_init(&cq->cq_uk, &info->cq_uk_init_info); 2615 if (ret_code) 2616 return ret_code; 2617 2618 cq->virtual_map = info->virtual_map; 2619 cq->pbl_chunk_size = info->pbl_chunk_size; 2620 cq->ceqe_mask = info->ceqe_mask; 2621 cq->cq_type = (info->type) ? info->type : IRDMA_CQ_TYPE_IWARP; 2622 cq->shadow_area_pa = info->shadow_area_pa; 2623 cq->shadow_read_threshold = info->shadow_read_threshold; 2624 cq->ceq_id_valid = info->ceq_id_valid; 2625 cq->tph_en = info->tph_en; 2626 cq->tph_val = info->tph_val; 2627 cq->first_pm_pbl_idx = info->first_pm_pbl_idx; 2628 cq->vsi = info->vsi; 2629 2630 return 0; 2631 } 2632 2633 /** 2634 * irdma_sc_cq_create - create completion q 2635 * @cq: cq struct 2636 * @scratch: u64 saved to be used during cqp completion 2637 * @check_overflow: flag for overflow check 2638 * @post_sq: flag for cqp db to ring 2639 */ 2640 static int 2641 irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch, 2642 bool check_overflow, bool post_sq) 2643 { 2644 __le64 *wqe; 2645 struct irdma_sc_cqp *cqp; 2646 u64 hdr; 2647 struct irdma_sc_ceq *ceq; 2648 int ret_code = 0; 2649 2650 cqp = cq->dev->cqp; 2651 if (cq->cq_uk.cq_id > (cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt - 1)) 2652 return -EINVAL; 2653 2654 if (cq->ceq_id > (cq->dev->hmc_fpm_misc.max_ceqs - 1)) 2655 return -EINVAL; 2656 2657 ceq = cq->dev->ceq[cq->ceq_id]; 2658 if (ceq && ceq->reg_cq) 2659 ret_code = irdma_sc_add_cq_ctx(ceq, cq); 2660 2661 if (ret_code) 2662 return ret_code; 2663 2664 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2665 if (!wqe) { 2666 if (ceq && ceq->reg_cq) 2667 irdma_sc_remove_cq_ctx(ceq, cq); 2668 return -ENOSPC; 2669 } 2670 2671 set_64bit_val(wqe, IRDMA_BYTE_0, cq->cq_uk.cq_size); 2672 set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1)); 2673 set_64bit_val(wqe, IRDMA_BYTE_16, 2674 LS_64(cq->shadow_read_threshold, 2675 IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD)); 2676 set_64bit_val(wqe, IRDMA_BYTE_32, (cq->virtual_map ? 0 : cq->cq_pa)); 2677 set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa); 2678 set_64bit_val(wqe, IRDMA_BYTE_48, 2679 LS_64((cq->virtual_map ? cq->first_pm_pbl_idx : 0), 2680 IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX)); 2681 set_64bit_val(wqe, IRDMA_BYTE_56, 2682 LS_64(cq->tph_val, IRDMA_CQPSQ_TPHVAL) | 2683 LS_64(cq->vsi->vsi_idx, IRDMA_CQPSQ_VSIIDX)); 2684 2685 hdr = FLD_LS_64(cq->dev, cq->cq_uk.cq_id, IRDMA_CQPSQ_CQ_CQID) | 2686 FLD_LS_64(cq->dev, (cq->ceq_id_valid ? 
cq->ceq_id : 0), 2687 IRDMA_CQPSQ_CQ_CEQID) | 2688 LS_64(IRDMA_CQP_OP_CREATE_CQ, IRDMA_CQPSQ_OPCODE) | 2689 LS_64(cq->pbl_chunk_size, IRDMA_CQPSQ_CQ_LPBLSIZE) | 2690 LS_64(check_overflow, IRDMA_CQPSQ_CQ_CHKOVERFLOW) | 2691 LS_64(cq->virtual_map, IRDMA_CQPSQ_CQ_VIRTMAP) | 2692 LS_64(cq->ceqe_mask, IRDMA_CQPSQ_CQ_ENCEQEMASK) | 2693 LS_64(cq->ceq_id_valid, IRDMA_CQPSQ_CQ_CEQIDVALID) | 2694 LS_64(cq->tph_en, IRDMA_CQPSQ_TPHEN) | 2695 LS_64(cq->cq_uk.avoid_mem_cflct, IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT) | 2696 LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID); 2697 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2698 2699 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 2700 2701 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CQ_CREATE WQE", wqe, 2702 IRDMA_CQP_WQE_SIZE * 8); 2703 if (post_sq) 2704 irdma_sc_cqp_post_sq(cqp); 2705 2706 return 0; 2707 } 2708 2709 /** 2710 * irdma_sc_cq_destroy - destroy completion q 2711 * @cq: cq struct 2712 * @scratch: u64 saved to be used during cqp completion 2713 * @post_sq: flag for cqp db to ring 2714 */ 2715 int 2716 irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq) 2717 { 2718 struct irdma_sc_cqp *cqp; 2719 __le64 *wqe; 2720 u64 hdr; 2721 struct irdma_sc_ceq *ceq; 2722 2723 cqp = cq->dev->cqp; 2724 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2725 if (!wqe) 2726 return -ENOSPC; 2727 2728 ceq = cq->dev->ceq[cq->ceq_id]; 2729 if (ceq && ceq->reg_cq) 2730 irdma_sc_remove_cq_ctx(ceq, cq); 2731 2732 set_64bit_val(wqe, IRDMA_BYTE_0, cq->cq_uk.cq_size); 2733 set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1)); 2734 set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa); 2735 set_64bit_val(wqe, IRDMA_BYTE_48, 2736 (cq->virtual_map ? cq->first_pm_pbl_idx : 0)); 2737 2738 hdr = cq->cq_uk.cq_id | 2739 FLD_LS_64(cq->dev, (cq->ceq_id_valid ? 
cq->ceq_id : 0), 2740 IRDMA_CQPSQ_CQ_CEQID) | 2741 LS_64(IRDMA_CQP_OP_DESTROY_CQ, IRDMA_CQPSQ_OPCODE) | 2742 LS_64(cq->pbl_chunk_size, IRDMA_CQPSQ_CQ_LPBLSIZE) | 2743 LS_64(cq->virtual_map, IRDMA_CQPSQ_CQ_VIRTMAP) | 2744 LS_64(cq->ceqe_mask, IRDMA_CQPSQ_CQ_ENCEQEMASK) | 2745 LS_64(cq->ceq_id_valid, IRDMA_CQPSQ_CQ_CEQIDVALID) | 2746 LS_64(cq->tph_en, IRDMA_CQPSQ_TPHEN) | 2747 LS_64(cq->cq_uk.avoid_mem_cflct, IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT) | 2748 LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID); 2749 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2750 2751 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 2752 2753 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CQ_DESTROY WQE", wqe, 2754 IRDMA_CQP_WQE_SIZE * 8); 2755 if (post_sq) 2756 irdma_sc_cqp_post_sq(cqp); 2757 2758 return 0; 2759 } 2760 2761 /** 2762 * irdma_sc_cq_resize - set resized cq buffer info 2763 * @cq: resized cq 2764 * @info: resized cq buffer info 2765 */ 2766 void 2767 irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info) 2768 { 2769 cq->virtual_map = info->virtual_map; 2770 cq->cq_pa = info->cq_pa; 2771 cq->first_pm_pbl_idx = info->first_pm_pbl_idx; 2772 cq->pbl_chunk_size = info->pbl_chunk_size; 2773 irdma_uk_cq_resize(&cq->cq_uk, info->cq_base, info->cq_size); 2774 } 2775 2776 /** 2777 * irdma_sc_cq_modify - modify a Completion Queue 2778 * @cq: cq struct 2779 * @info: modification info struct 2780 * @scratch: u64 saved to be used during cqp completion 2781 * @post_sq: flag to post to sq 2782 */ 2783 static int 2784 irdma_sc_cq_modify(struct irdma_sc_cq *cq, 2785 struct irdma_modify_cq_info *info, u64 scratch, 2786 bool post_sq) 2787 { 2788 struct irdma_sc_cqp *cqp; 2789 __le64 *wqe; 2790 u64 hdr; 2791 u32 pble_obj_cnt; 2792 2793 pble_obj_cnt = cq->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt; 2794 if (info->cq_resize && info->virtual_map && 2795 info->first_pm_pbl_idx >= pble_obj_cnt) 2796 return -EINVAL; 2797 2798 cqp = cq->dev->cqp; 2799 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 2800 if (!wqe) 2801 return -ENOSPC; 2802 2803 set_64bit_val(wqe, IRDMA_BYTE_0, info->cq_size); 2804 set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1)); 2805 set_64bit_val(wqe, IRDMA_BYTE_16, 2806 LS_64(info->shadow_read_threshold, 2807 IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD)); 2808 set_64bit_val(wqe, IRDMA_BYTE_32, info->cq_pa); 2809 set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa); 2810 set_64bit_val(wqe, IRDMA_BYTE_48, info->first_pm_pbl_idx); 2811 set_64bit_val(wqe, IRDMA_BYTE_56, 2812 LS_64(cq->tph_val, IRDMA_CQPSQ_TPHVAL) | 2813 LS_64(cq->vsi->vsi_idx, IRDMA_CQPSQ_VSIIDX)); 2814 2815 hdr = cq->cq_uk.cq_id | 2816 LS_64(IRDMA_CQP_OP_MODIFY_CQ, IRDMA_CQPSQ_OPCODE) | 2817 LS_64(info->cq_resize, IRDMA_CQPSQ_CQ_CQRESIZE) | 2818 LS_64(info->pbl_chunk_size, IRDMA_CQPSQ_CQ_LPBLSIZE) | 2819 LS_64(info->check_overflow, IRDMA_CQPSQ_CQ_CHKOVERFLOW) | 2820 LS_64(info->virtual_map, IRDMA_CQPSQ_CQ_VIRTMAP) | 2821 LS_64(cq->ceqe_mask, IRDMA_CQPSQ_CQ_ENCEQEMASK) | 2822 LS_64(cq->tph_en, IRDMA_CQPSQ_TPHEN) | 2823 LS_64(cq->cq_uk.avoid_mem_cflct, IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT) | 2824 LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID); 2825 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 2826 2827 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 2828 2829 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CQ_MODIFY WQE", wqe, 2830 IRDMA_CQP_WQE_SIZE * 8); 2831 if (post_sq) 2832 irdma_sc_cqp_post_sq(cqp); 2833 2834 return 0; 2835 } 2836 2837 /** 2838 * irdma_check_cqp_progress - check cqp processing progress 2839 * @timeout: 
timeout info struct 2840 * @dev: sc device struct 2841 */ 2842 void 2843 irdma_check_cqp_progress(struct irdma_cqp_timeout *timeout, struct irdma_sc_dev *dev) 2844 { 2845 if (timeout->compl_cqp_cmds != dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]) { 2846 timeout->compl_cqp_cmds = dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]; 2847 timeout->count = 0; 2848 } else { 2849 if (dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] != 2850 timeout->compl_cqp_cmds) 2851 timeout->count++; 2852 } 2853 } 2854 2855 /** 2856 * irdma_get_cqp_reg_info - get head and tail for cqp using registers 2857 * @cqp: struct for cqp hw 2858 * @val: cqp tail register value 2859 * @tail: wqtail register value 2860 * @error: cqp processing err 2861 */ 2862 static inline void 2863 irdma_get_cqp_reg_info(struct irdma_sc_cqp *cqp, u32 *val, 2864 u32 *tail, u32 *error) 2865 { 2866 *val = readl(cqp->dev->hw_regs[IRDMA_CQPTAIL]); 2867 *tail = RS_32(*val, IRDMA_CQPTAIL_WQTAIL); 2868 *error = RS_32(*val, IRDMA_CQPTAIL_CQP_OP_ERR); 2869 } 2870 2871 /** 2872 * irdma_cqp_poll_registers - poll cqp registers 2873 * @cqp: struct for cqp hw 2874 * @tail: wqtail register value 2875 * @count: how many times to try for completion 2876 */ 2877 static int 2878 irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp, u32 tail, 2879 u32 count) 2880 { 2881 u32 i = 0; 2882 u32 newtail, error, val; 2883 2884 while (i++ < count) { 2885 irdma_get_cqp_reg_info(cqp, &val, &newtail, &error); 2886 if (error) { 2887 error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]); 2888 irdma_debug(cqp->dev, IRDMA_DEBUG_CQP, 2889 "CQPERRCODES error_code[x%08X]\n", error); 2890 return -EIO; 2891 } 2892 if (newtail != tail) { 2893 /* SUCCESS */ 2894 IRDMA_RING_MOVE_TAIL(cqp->sq_ring); 2895 cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++; 2896 return 0; 2897 } 2898 irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count); 2899 } 2900 2901 return -ETIMEDOUT; 2902 } 2903 2904 /** 2905 * irdma_sc_decode_fpm_commit - decode a 64 bit value into count and base 2906 * @dev: sc device struct 2907 * @buf: pointer to commit buffer 2908 * @buf_idx: buffer index 2909 * @obj_info: object info pointer 2910 * @rsrc_idx: index of memory resource 2911 */ 2912 static u64 irdma_sc_decode_fpm_commit(struct irdma_sc_dev *dev, __le64 * buf, 2913 u32 buf_idx, struct irdma_hmc_obj_info *obj_info, 2914 u32 rsrc_idx){ 2915 u64 temp; 2916 2917 get_64bit_val(buf, buf_idx, &temp); 2918 2919 switch (rsrc_idx) { 2920 case IRDMA_HMC_IW_QP: 2921 obj_info[rsrc_idx].cnt = (u32)RS_64(temp, IRDMA_COMMIT_FPM_QPCNT); 2922 break; 2923 case IRDMA_HMC_IW_CQ: 2924 obj_info[rsrc_idx].cnt = (u32)FLD_RS_64(dev, temp, IRDMA_COMMIT_FPM_CQCNT); 2925 break; 2926 case IRDMA_HMC_IW_APBVT_ENTRY: 2927 obj_info[rsrc_idx].cnt = 1; 2928 break; 2929 default: 2930 obj_info[rsrc_idx].cnt = (u32)temp; 2931 break; 2932 } 2933 2934 obj_info[rsrc_idx].base = (u64)RS_64_1(temp, IRDMA_COMMIT_FPM_BASE_S) * 512; 2935 2936 return temp; 2937 } 2938 2939 /** 2940 * irdma_sc_parse_fpm_commit_buf - parse fpm commit buffer 2941 * @dev: pointer to dev struct 2942 * @buf: ptr to fpm commit buffer 2943 * @info: ptr to irdma_hmc_obj_info struct 2944 * @sd: number of SDs for HMC objects 2945 * 2946 * parses fpm commit info and copies base values 2947 * of hmc objects in hmc_info 2948 */ 2949 static int 2950 irdma_sc_parse_fpm_commit_buf(struct irdma_sc_dev *dev, __le64 * buf, 2951 struct irdma_hmc_obj_info *info, 2952 u32 *sd) 2953 { 2954 u64 size; 2955 u32 i; 2956 u64 max_base = 0; 2957 u32 last_hmc_obj = 0; 2958 2959 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_0, info, 2960
IRDMA_HMC_IW_QP); 2961 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_8, info, 2962 IRDMA_HMC_IW_CQ); 2963 /* skipping RSRVD */ 2964 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_24, info, 2965 IRDMA_HMC_IW_HTE); 2966 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_32, info, 2967 IRDMA_HMC_IW_ARP); 2968 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_40, info, 2969 IRDMA_HMC_IW_APBVT_ENTRY); 2970 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_48, info, 2971 IRDMA_HMC_IW_MR); 2972 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_56, info, 2973 IRDMA_HMC_IW_XF); 2974 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_64, info, 2975 IRDMA_HMC_IW_XFFL); 2976 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_72, info, 2977 IRDMA_HMC_IW_Q1); 2978 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_80, info, 2979 IRDMA_HMC_IW_Q1FL); 2980 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_88, info, 2981 IRDMA_HMC_IW_TIMER); 2982 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_112, info, 2983 IRDMA_HMC_IW_PBLE); 2984 /* skipping RSVD. */ 2985 if (dev->hw_attrs.uk_attrs.hw_rev != IRDMA_GEN_1) { 2986 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_96, info, 2987 IRDMA_HMC_IW_FSIMC); 2988 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_104, info, 2989 IRDMA_HMC_IW_FSIAV); 2990 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_128, info, 2991 IRDMA_HMC_IW_RRF); 2992 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_136, info, 2993 IRDMA_HMC_IW_RRFFL); 2994 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_144, info, 2995 IRDMA_HMC_IW_HDR); 2996 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_152, info, 2997 IRDMA_HMC_IW_MD); 2998 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_160, info, 2999 IRDMA_HMC_IW_OOISC); 3000 irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_168, info, 3001 IRDMA_HMC_IW_OOISCFFL); 3002 } 3003 3004 /* searching for the last object in HMC to find the size of the HMC area.
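 * The SD count computed below is that total size split into 2 MB chunks, rounded up when there is a remainder.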
*/ 3005 for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) { 3006 if (info[i].base > max_base) { 3007 max_base = info[i].base; 3008 last_hmc_obj = i; 3009 } 3010 } 3011 3012 size = info[last_hmc_obj].cnt * info[last_hmc_obj].size + 3013 info[last_hmc_obj].base; 3014 3015 if (size & 0x1FFFFF) 3016 *sd = (u32)((size >> 21) + 1); /* add 1 for remainder */ 3017 else 3018 *sd = (u32)(size >> 21); 3019 3020 return 0; 3021 } 3022 3023 /** 3024 * irdma_sc_decode_fpm_query() - Decode a 64 bit value into max count and size 3025 * @buf: ptr to fpm query buffer 3026 * @buf_idx: index into buf 3027 * @obj_info: ptr to irdma_hmc_obj_info struct 3028 * @rsrc_idx: resource index into info 3029 * 3030 * Decode a 64 bit value from fpm query buffer into max count and size 3031 */ 3032 static u64 irdma_sc_decode_fpm_query(__le64 * buf, u32 buf_idx, 3033 struct irdma_hmc_obj_info *obj_info, 3034 u32 rsrc_idx){ 3035 u64 temp; 3036 u32 size; 3037 3038 get_64bit_val(buf, buf_idx, &temp); 3039 obj_info[rsrc_idx].max_cnt = (u32)temp; 3040 size = (u32)RS_64_1(temp, 32); 3041 obj_info[rsrc_idx].size = LS_64_1(1, size); 3042 3043 return temp; 3044 } 3045 3046 /** 3047 * irdma_sc_parse_fpm_query_buf() - parses fpm query buffer 3048 * @dev: ptr to shared code device 3049 * @buf: ptr to fpm query buffer 3050 * @hmc_info: ptr to irdma_hmc_obj_info struct 3051 * @hmc_fpm_misc: ptr to fpm data 3052 * 3053 * parses fpm query buffer and copy max_cnt and 3054 * size value of hmc objects in hmc_info 3055 */ 3056 static int 3057 irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 * buf, 3058 struct irdma_hmc_info *hmc_info, 3059 struct irdma_hmc_fpm_misc *hmc_fpm_misc) 3060 { 3061 struct irdma_hmc_obj_info *obj_info; 3062 u64 temp; 3063 u32 size; 3064 u16 max_pe_sds; 3065 3066 obj_info = hmc_info->hmc_obj; 3067 3068 get_64bit_val(buf, IRDMA_BYTE_0, &temp); 3069 hmc_info->first_sd_index = (u16)RS_64(temp, IRDMA_QUERY_FPM_FIRST_PE_SD_INDEX); 3070 max_pe_sds = (u16)RS_64(temp, IRDMA_QUERY_FPM_MAX_PE_SDS); 3071 3072 hmc_fpm_misc->max_sds = max_pe_sds; 3073 hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index; 3074 get_64bit_val(buf, 8, &temp); 3075 obj_info[IRDMA_HMC_IW_QP].max_cnt = (u32)RS_64(temp, IRDMA_QUERY_FPM_MAX_QPS); 3076 size = (u32)RS_64_1(temp, 32); 3077 obj_info[IRDMA_HMC_IW_QP].size = LS_64_1(1, size); 3078 3079 get_64bit_val(buf, 16, &temp); 3080 obj_info[IRDMA_HMC_IW_CQ].max_cnt = (u32)RS_64(temp, IRDMA_QUERY_FPM_MAX_CQS); 3081 size = (u32)RS_64_1(temp, 32); 3082 obj_info[IRDMA_HMC_IW_CQ].size = LS_64_1(1, size); 3083 3084 irdma_sc_decode_fpm_query(buf, 32, obj_info, IRDMA_HMC_IW_HTE); 3085 irdma_sc_decode_fpm_query(buf, 40, obj_info, IRDMA_HMC_IW_ARP); 3086 3087 obj_info[IRDMA_HMC_IW_APBVT_ENTRY].size = 8192; 3088 obj_info[IRDMA_HMC_IW_APBVT_ENTRY].max_cnt = 1; 3089 3090 irdma_sc_decode_fpm_query(buf, 48, obj_info, IRDMA_HMC_IW_MR); 3091 irdma_sc_decode_fpm_query(buf, 56, obj_info, IRDMA_HMC_IW_XF); 3092 3093 get_64bit_val(buf, 64, &temp); 3094 obj_info[IRDMA_HMC_IW_XFFL].max_cnt = (u32)temp; 3095 obj_info[IRDMA_HMC_IW_XFFL].size = 4; 3096 hmc_fpm_misc->xf_block_size = RS_64(temp, IRDMA_QUERY_FPM_XFBLOCKSIZE); 3097 if (!hmc_fpm_misc->xf_block_size) 3098 return -EINVAL; 3099 3100 irdma_sc_decode_fpm_query(buf, 72, obj_info, IRDMA_HMC_IW_Q1); 3101 get_64bit_val(buf, 80, &temp); 3102 obj_info[IRDMA_HMC_IW_Q1FL].max_cnt = (u32)temp; 3103 obj_info[IRDMA_HMC_IW_Q1FL].size = 4; 3104 3105 hmc_fpm_misc->q1_block_size = RS_64(temp, IRDMA_QUERY_FPM_Q1BLOCKSIZE); 3106 if (!hmc_fpm_misc->q1_block_size) 
3107 return -EINVAL; 3108 3109 irdma_sc_decode_fpm_query(buf, 88, obj_info, IRDMA_HMC_IW_TIMER); 3110 3111 get_64bit_val(buf, 112, &temp); 3112 obj_info[IRDMA_HMC_IW_PBLE].max_cnt = (u32)temp; 3113 obj_info[IRDMA_HMC_IW_PBLE].size = 8; 3114 3115 get_64bit_val(buf, 120, &temp); 3116 hmc_fpm_misc->max_ceqs = RS_64(temp, IRDMA_QUERY_FPM_MAX_CEQS); 3117 hmc_fpm_misc->ht_multiplier = RS_64(temp, IRDMA_QUERY_FPM_HTMULTIPLIER); 3118 hmc_fpm_misc->timer_bucket = RS_64(temp, IRDMA_QUERY_FPM_TIMERBUCKET); 3119 if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) 3120 return 0; 3121 irdma_sc_decode_fpm_query(buf, 96, obj_info, IRDMA_HMC_IW_FSIMC); 3122 irdma_sc_decode_fpm_query(buf, 104, obj_info, IRDMA_HMC_IW_FSIAV); 3123 irdma_sc_decode_fpm_query(buf, 128, obj_info, IRDMA_HMC_IW_RRF); 3124 3125 get_64bit_val(buf, IRDMA_BYTE_136, &temp); 3126 obj_info[IRDMA_HMC_IW_RRFFL].max_cnt = (u32)temp; 3127 obj_info[IRDMA_HMC_IW_RRFFL].size = 4; 3128 hmc_fpm_misc->rrf_block_size = RS_64(temp, IRDMA_QUERY_FPM_RRFBLOCKSIZE); 3129 if (!hmc_fpm_misc->rrf_block_size && 3130 obj_info[IRDMA_HMC_IW_RRFFL].max_cnt) 3131 return -EINVAL; 3132 3133 irdma_sc_decode_fpm_query(buf, 144, obj_info, IRDMA_HMC_IW_HDR); 3134 irdma_sc_decode_fpm_query(buf, 152, obj_info, IRDMA_HMC_IW_MD); 3135 irdma_sc_decode_fpm_query(buf, 160, obj_info, IRDMA_HMC_IW_OOISC); 3136 3137 get_64bit_val(buf, IRDMA_BYTE_168, &temp); 3138 obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt = (u32)temp; 3139 obj_info[IRDMA_HMC_IW_OOISCFFL].size = 4; 3140 hmc_fpm_misc->ooiscf_block_size = RS_64(temp, IRDMA_QUERY_FPM_OOISCFBLOCKSIZE); 3141 if (!hmc_fpm_misc->ooiscf_block_size && 3142 obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt) 3143 return -EINVAL; 3144 3145 return 0; 3146 } 3147 3148 /** 3149 * irdma_sc_find_reg_cq - find cq ctx index 3150 * @ceq: ceq sc structure 3151 * @cq: cq sc structure 3152 */ 3153 static u32 irdma_sc_find_reg_cq(struct irdma_sc_ceq *ceq, 3154 struct irdma_sc_cq *cq){ 3155 u32 i; 3156 3157 for (i = 0; i < ceq->reg_cq_size; i++) { 3158 if (cq == ceq->reg_cq[i]) 3159 return i; 3160 } 3161 3162 return IRDMA_INVALID_CQ_IDX; 3163 } 3164 3165 /** 3166 * irdma_sc_add_cq_ctx - add cq ctx tracking for ceq 3167 * @ceq: ceq sc structure 3168 * @cq: cq sc structure 3169 */ 3170 int 3171 irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq) 3172 { 3173 unsigned long flags; 3174 3175 spin_lock_irqsave(&ceq->req_cq_lock, flags); 3176 3177 if (ceq->reg_cq_size == ceq->elem_cnt) { 3178 spin_unlock_irqrestore(&ceq->req_cq_lock, flags); 3179 return -ENOSPC; 3180 } 3181 3182 ceq->reg_cq[ceq->reg_cq_size++] = cq; 3183 3184 spin_unlock_irqrestore(&ceq->req_cq_lock, flags); 3185 3186 return 0; 3187 } 3188 3189 /** 3190 * irdma_sc_remove_cq_ctx - remove cq ctx tracking for ceq 3191 * @ceq: ceq sc structure 3192 * @cq: cq sc structure 3193 */ 3194 void 3195 irdma_sc_remove_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq) 3196 { 3197 unsigned long flags; 3198 u32 cq_ctx_idx; 3199 3200 spin_lock_irqsave(&ceq->req_cq_lock, flags); 3201 cq_ctx_idx = irdma_sc_find_reg_cq(ceq, cq); 3202 if (cq_ctx_idx == IRDMA_INVALID_CQ_IDX) 3203 goto exit; 3204 3205 ceq->reg_cq_size--; 3206 if (cq_ctx_idx != ceq->reg_cq_size) 3207 ceq->reg_cq[cq_ctx_idx] = ceq->reg_cq[ceq->reg_cq_size]; 3208 ceq->reg_cq[ceq->reg_cq_size] = NULL; 3209 3210 exit: 3211 spin_unlock_irqrestore(&ceq->req_cq_lock, flags); 3212 } 3213 3214 /** 3215 * irdma_sc_cqp_init - Initialize buffers for a control Queue Pair 3216 * @cqp: IWARP control queue pair pointer 3217 * @info: IWARP control queue pair 
init info pointer 3218 * 3219 * Initializes the object and context buffers for a control Queue Pair. 3220 */ 3221 int 3222 irdma_sc_cqp_init(struct irdma_sc_cqp *cqp, 3223 struct irdma_cqp_init_info *info) 3224 { 3225 u8 hw_sq_size; 3226 3227 if (info->sq_size > IRDMA_CQP_SW_SQSIZE_2048 || 3228 info->sq_size < IRDMA_CQP_SW_SQSIZE_4 || 3229 ((info->sq_size & (info->sq_size - 1)))) 3230 return -EINVAL; 3231 3232 hw_sq_size = irdma_get_encoded_wqe_size(info->sq_size, 3233 IRDMA_QUEUE_TYPE_CQP); 3234 cqp->size = sizeof(*cqp); 3235 cqp->sq_size = info->sq_size; 3236 cqp->hw_sq_size = hw_sq_size; 3237 cqp->sq_base = info->sq; 3238 cqp->host_ctx = info->host_ctx; 3239 cqp->sq_pa = info->sq_pa; 3240 cqp->host_ctx_pa = info->host_ctx_pa; 3241 cqp->dev = info->dev; 3242 cqp->struct_ver = info->struct_ver; 3243 cqp->hw_maj_ver = info->hw_maj_ver; 3244 cqp->hw_min_ver = info->hw_min_ver; 3245 cqp->scratch_array = info->scratch_array; 3246 cqp->polarity = 0; 3247 cqp->en_datacenter_tcp = info->en_datacenter_tcp; 3248 cqp->ena_vf_count = info->ena_vf_count; 3249 cqp->hmc_profile = info->hmc_profile; 3250 cqp->ceqs_per_vf = info->ceqs_per_vf; 3251 cqp->disable_packed = info->disable_packed; 3252 cqp->rocev2_rto_policy = info->rocev2_rto_policy; 3253 cqp->protocol_used = info->protocol_used; 3254 irdma_memcpy(&cqp->dcqcn_params, &info->dcqcn_params, sizeof(cqp->dcqcn_params)); 3255 cqp->en_rem_endpoint_trk = info->en_rem_endpoint_trk; 3256 info->dev->cqp = cqp; 3257 3258 IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size); 3259 cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] = 0; 3260 cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS] = 0; 3261 /* for the cqp commands backlog. */ 3262 INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head); 3263 3264 writel(0, cqp->dev->hw_regs[IRDMA_CQPTAIL]); 3265 writel(0, cqp->dev->hw_regs[IRDMA_CQPDB]); 3266 writel(0, cqp->dev->hw_regs[IRDMA_CCQPSTATUS]); 3267 3268 irdma_debug(cqp->dev, IRDMA_DEBUG_WQE, 3269 "sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04x]\n", 3270 cqp->sq_size, cqp->hw_sq_size, cqp->sq_base, (unsigned long long)cqp->sq_pa, cqp, 3271 cqp->polarity); 3272 return 0; 3273 } 3274 3275 /** 3276 * irdma_sc_cqp_create - create cqp during bringup 3277 * @cqp: struct for cqp hw 3278 * @maj_err: If error, major err number 3279 * @min_err: If error, minor err number 3280 */ 3281 int 3282 irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err) 3283 { 3284 u64 temp; 3285 u8 hw_rev; 3286 u32 cnt = 0, p1, p2, val = 0, err_code; 3287 int ret_code; 3288 3289 hw_rev = cqp->dev->hw_attrs.uk_attrs.hw_rev; 3290 cqp->sdbuf.size = IRDMA_UPDATE_SD_BUFF_SIZE * cqp->sq_size; 3291 cqp->sdbuf.va = irdma_allocate_dma_mem(cqp->dev->hw, &cqp->sdbuf, 3292 cqp->sdbuf.size, 3293 IRDMA_SD_BUF_ALIGNMENT); 3294 if (!cqp->sdbuf.va) 3295 return -ENOMEM; 3296 3297 spin_lock_init(&cqp->dev->cqp_lock); 3298 3299 temp = LS_64(cqp->hw_sq_size, IRDMA_CQPHC_SQSIZE) | 3300 LS_64(cqp->struct_ver, IRDMA_CQPHC_SVER) | 3301 LS_64(cqp->disable_packed, IRDMA_CQPHC_DISABLE_PFPDUS) | 3302 LS_64(cqp->ceqs_per_vf, IRDMA_CQPHC_CEQPERVF); 3303 if (hw_rev >= IRDMA_GEN_2) { 3304 temp |= LS_64(cqp->rocev2_rto_policy, IRDMA_CQPHC_ROCEV2_RTO_POLICY) | 3305 LS_64(cqp->protocol_used, IRDMA_CQPHC_PROTOCOL_USED); 3306 } 3307 3308 set_64bit_val(cqp->host_ctx, IRDMA_BYTE_0, temp); 3309 set_64bit_val(cqp->host_ctx, IRDMA_BYTE_8, cqp->sq_pa); 3310 3311 temp = LS_64(cqp->ena_vf_count, IRDMA_CQPHC_ENABLED_VFS) | 3312 LS_64(cqp->hmc_profile, IRDMA_CQPHC_HMC_PROFILE); 3313 if (hw_rev >= IRDMA_GEN_2) 3314 temp 
|= LS_64(cqp->en_rem_endpoint_trk, IRDMA_CQPHC_EN_REM_ENDPOINT_TRK); 3315 set_64bit_val(cqp->host_ctx, IRDMA_BYTE_16, temp); 3316 set_64bit_val(cqp->host_ctx, IRDMA_BYTE_24, (uintptr_t)cqp); 3317 temp = LS_64(cqp->hw_maj_ver, IRDMA_CQPHC_HW_MAJVER) | 3318 LS_64(cqp->hw_min_ver, IRDMA_CQPHC_HW_MINVER); 3319 if (hw_rev >= IRDMA_GEN_2) { 3320 temp |= LS_64(cqp->dcqcn_params.min_rate, IRDMA_CQPHC_MIN_RATE) | 3321 LS_64(cqp->dcqcn_params.min_dec_factor, IRDMA_CQPHC_MIN_DEC_FACTOR); 3322 } 3323 set_64bit_val(cqp->host_ctx, IRDMA_BYTE_32, temp); 3324 set_64bit_val(cqp->host_ctx, IRDMA_BYTE_40, 0); 3325 temp = 0; 3326 if (hw_rev >= IRDMA_GEN_2) { 3327 temp |= LS_64(cqp->dcqcn_params.dcqcn_t, IRDMA_CQPHC_DCQCN_T) | 3328 LS_64(cqp->dcqcn_params.rai_factor, IRDMA_CQPHC_RAI_FACTOR) | 3329 LS_64(cqp->dcqcn_params.hai_factor, IRDMA_CQPHC_HAI_FACTOR); 3330 } 3331 set_64bit_val(cqp->host_ctx, IRDMA_BYTE_48, temp); 3332 temp = 0; 3333 if (hw_rev >= IRDMA_GEN_2) { 3334 temp |= LS_64(cqp->dcqcn_params.dcqcn_b, IRDMA_CQPHC_DCQCN_B) | 3335 LS_64(cqp->dcqcn_params.dcqcn_f, IRDMA_CQPHC_DCQCN_F) | 3336 LS_64(cqp->dcqcn_params.cc_cfg_valid, IRDMA_CQPHC_CC_CFG_VALID) | 3337 LS_64(cqp->dcqcn_params.rreduce_mperiod, IRDMA_CQPHC_RREDUCE_MPERIOD); 3338 } 3339 set_64bit_val(cqp->host_ctx, IRDMA_BYTE_56, temp); 3340 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CQP_HOST_CTX WQE", 3341 cqp->host_ctx, IRDMA_CQP_CTX_SIZE * 8); 3342 p1 = RS_32_1(cqp->host_ctx_pa, 32); 3343 p2 = (u32)cqp->host_ctx_pa; 3344 3345 writel(p1, cqp->dev->hw_regs[IRDMA_CCQPHIGH]); 3346 writel(p2, cqp->dev->hw_regs[IRDMA_CCQPLOW]); 3347 3348 do { 3349 if (cnt++ > cqp->dev->hw_attrs.max_done_count) { 3350 ret_code = -ETIMEDOUT; 3351 goto err; 3352 } 3353 irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count); 3354 val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]); 3355 } while (!val); 3356 3357 if (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_ERR)) { 3358 ret_code = -EOPNOTSUPP; 3359 goto err; 3360 } 3361 3362 cqp->process_cqp_sds = irdma_update_sds_noccq; 3363 return 0; 3364 3365 err: 3366 spin_lock_destroy(&cqp->dev->cqp_lock); 3367 irdma_free_dma_mem(cqp->dev->hw, &cqp->sdbuf); 3368 err_code = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]); 3369 *min_err = RS_32(err_code, IRDMA_CQPERRCODES_CQP_MINOR_CODE); 3370 *maj_err = RS_32(err_code, IRDMA_CQPERRCODES_CQP_MAJOR_CODE); 3371 return ret_code; 3372 } 3373 3374 /** 3375 * irdma_sc_cqp_post_sq - post of cqp's sq 3376 * @cqp: struct for cqp hw 3377 */ 3378 void 3379 irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp) 3380 { 3381 db_wr32(IRDMA_RING_CURRENT_HEAD(cqp->sq_ring), cqp->dev->cqp_db); 3382 3383 irdma_debug(cqp->dev, IRDMA_DEBUG_WQE, 3384 "CQP SQ head 0x%x tail 0x%x size 0x%x\n", cqp->sq_ring.head, 3385 cqp->sq_ring.tail, cqp->sq_ring.size); 3386 } 3387 3388 /** 3389 * irdma_sc_cqp_get_next_send_wqe_idx - get next wqe on cqp sq 3390 * and pass back index 3391 * @cqp: CQP HW structure 3392 * @scratch: private data for CQP WQE 3393 * @wqe_idx: WQE index of CQP SQ 3394 */ 3395 __le64 * 3396 irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch, 3397 u32 *wqe_idx) 3398 { 3399 __le64 *wqe = NULL; 3400 int ret_code; 3401 3402 if (IRDMA_RING_FULL_ERR(cqp->sq_ring)) { 3403 irdma_debug(cqp->dev, IRDMA_DEBUG_WQE, 3404 "CQP SQ is full, head 0x%x tail 0x%x size 0x%x\n", 3405 cqp->sq_ring.head, cqp->sq_ring.tail, 3406 cqp->sq_ring.size); 3407 return NULL; 3408 } 3409 IRDMA_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code); 3410 if (ret_code) 3411 return NULL; 3412 3413 
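/* slot reserved: count the request, flip the SQ polarity each time the ring wraps to index 0, save the caller's scratch for completion handling, and clear the WQE except for the header quadword at byte 24, which is written last with the valid bit */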
cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS]++; 3414 if (!*wqe_idx) 3415 cqp->polarity = !cqp->polarity; 3416 wqe = cqp->sq_base[*wqe_idx].elem; 3417 cqp->scratch_array[*wqe_idx] = scratch; 3418 3419 memset(&wqe[0], 0, 24); 3420 memset(&wqe[4], 0, 32); 3421 3422 return wqe; 3423 } 3424 3425 /** 3426 * irdma_sc_cqp_destroy - destroy cqp during close 3427 * @cqp: struct for cqp hw 3428 */ 3429 int 3430 irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp) 3431 { 3432 u32 cnt = 0, val; 3433 int ret_code = 0; 3434 3435 writel(0, cqp->dev->hw_regs[IRDMA_CCQPHIGH]); 3436 writel(0, cqp->dev->hw_regs[IRDMA_CCQPLOW]); 3437 do { 3438 if (cnt++ > cqp->dev->hw_attrs.max_done_count) { 3439 ret_code = -ETIMEDOUT; 3440 break; 3441 } 3442 irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count); 3443 val = readl(cqp->dev->hw_regs[IRDMA_CCQPSTATUS]); 3444 } while (FLD_RS_32(cqp->dev, val, IRDMA_CCQPSTATUS_CCQP_DONE)); 3445 3446 irdma_free_dma_mem(cqp->dev->hw, &cqp->sdbuf); 3447 spin_lock_destroy(&cqp->dev->cqp_lock); 3448 return ret_code; 3449 } 3450 3451 /** 3452 * irdma_sc_ccq_arm - enable intr for control cq 3453 * @ccq: ccq sc struct 3454 */ 3455 void 3456 irdma_sc_ccq_arm(struct irdma_sc_cq *ccq) 3457 { 3458 u64 temp_val; 3459 u16 sw_cq_sel; 3460 u8 arm_next_se; 3461 u8 arm_seq_num; 3462 3463 get_64bit_val(ccq->cq_uk.shadow_area, IRDMA_BYTE_32, &temp_val); 3464 sw_cq_sel = (u16)RS_64(temp_val, IRDMA_CQ_DBSA_SW_CQ_SELECT); 3465 arm_next_se = (u8)RS_64(temp_val, IRDMA_CQ_DBSA_ARM_NEXT_SE); 3466 arm_seq_num = (u8)RS_64(temp_val, IRDMA_CQ_DBSA_ARM_SEQ_NUM); 3467 arm_seq_num++; 3468 temp_val = LS_64(arm_seq_num, IRDMA_CQ_DBSA_ARM_SEQ_NUM) | 3469 LS_64(sw_cq_sel, IRDMA_CQ_DBSA_SW_CQ_SELECT) | 3470 LS_64(arm_next_se, IRDMA_CQ_DBSA_ARM_NEXT_SE) | 3471 LS_64(1, IRDMA_CQ_DBSA_ARM_NEXT); 3472 set_64bit_val(ccq->cq_uk.shadow_area, IRDMA_BYTE_32, temp_val); 3473 3474 irdma_wmb(); /* make sure shadow area is updated before arming */ 3475 3476 db_wr32(ccq->cq_uk.cq_id, ccq->dev->cq_arm_db); 3477 } 3478 3479 /** 3480 * irdma_sc_ccq_get_cqe_info - get ccq's cq entry 3481 * @ccq: ccq sc struct 3482 * @info: completion q entry to return 3483 */ 3484 int 3485 irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq, 3486 struct irdma_ccq_cqe_info *info) 3487 { 3488 u64 qp_ctx, temp, temp1; 3489 __le64 *cqe; 3490 struct irdma_sc_cqp *cqp; 3491 u32 wqe_idx; 3492 u32 error; 3493 u8 polarity; 3494 int ret_code = 0; 3495 3496 if (ccq->cq_uk.avoid_mem_cflct) 3497 cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(&ccq->cq_uk); 3498 else 3499 cqe = IRDMA_GET_CURRENT_CQ_ELEM(&ccq->cq_uk); 3500 3501 get_64bit_val(cqe, IRDMA_BYTE_24, &temp); 3502 polarity = (u8)RS_64(temp, IRDMA_CQ_VALID); 3503 if (polarity != ccq->cq_uk.polarity) 3504 return -ENOENT; 3505 3506 get_64bit_val(cqe, IRDMA_BYTE_8, &qp_ctx); 3507 cqp = (struct irdma_sc_cqp *)(irdma_uintptr) qp_ctx; 3508 info->error = (bool)RS_64(temp, IRDMA_CQ_ERROR); 3509 info->maj_err_code = IRDMA_CQPSQ_MAJ_NO_ERROR; 3510 info->min_err_code = (u16)RS_64(temp, IRDMA_CQ_MINERR); 3511 if (info->error) { 3512 info->maj_err_code = (u16)RS_64(temp, IRDMA_CQ_MAJERR); 3513 error = readl(cqp->dev->hw_regs[IRDMA_CQPERRCODES]); 3514 irdma_debug(cqp->dev, IRDMA_DEBUG_CQP, 3515 "CQPERRCODES error_code[x%08X]\n", error); 3516 } 3517 3518 wqe_idx = (u32)RS_64(temp, IRDMA_CQ_WQEIDX); 3519 info->scratch = cqp->scratch_array[wqe_idx]; 3520 3521 get_64bit_val(cqe, IRDMA_BYTE_16, &temp1); 3522 info->op_ret_val = (u32)RS_64(temp1, IRDMA_CCQ_OPRETVAL); 3523 get_64bit_val(cqp->sq_base[wqe_idx].elem, IRDMA_BYTE_24, &temp1); 3524 
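/* the CQP opcode is read back from the original SQ WQE header (byte 24) rather than from the CQE itself */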
info->op_code = (u8)RS_64(temp1, IRDMA_CQPSQ_OPCODE); 3525 info->cqp = cqp; 3526 3527 /* move the head for cq */ 3528 IRDMA_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code); 3529 if (!IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring)) 3530 ccq->cq_uk.polarity ^= 1; 3531 3532 /* update cq tail in cq shadow memory also */ 3533 IRDMA_RING_MOVE_TAIL(ccq->cq_uk.cq_ring); 3534 set_64bit_val(ccq->cq_uk.shadow_area, IRDMA_BYTE_0, 3535 IRDMA_RING_CURRENT_HEAD(ccq->cq_uk.cq_ring)); 3536 3537 irdma_wmb(); /* make sure shadow area is updated before moving tail */ 3538 3539 IRDMA_RING_MOVE_TAIL(cqp->sq_ring); 3540 ccq->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++; 3541 3542 return ret_code; 3543 } 3544 3545 /** 3546 * irdma_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ 3547 * @cqp: struct for cqp hw 3548 * @op_code: cqp opcode for completion 3549 * @compl_info: completion q entry to return 3550 */ 3551 int 3552 irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 op_code, 3553 struct irdma_ccq_cqe_info *compl_info) 3554 { 3555 struct irdma_ccq_cqe_info info = {0}; 3556 struct irdma_sc_cq *ccq; 3557 int ret_code = 0; 3558 u32 cnt = 0; 3559 3560 ccq = cqp->dev->ccq; 3561 while (1) { 3562 if (cnt++ > 100 * cqp->dev->hw_attrs.max_done_count) 3563 return -ETIMEDOUT; 3564 3565 if (cqp->dev->no_cqp) 3566 return -ETIMEDOUT; 3567 3568 if (irdma_sc_ccq_get_cqe_info(ccq, &info)) { 3569 irdma_usec_delay(cqp->dev->hw_attrs.max_sleep_count); 3570 continue; 3571 } 3572 if (info.error && info.op_code != IRDMA_CQP_OP_QUERY_STAG) { 3573 ret_code = -EIO; 3574 break; 3575 } 3576 /* make sure op code matches */ 3577 if (op_code == info.op_code) 3578 break; 3579 irdma_debug(cqp->dev, IRDMA_DEBUG_WQE, 3580 "opcode mismatch for my op code 0x%x, returned opcode %x\n", 3581 op_code, info.op_code); 3582 } 3583 3584 if (compl_info) 3585 irdma_memcpy(compl_info, &info, sizeof(*compl_info)); 3586 3587 return ret_code; 3588 } 3589 3590 /** 3591 * irdma_sc_manage_hmc_pm_func_table - manage of function table 3592 * @cqp: struct for cqp hw 3593 * @scratch: u64 saved to be used during cqp completion 3594 * @info: info for the manage function table operation 3595 * @post_sq: flag for cqp db to ring 3596 */ 3597 static int 3598 irdma_sc_manage_hmc_pm_func_table(struct irdma_sc_cqp *cqp, 3599 struct irdma_hmc_fcn_info *info, 3600 u64 scratch, bool post_sq) 3601 { 3602 __le64 *wqe; 3603 u64 hdr; 3604 3605 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 3606 if (!wqe) 3607 return -ENOSPC; 3608 3609 hdr = LS_64(info->vf_id, IRDMA_CQPSQ_MHMC_VFIDX) | 3610 LS_64(IRDMA_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, 3611 IRDMA_CQPSQ_OPCODE) | 3612 LS_64(info->free_fcn, IRDMA_CQPSQ_MHMC_FREEPMFN) | 3613 LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID); 3614 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 3615 3616 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 3617 3618 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, 3619 "MANAGE_HMC_PM_FUNC_TABLE WQE", wqe, 3620 IRDMA_CQP_WQE_SIZE * 8); 3621 if (post_sq) 3622 irdma_sc_cqp_post_sq(cqp); 3623 3624 return 0; 3625 } 3626 3627 /** 3628 * irdma_sc_commit_fpm_val_done - wait for cqp eqe completion 3629 * for fpm commit 3630 * @cqp: struct for cqp hw 3631 */ 3632 static int 3633 irdma_sc_commit_fpm_val_done(struct irdma_sc_cqp *cqp) 3634 { 3635 return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_COMMIT_FPM_VAL, 3636 NULL); 3637 } 3638 3639 /** 3640 * irdma_sc_commit_fpm_val - cqp wqe for commit fpm values 3641 * @cqp: struct for cqp hw 3642 * @scratch: u64 saved to be used during cqp completion 
3643 * @hmc_fn_id: hmc function id 3644 * @commit_fpm_mem: Memory for fpm values 3645 * @post_sq: flag for cqp db to ring 3646 * @wait_type: poll ccq or cqp registers for cqp completion 3647 */ 3648 static int 3649 irdma_sc_commit_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch, 3650 u8 hmc_fn_id, 3651 struct irdma_dma_mem *commit_fpm_mem, 3652 bool post_sq, u8 wait_type) 3653 { 3654 __le64 *wqe; 3655 u64 hdr; 3656 u32 tail, val, error; 3657 int ret_code = 0; 3658 3659 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 3660 if (!wqe) 3661 return -ENOSPC; 3662 3663 set_64bit_val(wqe, IRDMA_BYTE_16, hmc_fn_id); 3664 set_64bit_val(wqe, IRDMA_BYTE_32, commit_fpm_mem->pa); 3665 3666 hdr = LS_64(IRDMA_COMMIT_FPM_BUF_SIZE, IRDMA_CQPSQ_BUFSIZE) | 3667 LS_64(IRDMA_CQP_OP_COMMIT_FPM_VAL, IRDMA_CQPSQ_OPCODE) | 3668 LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID); 3669 3670 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 3671 3672 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 3673 3674 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "COMMIT_FPM_VAL WQE", wqe, 3675 IRDMA_CQP_WQE_SIZE * 8); 3676 irdma_get_cqp_reg_info(cqp, &val, &tail, &error); 3677 3678 if (post_sq) { 3679 irdma_sc_cqp_post_sq(cqp); 3680 if (wait_type == IRDMA_CQP_WAIT_POLL_REGS) 3681 ret_code = irdma_cqp_poll_registers(cqp, tail, 3682 cqp->dev->hw_attrs.max_done_count); 3683 else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ) 3684 ret_code = irdma_sc_commit_fpm_val_done(cqp); 3685 } 3686 3687 return ret_code; 3688 } 3689 3690 /** 3691 * irdma_sc_query_fpm_val_done - poll for cqp wqe completion for 3692 * query fpm 3693 * @cqp: struct for cqp hw 3694 */ 3695 static int 3696 irdma_sc_query_fpm_val_done(struct irdma_sc_cqp *cqp) 3697 { 3698 return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_QUERY_FPM_VAL, 3699 NULL); 3700 } 3701 3702 /** 3703 * irdma_sc_query_fpm_val - cqp wqe query fpm values 3704 * @cqp: struct for cqp hw 3705 * @scratch: u64 saved to be used during cqp completion 3706 * @hmc_fn_id: hmc function id 3707 * @query_fpm_mem: memory for return fpm values 3708 * @post_sq: flag for cqp db to ring 3709 * @wait_type: poll ccq or cqp registers for cqp completion 3710 */ 3711 static int 3712 irdma_sc_query_fpm_val(struct irdma_sc_cqp *cqp, u64 scratch, 3713 u8 hmc_fn_id, 3714 struct irdma_dma_mem *query_fpm_mem, 3715 bool post_sq, u8 wait_type) 3716 { 3717 __le64 *wqe; 3718 u64 hdr; 3719 u32 tail, val, error; 3720 int ret_code = 0; 3721 3722 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 3723 if (!wqe) 3724 return -ENOSPC; 3725 3726 set_64bit_val(wqe, IRDMA_BYTE_16, hmc_fn_id); 3727 set_64bit_val(wqe, IRDMA_BYTE_32, query_fpm_mem->pa); 3728 3729 hdr = LS_64(IRDMA_CQP_OP_QUERY_FPM_VAL, IRDMA_CQPSQ_OPCODE) | 3730 LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID); 3731 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 3732 3733 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 3734 3735 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QUERY_FPM WQE", wqe, 3736 IRDMA_CQP_WQE_SIZE * 8); 3737 irdma_get_cqp_reg_info(cqp, &val, &tail, &error); 3738 3739 if (post_sq) { 3740 irdma_sc_cqp_post_sq(cqp); 3741 if (wait_type == IRDMA_CQP_WAIT_POLL_REGS) 3742 ret_code = irdma_cqp_poll_registers(cqp, tail, 3743 cqp->dev->hw_attrs.max_done_count); 3744 else if (wait_type == IRDMA_CQP_WAIT_POLL_CQ) 3745 ret_code = irdma_sc_query_fpm_val_done(cqp); 3746 } 3747 3748 return ret_code; 3749 } 3750 3751 /** 3752 * irdma_sc_ceq_init - initialize ceq 3753 * @ceq: ceq sc structure 3754 * @info: ceq initialization info 3755 */ 3756 int 3757 
irdma_sc_ceq_init(struct irdma_sc_ceq *ceq, 3758 struct irdma_ceq_init_info *info) 3759 { 3760 u32 pble_obj_cnt; 3761 3762 if (info->elem_cnt < info->dev->hw_attrs.min_hw_ceq_size || 3763 info->elem_cnt > info->dev->hw_attrs.max_hw_ceq_size) 3764 return -EINVAL; 3765 3766 if (info->ceq_id > (info->dev->hmc_fpm_misc.max_ceqs - 1)) 3767 return -EINVAL; 3768 pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt; 3769 3770 if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt) 3771 return -EINVAL; 3772 3773 ceq->size = sizeof(*ceq); 3774 ceq->ceqe_base = (struct irdma_ceqe *)info->ceqe_base; 3775 ceq->ceq_id = info->ceq_id; 3776 ceq->dev = info->dev; 3777 ceq->elem_cnt = info->elem_cnt; 3778 ceq->ceq_elem_pa = info->ceqe_pa; 3779 ceq->virtual_map = info->virtual_map; 3780 ceq->itr_no_expire = info->itr_no_expire; 3781 ceq->reg_cq = info->reg_cq; 3782 ceq->reg_cq_size = 0; 3783 spin_lock_init(&ceq->req_cq_lock); 3784 ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0); 3785 ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0); 3786 ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL); 3787 ceq->tph_en = info->tph_en; 3788 ceq->tph_val = info->tph_val; 3789 ceq->vsi = info->vsi; 3790 ceq->polarity = 1; 3791 IRDMA_RING_INIT(ceq->ceq_ring, ceq->elem_cnt); 3792 ceq->dev->ceq[info->ceq_id] = ceq; 3793 3794 return 0; 3795 } 3796 3797 /** 3798 * irdma_sc_ceq_create - create ceq wqe 3799 * @ceq: ceq sc structure 3800 * @scratch: u64 saved to be used during cqp completion 3801 * @post_sq: flag for cqp db to ring 3802 */ 3803 3804 static int 3805 irdma_sc_ceq_create(struct irdma_sc_ceq *ceq, u64 scratch, 3806 bool post_sq) 3807 { 3808 struct irdma_sc_cqp *cqp; 3809 __le64 *wqe; 3810 u64 hdr; 3811 3812 cqp = ceq->dev->cqp; 3813 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 3814 if (!wqe) 3815 return -ENOSPC; 3816 set_64bit_val(wqe, IRDMA_BYTE_16, ceq->elem_cnt); 3817 set_64bit_val(wqe, IRDMA_BYTE_32, 3818 (ceq->virtual_map ? 0 : ceq->ceq_elem_pa)); 3819 set_64bit_val(wqe, IRDMA_BYTE_48, 3820 (ceq->virtual_map ? 
ceq->first_pm_pbl_idx : 0)); 3821 set_64bit_val(wqe, IRDMA_BYTE_56, 3822 LS_64(ceq->tph_val, IRDMA_CQPSQ_TPHVAL) | 3823 LS_64(ceq->vsi->vsi_idx, IRDMA_CQPSQ_VSIIDX)); 3824 hdr = LS_64(ceq->ceq_id, IRDMA_CQPSQ_CEQ_CEQID) | 3825 LS_64(IRDMA_CQP_OP_CREATE_CEQ, IRDMA_CQPSQ_OPCODE) | 3826 LS_64(ceq->pbl_chunk_size, IRDMA_CQPSQ_CEQ_LPBLSIZE) | 3827 LS_64(ceq->virtual_map, IRDMA_CQPSQ_CEQ_VMAP) | 3828 LS_64(ceq->itr_no_expire, IRDMA_CQPSQ_CEQ_ITRNOEXPIRE) | 3829 LS_64(ceq->tph_en, IRDMA_CQPSQ_TPHEN) | 3830 LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID); 3831 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 3832 3833 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 3834 3835 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CEQ_CREATE WQE", wqe, 3836 IRDMA_CQP_WQE_SIZE * 8); 3837 if (post_sq) 3838 irdma_sc_cqp_post_sq(cqp); 3839 3840 return 0; 3841 } 3842 3843 /** 3844 * irdma_sc_cceq_create_done - poll for control ceq wqe to complete 3845 * @ceq: ceq sc structure 3846 */ 3847 static int 3848 irdma_sc_cceq_create_done(struct irdma_sc_ceq *ceq) 3849 { 3850 struct irdma_sc_cqp *cqp; 3851 3852 cqp = ceq->dev->cqp; 3853 return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CEQ, 3854 NULL); 3855 } 3856 3857 /** 3858 * irdma_sc_cceq_destroy_done - poll for destroy cceq to complete 3859 * @ceq: ceq sc structure 3860 */ 3861 int 3862 irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq) 3863 { 3864 struct irdma_sc_cqp *cqp; 3865 3866 if (ceq->reg_cq) 3867 irdma_sc_remove_cq_ctx(ceq, ceq->dev->ccq); 3868 3869 cqp = ceq->dev->cqp; 3870 cqp->process_cqp_sds = irdma_update_sds_noccq; 3871 3872 return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_DESTROY_CEQ, 3873 NULL); 3874 } 3875 3876 /** 3877 * irdma_sc_cceq_create - create cceq 3878 * @ceq: ceq sc structure 3879 * @scratch: u64 saved to be used during cqp completion 3880 */ 3881 int 3882 irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch) 3883 { 3884 int ret_code; 3885 struct irdma_sc_dev *dev = ceq->dev; 3886 3887 dev->ccq->vsi = ceq->vsi; 3888 if (ceq->reg_cq) { 3889 ret_code = irdma_sc_add_cq_ctx(ceq, ceq->dev->ccq); 3890 if (ret_code) 3891 return ret_code; 3892 } 3893 3894 ret_code = irdma_sc_ceq_create(ceq, scratch, true); 3895 if (!ret_code) 3896 return irdma_sc_cceq_create_done(ceq); 3897 3898 return ret_code; 3899 } 3900 3901 /** 3902 * irdma_sc_ceq_destroy - destroy ceq 3903 * @ceq: ceq sc structure 3904 * @scratch: u64 saved to be used during cqp completion 3905 * @post_sq: flag for cqp db to ring 3906 */ 3907 int 3908 irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq) 3909 { 3910 struct irdma_sc_cqp *cqp; 3911 __le64 *wqe; 3912 u64 hdr; 3913 3914 cqp = ceq->dev->cqp; 3915 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 3916 if (!wqe) 3917 return -ENOSPC; 3918 3919 set_64bit_val(wqe, IRDMA_BYTE_16, ceq->elem_cnt); 3920 set_64bit_val(wqe, IRDMA_BYTE_48, ceq->first_pm_pbl_idx); 3921 hdr = ceq->ceq_id | 3922 LS_64(IRDMA_CQP_OP_DESTROY_CEQ, IRDMA_CQPSQ_OPCODE) | 3923 LS_64(ceq->pbl_chunk_size, IRDMA_CQPSQ_CEQ_LPBLSIZE) | 3924 LS_64(ceq->virtual_map, IRDMA_CQPSQ_CEQ_VMAP) | 3925 LS_64(ceq->tph_en, IRDMA_CQPSQ_TPHEN) | 3926 LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID); 3927 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 3928 3929 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 3930 3931 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CEQ_DESTROY WQE", wqe, 3932 IRDMA_CQP_WQE_SIZE * 8); 3933 ceq->dev->ceq[ceq->ceq_id] = NULL; 3934 if (post_sq) 3935 irdma_sc_cqp_post_sq(cqp); 3936 3937 return 0; 3938 } 
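/*
 * Illustrative only: a consumer of irdma_sc_process_ceq() typically drains
 * the CEQ from its interrupt/tasklet path, handling each returned CQ until
 * the function runs out of valid CEQEs and returns NULL. The helper below is
 * a hypothetical sketch, not part of this driver:
 *
 *	static void example_drain_ceq(struct irdma_sc_dev *dev,
 *				      struct irdma_sc_ceq *ceq)
 *	{
 *		struct irdma_sc_cq *cq;
 *
 *		while ((cq = irdma_sc_process_ceq(dev, ceq)))
 *			example_poll_cq(cq);	// hypothetical CQ poller
 *	}
 */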
3939 3940 /** 3941 * irdma_sc_process_ceq - process ceq 3942 * @dev: sc device struct 3943 * @ceq: ceq sc structure 3944 * 3945 * It is expected caller serializes this function with cleanup_ceqes() 3946 * because these functions manipulate the same ceq 3947 */ 3948 void * 3949 irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq) 3950 { 3951 u64 temp; 3952 __le64 *ceqe; 3953 struct irdma_sc_cq *cq = NULL; 3954 struct irdma_sc_cq *temp_cq; 3955 u8 polarity; 3956 u32 cq_idx; 3957 unsigned long flags; 3958 3959 do { 3960 cq_idx = 0; 3961 ceqe = IRDMA_GET_CURRENT_CEQ_ELEM(ceq); 3962 get_64bit_val(ceqe, IRDMA_BYTE_0, &temp); 3963 polarity = (u8)RS_64(temp, IRDMA_CEQE_VALID); 3964 if (polarity != ceq->polarity) 3965 return NULL; 3966 3967 temp_cq = (struct irdma_sc_cq *)(irdma_uintptr) LS_64_1(temp, 1); 3968 if (!temp_cq) { 3969 cq_idx = IRDMA_INVALID_CQ_IDX; 3970 IRDMA_RING_MOVE_TAIL(ceq->ceq_ring); 3971 3972 if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring)) 3973 ceq->polarity ^= 1; 3974 continue; 3975 } 3976 3977 cq = temp_cq; 3978 if (ceq->reg_cq) { 3979 spin_lock_irqsave(&ceq->req_cq_lock, flags); 3980 cq_idx = irdma_sc_find_reg_cq(ceq, cq); 3981 spin_unlock_irqrestore(&ceq->req_cq_lock, flags); 3982 } 3983 3984 IRDMA_RING_MOVE_TAIL(ceq->ceq_ring); 3985 if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring)) 3986 ceq->polarity ^= 1; 3987 } while (cq_idx == IRDMA_INVALID_CQ_IDX); 3988 3989 if (cq) { 3990 cq->cq_uk.armed = false; 3991 irdma_sc_cq_ack(cq); 3992 } 3993 return cq; 3994 } 3995 3996 /** 3997 * irdma_sc_cleanup_ceqes - clear the valid ceqes ctx matching the cq 3998 * @cq: cq for which the ceqes need to be cleaned up 3999 * @ceq: ceq ptr 4000 * 4001 * The function is called after the cq is destroyed to cleanup 4002 * its pending ceqe entries. It is expected caller serializes this 4003 * function with process_ceq() in interrupt context. 
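 * Matching entries keep only their valid bit (the CQ context in the CEQE is
 * cleared), so a later process_ceq() pass skips them as if no CQ were
 * attached.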
4004 */ 4005 void 4006 irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq) 4007 { 4008 struct irdma_sc_cq *next_cq; 4009 u8 ceq_polarity = ceq->polarity; 4010 __le64 *ceqe; 4011 u8 polarity; 4012 u64 temp; 4013 int next; 4014 u32 i; 4015 4016 next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, 0); 4017 4018 for (i = 1; i <= IRDMA_RING_SIZE(*ceq); i++) { 4019 ceqe = IRDMA_GET_CEQ_ELEM_AT_POS(ceq, next); 4020 4021 get_64bit_val(ceqe, IRDMA_BYTE_0, &temp); 4022 polarity = (u8)RS_64(temp, IRDMA_CEQE_VALID); 4023 if (polarity != ceq_polarity) 4024 return; 4025 4026 next_cq = (struct irdma_sc_cq *)(irdma_uintptr) LS_64_1(temp, 1); 4027 if (cq == next_cq) 4028 set_64bit_val(ceqe, IRDMA_BYTE_0, temp & IRDMA_CEQE_VALID_M); 4029 4030 next = IRDMA_RING_GET_NEXT_TAIL(ceq->ceq_ring, i); 4031 if (!next) 4032 ceq_polarity ^= 1; 4033 } 4034 } 4035 4036 /** 4037 * irdma_sc_aeq_init - initialize aeq 4038 * @aeq: aeq structure ptr 4039 * @info: aeq initialization info 4040 */ 4041 int 4042 irdma_sc_aeq_init(struct irdma_sc_aeq *aeq, 4043 struct irdma_aeq_init_info *info) 4044 { 4045 u32 pble_obj_cnt; 4046 4047 if (info->elem_cnt < info->dev->hw_attrs.min_hw_aeq_size || 4048 info->elem_cnt > info->dev->hw_attrs.max_hw_aeq_size) 4049 return -EINVAL; 4050 4051 pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt; 4052 4053 if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt) 4054 return -EINVAL; 4055 4056 aeq->size = sizeof(*aeq); 4057 aeq->polarity = 1; 4058 aeq->aeqe_base = (struct irdma_sc_aeqe *)info->aeqe_base; 4059 aeq->dev = info->dev; 4060 aeq->elem_cnt = info->elem_cnt; 4061 aeq->aeq_elem_pa = info->aeq_elem_pa; 4062 IRDMA_RING_INIT(aeq->aeq_ring, aeq->elem_cnt); 4063 aeq->virtual_map = info->virtual_map; 4064 aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL); 4065 aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0); 4066 aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0); 4067 aeq->msix_idx = info->msix_idx; 4068 info->dev->aeq = aeq; 4069 4070 return 0; 4071 } 4072 4073 /** 4074 * irdma_sc_aeq_create - create aeq 4075 * @aeq: aeq structure ptr 4076 * @scratch: u64 saved to be used during cqp completion 4077 * @post_sq: flag for cqp db to ring 4078 */ 4079 static int 4080 irdma_sc_aeq_create(struct irdma_sc_aeq *aeq, u64 scratch, 4081 bool post_sq) 4082 { 4083 __le64 *wqe; 4084 struct irdma_sc_cqp *cqp; 4085 u64 hdr; 4086 4087 cqp = aeq->dev->cqp; 4088 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 4089 if (!wqe) 4090 return -ENOSPC; 4091 set_64bit_val(wqe, IRDMA_BYTE_16, aeq->elem_cnt); 4092 set_64bit_val(wqe, IRDMA_BYTE_32, 4093 (aeq->virtual_map ? 0 : aeq->aeq_elem_pa)); 4094 set_64bit_val(wqe, IRDMA_BYTE_48, 4095 (aeq->virtual_map ? 
aeq->first_pm_pbl_idx : 0)); 4096 4097 hdr = LS_64(IRDMA_CQP_OP_CREATE_AEQ, IRDMA_CQPSQ_OPCODE) | 4098 LS_64(aeq->pbl_chunk_size, IRDMA_CQPSQ_AEQ_LPBLSIZE) | 4099 LS_64(aeq->virtual_map, IRDMA_CQPSQ_AEQ_VMAP) | 4100 LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID); 4101 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 4102 4103 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 4104 4105 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "AEQ_CREATE WQE", wqe, 4106 IRDMA_CQP_WQE_SIZE * 8); 4107 if (post_sq) 4108 irdma_sc_cqp_post_sq(cqp); 4109 4110 return 0; 4111 } 4112 4113 /** 4114 * irdma_sc_aeq_destroy - destroy aeq during close 4115 * @aeq: aeq structure ptr 4116 * @scratch: u64 saved to be used during cqp completion 4117 * @post_sq: flag for cqp db to ring 4118 */ 4119 static int 4120 irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch, 4121 bool post_sq) 4122 { 4123 __le64 *wqe; 4124 struct irdma_sc_cqp *cqp; 4125 struct irdma_sc_dev *dev; 4126 u64 hdr; 4127 4128 dev = aeq->dev; 4129 writel(0, dev->hw_regs[IRDMA_PFINT_AEQCTL]); 4130 4131 cqp = dev->cqp; 4132 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 4133 if (!wqe) 4134 return -ENOSPC; 4135 set_64bit_val(wqe, IRDMA_BYTE_16, aeq->elem_cnt); 4136 set_64bit_val(wqe, IRDMA_BYTE_48, aeq->first_pm_pbl_idx); 4137 hdr = LS_64(IRDMA_CQP_OP_DESTROY_AEQ, IRDMA_CQPSQ_OPCODE) | 4138 LS_64(aeq->pbl_chunk_size, IRDMA_CQPSQ_AEQ_LPBLSIZE) | 4139 LS_64(aeq->virtual_map, IRDMA_CQPSQ_AEQ_VMAP) | 4140 LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID); 4141 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 4142 4143 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 4144 4145 irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "AEQ_DESTROY WQE", wqe, 4146 IRDMA_CQP_WQE_SIZE * 8); 4147 if (post_sq) 4148 irdma_sc_cqp_post_sq(cqp); 4149 return 0; 4150 } 4151 4152 /** 4153 * irdma_sc_get_next_aeqe - get next aeq entry 4154 * @aeq: aeq structure ptr 4155 * @info: aeqe info to be returned 4156 */ 4157 int 4158 irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq, 4159 struct irdma_aeqe_info *info) 4160 { 4161 u64 temp, compl_ctx; 4162 __le64 *aeqe; 4163 u16 wqe_idx; 4164 u8 ae_src; 4165 u8 polarity; 4166 4167 aeqe = IRDMA_GET_CURRENT_AEQ_ELEM(aeq); 4168 get_64bit_val(aeqe, IRDMA_BYTE_0, &compl_ctx); 4169 get_64bit_val(aeqe, IRDMA_BYTE_8, &temp); 4170 polarity = (u8)RS_64(temp, IRDMA_AEQE_VALID); 4171 4172 if (aeq->polarity != polarity) 4173 return -ENOENT; 4174 4175 irdma_debug_buf(aeq->dev, IRDMA_DEBUG_WQE, "AEQ_ENTRY WQE", aeqe, 16); 4176 4177 ae_src = (u8)RS_64(temp, IRDMA_AEQE_AESRC); 4178 wqe_idx = (u16)RS_64(temp, IRDMA_AEQE_WQDESCIDX); 4179 info->qp_cq_id = (u32)RS_64(temp, IRDMA_AEQE_QPCQID_LOW) | 4180 ((u32)RS_64(temp, IRDMA_AEQE_QPCQID_HI) << 18); 4181 info->ae_id = (u16)RS_64(temp, IRDMA_AEQE_AECODE); 4182 info->tcp_state = (u8)RS_64(temp, IRDMA_AEQE_TCPSTATE); 4183 info->iwarp_state = (u8)RS_64(temp, IRDMA_AEQE_IWSTATE); 4184 info->q2_data_written = (u8)RS_64(temp, IRDMA_AEQE_Q2DATA); 4185 info->aeqe_overflow = (bool)RS_64(temp, IRDMA_AEQE_OVERFLOW); 4186 4187 info->ae_src = ae_src; 4188 switch (info->ae_id) { 4189 case IRDMA_AE_PRIV_OPERATION_DENIED: 4190 case IRDMA_AE_AMP_INVALIDATE_TYPE1_MW: 4191 case IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW: 4192 case IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG: 4193 case IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH: 4194 case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG: 4195 case IRDMA_AE_UDA_XMIT_BAD_PD: 4196 case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT: 4197 case IRDMA_AE_BAD_CLOSE: 4198 case IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO: 4199 case 
IRDMA_AE_STAG_ZERO_INVALID: 4200 case IRDMA_AE_IB_RREQ_AND_Q1_FULL: 4201 case IRDMA_AE_IB_INVALID_REQUEST: 4202 case IRDMA_AE_WQE_UNEXPECTED_OPCODE: 4203 case IRDMA_AE_IB_REMOTE_ACCESS_ERROR: 4204 case IRDMA_AE_IB_REMOTE_OP_ERROR: 4205 case IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION: 4206 case IRDMA_AE_DDP_UBE_INVALID_MO: 4207 case IRDMA_AE_DDP_UBE_INVALID_QN: 4208 case IRDMA_AE_DDP_NO_L_BIT: 4209 case IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION: 4210 case IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE: 4211 case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST: 4212 case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP: 4213 case IRDMA_AE_ROCE_RSP_LENGTH_ERROR: 4214 case IRDMA_AE_INVALID_ARP_ENTRY: 4215 case IRDMA_AE_INVALID_TCP_OPTION_RCVD: 4216 case IRDMA_AE_STALE_ARP_ENTRY: 4217 case IRDMA_AE_INVALID_AH_ENTRY: 4218 case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR: 4219 case IRDMA_AE_LLP_SEGMENT_TOO_SMALL: 4220 case IRDMA_AE_LLP_TOO_MANY_RETRIES: 4221 case IRDMA_AE_LLP_DOUBT_REACHABILITY: 4222 case IRDMA_AE_LLP_CONNECTION_ESTABLISHED: 4223 case IRDMA_AE_RESET_SENT: 4224 case IRDMA_AE_TERMINATE_SENT: 4225 case IRDMA_AE_RESET_NOT_SENT: 4226 case IRDMA_AE_LCE_QP_CATASTROPHIC: 4227 case IRDMA_AE_QP_SUSPEND_COMPLETE: 4228 case IRDMA_AE_UDA_L4LEN_INVALID: 4229 info->qp = true; 4230 info->compl_ctx = compl_ctx; 4231 break; 4232 case IRDMA_AE_LCE_CQ_CATASTROPHIC: 4233 info->cq = true; 4234 info->compl_ctx = LS_64_1(compl_ctx, 1); 4235 ae_src = IRDMA_AE_SOURCE_RSVD; 4236 break; 4237 case IRDMA_AE_ROCE_EMPTY_MCG: 4238 case IRDMA_AE_ROCE_BAD_MC_IP_ADDR: 4239 case IRDMA_AE_ROCE_BAD_MC_QPID: 4240 case IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH: 4241 /* fallthrough */ 4242 case IRDMA_AE_LLP_CONNECTION_RESET: 4243 case IRDMA_AE_LLP_SYN_RECEIVED: 4244 case IRDMA_AE_LLP_FIN_RECEIVED: 4245 case IRDMA_AE_LLP_CLOSE_COMPLETE: 4246 case IRDMA_AE_LLP_TERMINATE_RECEIVED: 4247 case IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE: 4248 ae_src = IRDMA_AE_SOURCE_RSVD; 4249 info->qp = true; 4250 info->compl_ctx = compl_ctx; 4251 break; 4252 default: 4253 break; 4254 } 4255 4256 switch (ae_src) { 4257 case IRDMA_AE_SOURCE_RQ: 4258 case IRDMA_AE_SOURCE_RQ_0011: 4259 info->qp = true; 4260 info->rq = true; 4261 info->wqe_idx = wqe_idx; 4262 info->compl_ctx = compl_ctx; 4263 break; 4264 case IRDMA_AE_SOURCE_CQ: 4265 case IRDMA_AE_SOURCE_CQ_0110: 4266 case IRDMA_AE_SOURCE_CQ_1010: 4267 case IRDMA_AE_SOURCE_CQ_1110: 4268 info->cq = true; 4269 info->compl_ctx = LS_64_1(compl_ctx, 1); 4270 break; 4271 case IRDMA_AE_SOURCE_SQ: 4272 case IRDMA_AE_SOURCE_SQ_0111: 4273 info->qp = true; 4274 info->sq = true; 4275 info->wqe_idx = wqe_idx; 4276 info->compl_ctx = compl_ctx; 4277 break; 4278 case IRDMA_AE_SOURCE_IN_WR: 4279 case IRDMA_AE_SOURCE_IN_RR: 4280 info->qp = true; 4281 info->compl_ctx = compl_ctx; 4282 info->in_rdrsp_wr = true; 4283 break; 4284 case IRDMA_AE_SOURCE_OUT_RR: 4285 case IRDMA_AE_SOURCE_OUT_RR_1111: 4286 info->qp = true; 4287 info->compl_ctx = compl_ctx; 4288 info->out_rdrsp = true; 4289 break; 4290 case IRDMA_AE_SOURCE_RSVD: 4291 default: 4292 break; 4293 } 4294 4295 IRDMA_RING_MOVE_TAIL(aeq->aeq_ring); 4296 if (!IRDMA_RING_CURRENT_TAIL(aeq->aeq_ring)) 4297 aeq->polarity ^= 1; 4298 4299 return 0; 4300 } 4301 4302 /** 4303 * irdma_sc_repost_aeq_entries - repost completed aeq entries 4304 * @dev: sc device struct 4305 * @count: allocate count 4306 */ 4307 int 4308 irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count) 4309 { 4310 writel(count, dev->hw_regs[IRDMA_AEQALLOC]); 4311 4312 return 0; 4313 } 4314 4315 /** 4316 * irdma_sc_ccq_init - initialize control cq 4317 
* @cq: sc's cq ctruct 4318 * @info: info for control cq initialization 4319 */ 4320 int 4321 irdma_sc_ccq_init(struct irdma_sc_cq *cq, struct irdma_ccq_init_info *info) 4322 { 4323 u32 pble_obj_cnt; 4324 4325 if (info->num_elem < info->dev->hw_attrs.uk_attrs.min_hw_cq_size || 4326 info->num_elem > info->dev->hw_attrs.uk_attrs.max_hw_cq_size) 4327 return -EINVAL; 4328 4329 if (info->ceq_id > (info->dev->hmc_fpm_misc.max_ceqs - 1)) 4330 return -EINVAL; 4331 4332 pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt; 4333 4334 if (info->virtual_map && info->first_pm_pbl_idx >= pble_obj_cnt) 4335 return -EINVAL; 4336 4337 cq->cq_pa = info->cq_pa; 4338 cq->cq_uk.cq_base = info->cq_base; 4339 cq->shadow_area_pa = info->shadow_area_pa; 4340 cq->cq_uk.shadow_area = info->shadow_area; 4341 cq->shadow_read_threshold = info->shadow_read_threshold; 4342 cq->dev = info->dev; 4343 cq->ceq_id = info->ceq_id; 4344 cq->cq_uk.cq_size = info->num_elem; 4345 cq->cq_type = IRDMA_CQ_TYPE_CQP; 4346 cq->ceqe_mask = info->ceqe_mask; 4347 IRDMA_RING_INIT(cq->cq_uk.cq_ring, info->num_elem); 4348 cq->cq_uk.cq_id = 0; /* control cq is id 0 always */ 4349 cq->ceq_id_valid = info->ceq_id_valid; 4350 cq->tph_en = info->tph_en; 4351 cq->tph_val = info->tph_val; 4352 cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct; 4353 cq->pbl_list = info->pbl_list; 4354 cq->virtual_map = info->virtual_map; 4355 cq->pbl_chunk_size = info->pbl_chunk_size; 4356 cq->first_pm_pbl_idx = info->first_pm_pbl_idx; 4357 cq->cq_uk.polarity = true; 4358 cq->vsi = info->vsi; 4359 cq->cq_uk.cq_ack_db = cq->dev->cq_ack_db; 4360 4361 /* Only applicable to CQs other than CCQ so initialize to zero */ 4362 cq->cq_uk.cqe_alloc_db = NULL; 4363 4364 info->dev->ccq = cq; 4365 return 0; 4366 } 4367 4368 /** 4369 * irdma_sc_ccq_create_done - poll cqp for ccq create 4370 * @ccq: ccq sc struct 4371 */ 4372 static inline int 4373 irdma_sc_ccq_create_done(struct irdma_sc_cq *ccq) 4374 { 4375 struct irdma_sc_cqp *cqp; 4376 4377 cqp = ccq->dev->cqp; 4378 4379 return irdma_sc_poll_for_cqp_op_done(cqp, IRDMA_CQP_OP_CREATE_CQ, NULL); 4380 } 4381 4382 /** 4383 * irdma_sc_ccq_create - create control cq 4384 * @ccq: ccq sc struct 4385 * @scratch: u64 saved to be used during cqp completion 4386 * @check_overflow: overlow flag for ccq 4387 * @post_sq: flag for cqp db to ring 4388 */ 4389 int 4390 irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch, 4391 bool check_overflow, bool post_sq) 4392 { 4393 int ret_code; 4394 4395 ret_code = irdma_sc_cq_create(ccq, scratch, check_overflow, post_sq); 4396 if (ret_code) 4397 return ret_code; 4398 4399 if (post_sq) { 4400 ret_code = irdma_sc_ccq_create_done(ccq); 4401 if (ret_code) 4402 return ret_code; 4403 } 4404 ccq->dev->cqp->process_cqp_sds = irdma_cqp_sds_cmd; 4405 4406 return 0; 4407 } 4408 4409 /** 4410 * irdma_sc_ccq_destroy - destroy ccq during close 4411 * @ccq: ccq sc struct 4412 * @scratch: u64 saved to be used during cqp completion 4413 * @post_sq: flag for cqp db to ring 4414 */ 4415 int 4416 irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq) 4417 { 4418 struct irdma_sc_cqp *cqp; 4419 __le64 *wqe; 4420 u64 hdr; 4421 int ret_code = 0; 4422 u32 tail, val, error; 4423 4424 cqp = ccq->dev->cqp; 4425 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 4426 if (!wqe) 4427 return -ENOSPC; 4428 4429 set_64bit_val(wqe, IRDMA_BYTE_0, ccq->cq_uk.cq_size); 4430 set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(ccq, 1)); 4431 set_64bit_val(wqe, IRDMA_BYTE_40, ccq->shadow_area_pa); 4432 4433 hdr = 
ccq->cq_uk.cq_id | 4434 FLD_LS_64(ccq->dev, (ccq->ceq_id_valid ? ccq->ceq_id : 0), 4435 IRDMA_CQPSQ_CQ_CEQID) | 4436 LS_64(IRDMA_CQP_OP_DESTROY_CQ, IRDMA_CQPSQ_OPCODE) | 4437 LS_64(ccq->ceqe_mask, IRDMA_CQPSQ_CQ_ENCEQEMASK) | 4438 LS_64(ccq->ceq_id_valid, IRDMA_CQPSQ_CQ_CEQIDVALID) | 4439 LS_64(ccq->tph_en, IRDMA_CQPSQ_TPHEN) | 4440 LS_64(ccq->cq_uk.avoid_mem_cflct, IRDMA_CQPSQ_CQ_AVOIDMEMCNFLCT) | 4441 LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID); 4442 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 4443 4444 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 4445 4446 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "CCQ_DESTROY WQE", wqe, 4447 IRDMA_CQP_WQE_SIZE * 8); 4448 irdma_get_cqp_reg_info(cqp, &val, &tail, &error); 4449 4450 if (post_sq) { 4451 irdma_sc_cqp_post_sq(cqp); 4452 ret_code = irdma_cqp_poll_registers(cqp, tail, 4453 cqp->dev->hw_attrs.max_done_count); 4454 } 4455 4456 cqp->process_cqp_sds = irdma_update_sds_noccq; 4457 4458 return ret_code; 4459 } 4460 4461 /** 4462 * irdma_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info 4463 * @dev : ptr to irdma_dev struct 4464 * @hmc_fn_id: hmc function id 4465 */ 4466 int 4467 irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u8 hmc_fn_id) 4468 { 4469 struct irdma_hmc_info *hmc_info; 4470 struct irdma_hmc_fpm_misc *hmc_fpm_misc; 4471 struct irdma_dma_mem query_fpm_mem; 4472 int ret_code = 0; 4473 u8 wait_type; 4474 4475 hmc_info = dev->hmc_info; 4476 hmc_fpm_misc = &dev->hmc_fpm_misc; 4477 query_fpm_mem.pa = dev->fpm_query_buf_pa; 4478 query_fpm_mem.va = dev->fpm_query_buf; 4479 hmc_info->hmc_fn_id = hmc_fn_id; 4480 wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS; 4481 4482 ret_code = irdma_sc_query_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id, 4483 &query_fpm_mem, true, wait_type); 4484 if (ret_code) 4485 return ret_code; 4486 4487 /* parse the fpm_query_buf and fill hmc obj info */ 4488 ret_code = irdma_sc_parse_fpm_query_buf(dev, query_fpm_mem.va, hmc_info, 4489 hmc_fpm_misc); 4490 4491 irdma_debug_buf(dev, IRDMA_DEBUG_HMC, "QUERY FPM BUFFER", 4492 query_fpm_mem.va, IRDMA_QUERY_FPM_BUF_SIZE); 4493 return ret_code; 4494 } 4495 4496 /** 4497 * irdma_sc_cfg_iw_fpm() - commits hmc obj cnt values using cqp 4498 * command and populates fpm base address in hmc_info 4499 * @dev : ptr to irdma_dev struct 4500 * @hmc_fn_id: hmc function id 4501 */ 4502 static int 4503 irdma_sc_cfg_iw_fpm(struct irdma_sc_dev *dev, u8 hmc_fn_id) 4504 { 4505 struct irdma_hmc_obj_info *obj_info; 4506 __le64 *buf; 4507 struct irdma_hmc_info *hmc_info; 4508 struct irdma_dma_mem commit_fpm_mem; 4509 int ret_code = 0; 4510 u8 wait_type; 4511 4512 hmc_info = dev->hmc_info; 4513 obj_info = hmc_info->hmc_obj; 4514 buf = dev->fpm_commit_buf; 4515 4516 set_64bit_val(buf, IRDMA_BYTE_0, (u64)obj_info[IRDMA_HMC_IW_QP].cnt); 4517 set_64bit_val(buf, IRDMA_BYTE_8, (u64)obj_info[IRDMA_HMC_IW_CQ].cnt); 4518 set_64bit_val(buf, IRDMA_BYTE_16, (u64)0); /* RSRVD */ 4519 set_64bit_val(buf, IRDMA_BYTE_24, (u64)obj_info[IRDMA_HMC_IW_HTE].cnt); 4520 set_64bit_val(buf, IRDMA_BYTE_32, (u64)obj_info[IRDMA_HMC_IW_ARP].cnt); 4521 set_64bit_val(buf, IRDMA_BYTE_40, (u64)0); /* RSVD */ 4522 set_64bit_val(buf, IRDMA_BYTE_48, (u64)obj_info[IRDMA_HMC_IW_MR].cnt); 4523 set_64bit_val(buf, IRDMA_BYTE_56, (u64)obj_info[IRDMA_HMC_IW_XF].cnt); 4524 set_64bit_val(buf, IRDMA_BYTE_64, (u64)obj_info[IRDMA_HMC_IW_XFFL].cnt); 4525 set_64bit_val(buf, IRDMA_BYTE_72, (u64)obj_info[IRDMA_HMC_IW_Q1].cnt); 4526 set_64bit_val(buf, IRDMA_BYTE_80, (u64)obj_info[IRDMA_HMC_IW_Q1FL].cnt); 4527 
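	/*
	 * The remaining HMC object counts continue at consecutive 8-byte
	 * offsets below; the completed buffer is then handed to firmware via
	 * a COMMIT_FPM_VAL CQP command and parsed back to populate the
	 * committed object info and SD count in hmc_info.
	 */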
set_64bit_val(buf, IRDMA_BYTE_88, 4528 (u64)obj_info[IRDMA_HMC_IW_TIMER].cnt); 4529 set_64bit_val(buf, IRDMA_BYTE_96, 4530 (u64)obj_info[IRDMA_HMC_IW_FSIMC].cnt); 4531 set_64bit_val(buf, IRDMA_BYTE_104, 4532 (u64)obj_info[IRDMA_HMC_IW_FSIAV].cnt); 4533 set_64bit_val(buf, IRDMA_BYTE_112, 4534 (u64)obj_info[IRDMA_HMC_IW_PBLE].cnt); 4535 set_64bit_val(buf, IRDMA_BYTE_120, (u64)0); /* RSVD */ 4536 set_64bit_val(buf, IRDMA_BYTE_128, (u64)obj_info[IRDMA_HMC_IW_RRF].cnt); 4537 set_64bit_val(buf, IRDMA_BYTE_136, 4538 (u64)obj_info[IRDMA_HMC_IW_RRFFL].cnt); 4539 set_64bit_val(buf, IRDMA_BYTE_144, (u64)obj_info[IRDMA_HMC_IW_HDR].cnt); 4540 set_64bit_val(buf, IRDMA_BYTE_152, (u64)obj_info[IRDMA_HMC_IW_MD].cnt); 4541 set_64bit_val(buf, IRDMA_BYTE_160, 4542 (u64)obj_info[IRDMA_HMC_IW_OOISC].cnt); 4543 set_64bit_val(buf, IRDMA_BYTE_168, 4544 (u64)obj_info[IRDMA_HMC_IW_OOISCFFL].cnt); 4545 commit_fpm_mem.pa = dev->fpm_commit_buf_pa; 4546 commit_fpm_mem.va = dev->fpm_commit_buf; 4547 4548 wait_type = (u8)IRDMA_CQP_WAIT_POLL_REGS; 4549 irdma_debug_buf(dev, IRDMA_DEBUG_HMC, "COMMIT FPM BUFFER", 4550 commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE); 4551 ret_code = irdma_sc_commit_fpm_val(dev->cqp, 0, hmc_info->hmc_fn_id, 4552 &commit_fpm_mem, true, wait_type); 4553 if (!ret_code) 4554 ret_code = irdma_sc_parse_fpm_commit_buf(dev, dev->fpm_commit_buf, 4555 hmc_info->hmc_obj, 4556 &hmc_info->sd_table.sd_cnt); 4557 irdma_debug_buf(dev, IRDMA_DEBUG_HMC, "COMMIT FPM BUFFER", 4558 commit_fpm_mem.va, IRDMA_COMMIT_FPM_BUF_SIZE); 4559 4560 return ret_code; 4561 } 4562 4563 /** 4564 * cqp_sds_wqe_fill - fill cqp wqe doe sd 4565 * @cqp: struct for cqp hw 4566 * @info: sd info for wqe 4567 * @scratch: u64 saved to be used during cqp completion 4568 */ 4569 static int 4570 cqp_sds_wqe_fill(struct irdma_sc_cqp *cqp, 4571 struct irdma_update_sds_info *info, u64 scratch) 4572 { 4573 u64 data; 4574 u64 hdr; 4575 __le64 *wqe; 4576 int mem_entries, wqe_entries; 4577 struct irdma_dma_mem *sdbuf = &cqp->sdbuf; 4578 u64 offset = 0; 4579 u32 wqe_idx; 4580 4581 wqe = irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx); 4582 if (!wqe) 4583 return -ENOSPC; 4584 4585 wqe_entries = (info->cnt > 3) ? 
3 : info->cnt; 4586 mem_entries = info->cnt - wqe_entries; 4587 4588 if (mem_entries) { 4589 offset = wqe_idx * IRDMA_UPDATE_SD_BUFF_SIZE; 4590 irdma_memcpy(((char *)sdbuf->va + offset), &info->entry[3], mem_entries << 4); 4591 4592 data = (u64)sdbuf->pa + offset; 4593 } else { 4594 data = 0; 4595 } 4596 data |= LS_64(info->hmc_fn_id, IRDMA_CQPSQ_UPESD_HMCFNID); 4597 set_64bit_val(wqe, IRDMA_BYTE_16, data); 4598 4599 switch (wqe_entries) { 4600 case 3: 4601 set_64bit_val(wqe, IRDMA_BYTE_48, 4602 (LS_64(info->entry[2].cmd, IRDMA_CQPSQ_UPESD_SDCMD) | 4603 LS_64(1, IRDMA_CQPSQ_UPESD_ENTRY_VALID))); 4604 4605 set_64bit_val(wqe, IRDMA_BYTE_56, info->entry[2].data); 4606 /* fallthrough */ 4607 case 2: 4608 set_64bit_val(wqe, IRDMA_BYTE_32, 4609 (LS_64(info->entry[1].cmd, IRDMA_CQPSQ_UPESD_SDCMD) | 4610 LS_64(1, IRDMA_CQPSQ_UPESD_ENTRY_VALID))); 4611 4612 set_64bit_val(wqe, IRDMA_BYTE_40, info->entry[1].data); 4613 /* fallthrough */ 4614 case 1: 4615 set_64bit_val(wqe, IRDMA_BYTE_0, 4616 LS_64(info->entry[0].cmd, IRDMA_CQPSQ_UPESD_SDCMD)); 4617 4618 set_64bit_val(wqe, IRDMA_BYTE_8, info->entry[0].data); 4619 break; 4620 default: 4621 break; 4622 } 4623 4624 hdr = LS_64(IRDMA_CQP_OP_UPDATE_PE_SDS, IRDMA_CQPSQ_OPCODE) | 4625 LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID) | 4626 LS_64(mem_entries, IRDMA_CQPSQ_UPESD_ENTRY_COUNT); 4627 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 4628 4629 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 4630 4631 if (mem_entries) 4632 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "UPDATE_PE_SDS WQE Buffer", 4633 (char *)sdbuf->va + offset, mem_entries << 4); 4634 4635 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "UPDATE_PE_SDS WQE", wqe, 4636 IRDMA_CQP_WQE_SIZE * 8); 4637 4638 return 0; 4639 } 4640 4641 /** 4642 * irdma_update_pe_sds - cqp wqe for sd 4643 * @dev: ptr to irdma_dev struct 4644 * @info: sd info for sd's 4645 * @scratch: u64 saved to be used during cqp completion 4646 */ 4647 static int 4648 irdma_update_pe_sds(struct irdma_sc_dev *dev, 4649 struct irdma_update_sds_info *info, u64 scratch) 4650 { 4651 struct irdma_sc_cqp *cqp = dev->cqp; 4652 int ret_code; 4653 4654 ret_code = cqp_sds_wqe_fill(cqp, info, scratch); 4655 if (!ret_code) 4656 irdma_sc_cqp_post_sq(cqp); 4657 4658 return ret_code; 4659 } 4660 4661 /** 4662 * irdma_update_sds_noccq - update sd before ccq created 4663 * @dev: sc device struct 4664 * @info: sd info for sd's 4665 */ 4666 int 4667 irdma_update_sds_noccq(struct irdma_sc_dev *dev, 4668 struct irdma_update_sds_info *info) 4669 { 4670 u32 error, val, tail; 4671 struct irdma_sc_cqp *cqp = dev->cqp; 4672 int ret_code; 4673 4674 ret_code = cqp_sds_wqe_fill(cqp, info, 0); 4675 if (ret_code) 4676 return ret_code; 4677 4678 irdma_get_cqp_reg_info(cqp, &val, &tail, &error); 4679 4680 irdma_sc_cqp_post_sq(cqp); 4681 return irdma_cqp_poll_registers(cqp, tail, 4682 cqp->dev->hw_attrs.max_done_count); 4683 } 4684 4685 /** 4686 * irdma_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages 4687 * @cqp: struct for cqp hw 4688 * @scratch: u64 saved to be used during cqp completion 4689 * @hmc_fn_id: hmc function id 4690 * @post_sq: flag for cqp db to ring 4691 * @poll_registers: flag to poll register for cqp completion 4692 */ 4693 int 4694 irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch, 4695 u8 hmc_fn_id, bool post_sq, 4696 bool poll_registers) 4697 { 4698 u64 hdr; 4699 __le64 *wqe; 4700 u32 tail, val, error; 4701 4702 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 4703 if (!wqe) 4704 return -ENOSPC; 4705 
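	/*
	 * Completion of this command can be detected either by polling the
	 * CQP tail registers or by polling the CCQ for the
	 * SHMC_PAGES_ALLOCATED opcode, depending on the poll_registers flag
	 * handled below.
	 */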
4706 set_64bit_val(wqe, IRDMA_BYTE_16, 4707 LS_64(hmc_fn_id, IRDMA_SHMC_PAGE_ALLOCATED_HMC_FN_ID)); 4708 4709 hdr = LS_64(IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED, IRDMA_CQPSQ_OPCODE) | 4710 LS_64(cqp->polarity, IRDMA_CQPSQ_WQEVALID); 4711 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 4712 4713 set_64bit_val(wqe, IRDMA_BYTE_24, hdr); 4714 4715 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "SHMC_PAGES_ALLOCATED WQE", 4716 wqe, IRDMA_CQP_WQE_SIZE * 8); 4717 irdma_get_cqp_reg_info(cqp, &val, &tail, &error); 4718 4719 if (post_sq) { 4720 irdma_sc_cqp_post_sq(cqp); 4721 if (poll_registers) 4722 /* check for cqp sq tail update */ 4723 return irdma_cqp_poll_registers(cqp, tail, 4724 cqp->dev->hw_attrs.max_done_count); 4725 else 4726 return irdma_sc_poll_for_cqp_op_done(cqp, 4727 IRDMA_CQP_OP_SHMC_PAGES_ALLOCATED, 4728 NULL); 4729 } 4730 4731 return 0; 4732 } 4733 4734 /** 4735 * irdma_cqp_ring_full - check if cqp ring is full 4736 * @cqp: struct for cqp hw 4737 */ 4738 static bool 4739 irdma_cqp_ring_full(struct irdma_sc_cqp *cqp) 4740 { 4741 return IRDMA_RING_FULL_ERR(cqp->sq_ring); 4742 } 4743 4744 /** 4745 * irdma_est_sd - returns approximate number of SDs for HMC 4746 * @dev: sc device struct 4747 * @hmc_info: hmc structure, size and count for HMC objects 4748 */ 4749 static u32 irdma_est_sd(struct irdma_sc_dev *dev, 4750 struct irdma_hmc_info *hmc_info){ 4751 int i; 4752 u64 size = 0; 4753 u64 sd; 4754 4755 for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) 4756 if (i != IRDMA_HMC_IW_PBLE) 4757 size += round_up(hmc_info->hmc_obj[i].cnt * 4758 hmc_info->hmc_obj[i].size, 512); 4759 size += round_up(hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt * 4760 hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].size, 512); 4761 if (size & 0x1FFFFF) 4762 sd = (size >> 21) + 1; /* add 1 for remainder */ 4763 else 4764 sd = size >> 21; 4765 if (sd > 0xFFFFFFFF) { 4766 irdma_debug(dev, IRDMA_DEBUG_HMC, "sd overflow[%ld]\n", sd); 4767 sd = 0xFFFFFFFF - 1; 4768 } 4769 4770 return (u32)sd; 4771 } 4772 4773 /** 4774 * irdma_sc_query_rdma_features - query RDMA features and FW ver 4775 * @cqp: struct for cqp hw 4776 * @buf: buffer to hold query info 4777 * @scratch: u64 saved to be used during cqp completion 4778 */ 4779 static int 4780 irdma_sc_query_rdma_features(struct irdma_sc_cqp *cqp, 4781 struct irdma_dma_mem *buf, u64 scratch) 4782 { 4783 __le64 *wqe; 4784 u64 temp; 4785 u32 tail, val, error; 4786 int status; 4787 4788 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); 4789 if (!wqe) 4790 return -ENOSPC; 4791 4792 temp = buf->pa; 4793 set_64bit_val(wqe, IRDMA_BYTE_32, temp); 4794 4795 temp = LS_64(cqp->polarity, IRDMA_CQPSQ_QUERY_RDMA_FEATURES_WQEVALID) | 4796 LS_64(buf->size, IRDMA_CQPSQ_QUERY_RDMA_FEATURES_BUF_LEN) | 4797 LS_64(IRDMA_CQP_OP_QUERY_RDMA_FEATURES, IRDMA_CQPSQ_UP_OP); 4798 irdma_wmb(); /* make sure WQE is written before valid bit is set */ 4799 4800 set_64bit_val(wqe, IRDMA_BYTE_24, temp); 4801 4802 irdma_debug_buf(cqp->dev, IRDMA_DEBUG_WQE, "QUERY RDMA FEATURES", wqe, 4803 IRDMA_CQP_WQE_SIZE * 8); 4804 irdma_get_cqp_reg_info(cqp, &val, &tail, &error); 4805 4806 irdma_sc_cqp_post_sq(cqp); 4807 status = irdma_cqp_poll_registers(cqp, tail, 4808 cqp->dev->hw_attrs.max_done_count); 4809 if (error || status) 4810 status = -EIO; 4811 4812 return status; 4813 } 4814 4815 /** 4816 * irdma_get_rdma_features - get RDMA features 4817 * @dev: sc device struct 4818 */ 4819 int 4820 irdma_get_rdma_features(struct irdma_sc_dev *dev) 4821 { 4822 int ret_code; 4823 struct irdma_dma_mem feat_buf; 4824 u64 temp; 
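	/*
	 * Feature discovery is a two-step query: the first 64-bit word of the
	 * returned buffer reports how many feature entries firmware supports.
	 * If that count exceeds what the default-sized buffer can hold, the
	 * buffer is reallocated at 8 bytes per feature and the query is
	 * repeated before the entries are cached in dev->feature_info[].
	 */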
4825 u16 byte_idx, feat_type, feat_cnt, feat_idx; 4826 4827 feat_buf.size = IRDMA_FEATURE_BUF_SIZE; 4828 feat_buf.va = irdma_allocate_dma_mem(dev->hw, &feat_buf, feat_buf.size, 4829 IRDMA_FEATURE_BUF_ALIGNMENT); 4830 if (!feat_buf.va) 4831 return -ENOMEM; 4832 4833 ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0); 4834 if (ret_code) 4835 goto exit; 4836 4837 get_64bit_val(feat_buf.va, IRDMA_BYTE_0, &temp); 4838 feat_cnt = (u16)RS_64(temp, IRDMA_FEATURE_CNT); 4839 if (feat_cnt < IRDMA_MIN_FEATURES) { 4840 ret_code = -EINVAL; 4841 goto exit; 4842 } else if (feat_cnt > IRDMA_MAX_FEATURES) { 4843 irdma_debug(dev, IRDMA_DEBUG_DEV, 4844 "feature buf size insufficient," 4845 "retrying with larger buffer\n"); 4846 irdma_free_dma_mem(dev->hw, &feat_buf); 4847 feat_buf.size = 8 * feat_cnt; 4848 feat_buf.va = irdma_allocate_dma_mem(dev->hw, &feat_buf, 4849 feat_buf.size, 4850 IRDMA_FEATURE_BUF_ALIGNMENT); 4851 if (!feat_buf.va) 4852 return -ENOMEM; 4853 4854 ret_code = irdma_sc_query_rdma_features(dev->cqp, &feat_buf, 0); 4855 if (ret_code) 4856 goto exit; 4857 4858 get_64bit_val(feat_buf.va, IRDMA_BYTE_0, &temp); 4859 feat_cnt = (u16)RS_64(temp, IRDMA_FEATURE_CNT); 4860 if (feat_cnt < IRDMA_MIN_FEATURES) { 4861 ret_code = -EINVAL; 4862 goto exit; 4863 } 4864 } 4865 4866 irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "QUERY RDMA FEATURES", feat_buf.va, 4867 feat_cnt * 8); 4868 4869 for (byte_idx = 0, feat_idx = 0; feat_idx < min(feat_cnt, (u16)IRDMA_MAX_FEATURES); 4870 feat_idx++, byte_idx += 8) { 4871 get_64bit_val(feat_buf.va, byte_idx, &temp); 4872 feat_type = RS_64(temp, IRDMA_FEATURE_TYPE); 4873 dev->feature_info[feat_type] = temp; 4874 } 4875 exit: 4876 irdma_free_dma_mem(dev->hw, &feat_buf); 4877 return ret_code; 4878 } 4879 4880 static u32 irdma_q1_cnt(struct irdma_sc_dev *dev, 4881 struct irdma_hmc_info *hmc_info, u32 qpwanted){ 4882 u32 q1_cnt; 4883 4884 if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) { 4885 q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted); 4886 } else { 4887 if (dev->cqp->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY) 4888 q1_cnt = roundup_pow_of_two(dev->hw_attrs.max_hw_ird * 2 * qpwanted + 512); 4889 else 4890 q1_cnt = dev->hw_attrs.max_hw_ird * 2 * qpwanted; 4891 } 4892 4893 return q1_cnt; 4894 } 4895 4896 static void 4897 cfg_fpm_value_gen_1(struct irdma_sc_dev *dev, 4898 struct irdma_hmc_info *hmc_info, u32 qpwanted) 4899 { 4900 hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt = roundup_pow_of_two(qpwanted * dev->hw_attrs.max_hw_wqes); 4901 } 4902 4903 static void 4904 cfg_fpm_value_gen_2(struct irdma_sc_dev *dev, 4905 struct irdma_hmc_info *hmc_info, u32 qpwanted) 4906 { 4907 struct irdma_hmc_fpm_misc *hmc_fpm_misc = &dev->hmc_fpm_misc; 4908 4909 hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt = 4910 4 * hmc_fpm_misc->xf_block_size * qpwanted; 4911 4912 hmc_info->hmc_obj[IRDMA_HMC_IW_HDR].cnt = qpwanted; 4913 4914 if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].max_cnt) 4915 hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt = 32 * qpwanted; 4916 if (hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].max_cnt) 4917 hmc_info->hmc_obj[IRDMA_HMC_IW_RRFFL].cnt = 4918 hmc_info->hmc_obj[IRDMA_HMC_IW_RRF].cnt / 4919 hmc_fpm_misc->rrf_block_size; 4920 if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].max_cnt) 4921 hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt = 32 * qpwanted; 4922 if (hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].max_cnt) 4923 hmc_info->hmc_obj[IRDMA_HMC_IW_OOISCFFL].cnt = 4924 hmc_info->hmc_obj[IRDMA_HMC_IW_OOISC].cnt / 4925 hmc_fpm_misc->ooiscf_block_size; 4926 } 4927 4928 /** 4929 * 
irdma_cfg_fpm_val - configure HMC objects 4930 * @dev: sc device struct 4931 * @qp_count: desired qp count 4932 */ 4933 int 4934 irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count) 4935 { 4936 struct irdma_virt_mem virt_mem; 4937 u32 i, mem_size; 4938 u32 qpwanted, mrwanted, pblewanted; 4939 u32 powerof2, hte; 4940 u32 sd_needed; 4941 u32 sd_diff; 4942 u32 loop_count = 0; 4943 struct irdma_hmc_info *hmc_info; 4944 struct irdma_hmc_fpm_misc *hmc_fpm_misc; 4945 int ret_code = 0; 4946 u32 max_sds; 4947 4948 hmc_info = dev->hmc_info; 4949 hmc_fpm_misc = &dev->hmc_fpm_misc; 4950 ret_code = irdma_sc_init_iw_hmc(dev, dev->hmc_fn_id); 4951 if (ret_code) { 4952 irdma_debug(dev, IRDMA_DEBUG_HMC, 4953 "irdma_sc_init_iw_hmc returned error_code = %d\n", 4954 ret_code); 4955 return ret_code; 4956 } 4957 4958 max_sds = hmc_fpm_misc->max_sds; 4959 4960 for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) 4961 hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt; 4962 4963 sd_needed = irdma_est_sd(dev, hmc_info); 4964 irdma_debug(dev, IRDMA_DEBUG_HMC, "sd count %d where max sd is %d\n", 4965 hmc_info->sd_table.sd_cnt, max_sds); 4966 4967 qpwanted = min(qp_count, hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt); 4968 4969 powerof2 = 1; 4970 while (powerof2 <= qpwanted) 4971 powerof2 *= 2; 4972 powerof2 /= 2; 4973 qpwanted = powerof2; 4974 4975 mrwanted = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt; 4976 pblewanted = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt; 4977 4978 irdma_debug(dev, IRDMA_DEBUG_HMC, 4979 "req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d, mc=%d, av=%d\n", 4980 qp_count, max_sds, 4981 hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt, 4982 hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt, 4983 hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt, 4984 hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt, 4985 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt, 4986 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt); 4987 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt = 4988 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].max_cnt; 4989 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt = 4990 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].max_cnt; 4991 hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt = 4992 hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].max_cnt; 4993 4994 hmc_info->hmc_obj[IRDMA_HMC_IW_APBVT_ENTRY].cnt = 1; 4995 4996 while (irdma_q1_cnt(dev, hmc_info, qpwanted) > hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].max_cnt) 4997 qpwanted /= 2; 4998 4999 if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) { 5000 cfg_fpm_value_gen_1(dev, hmc_info, qpwanted); 5001 while (hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt > hmc_info->hmc_obj[IRDMA_HMC_IW_XF].max_cnt) { 5002 qpwanted /= 2; 5003 cfg_fpm_value_gen_1(dev, hmc_info, qpwanted); 5004 } 5005 } 5006 5007 do { 5008 ++loop_count; 5009 hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt = qpwanted; 5010 hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt = 5011 min(2 * qpwanted, hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt); 5012 hmc_info->hmc_obj[IRDMA_HMC_IW_RESERVED].cnt = 0; /* Reserved */ 5013 hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt = mrwanted; 5014 5015 hte = round_up(qpwanted + hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt, 512); 5016 powerof2 = 1; 5017 while (powerof2 < hte) 5018 powerof2 *= 2; 5019 hmc_info->hmc_obj[IRDMA_HMC_IW_HTE].cnt = 5020 powerof2 * hmc_fpm_misc->ht_multiplier; 5021 if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) 5022 cfg_fpm_value_gen_1(dev, hmc_info, qpwanted); 5023 else 5024 cfg_fpm_value_gen_2(dev, hmc_info, qpwanted); 5025 5026 hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt = irdma_q1_cnt(dev, hmc_info, qpwanted); 5027 
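		/*
		 * The free-list objects below (XFFL, Q1FL) are derived from
		 * their parent counts divided by the firmware-reported block
		 * sizes. irdma_est_sd() then converts the aggregate footprint
		 * into 2 MB SDs; e.g. (illustrative numbers only) a footprint
		 * of 0x500000 bytes needs (0x500000 >> 21) + 1 = 3 SDs,
		 * because the low 21 bits are non-zero.
		 */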
hmc_info->hmc_obj[IRDMA_HMC_IW_XFFL].cnt = 5028 hmc_info->hmc_obj[IRDMA_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size; 5029 hmc_info->hmc_obj[IRDMA_HMC_IW_Q1FL].cnt = 5030 hmc_info->hmc_obj[IRDMA_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size; 5031 hmc_info->hmc_obj[IRDMA_HMC_IW_TIMER].cnt = 5032 (round_up(qpwanted, 512) / 512 + 1) * hmc_fpm_misc->timer_bucket; 5033 5034 hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted; 5035 sd_needed = irdma_est_sd(dev, hmc_info); 5036 irdma_debug(dev, IRDMA_DEBUG_HMC, 5037 "sd_needed = %d, max_sds=%d, mrwanted=%d, pblewanted=%d qpwanted=%d\n", 5038 sd_needed, max_sds, mrwanted, pblewanted, qpwanted); 5039 5040 /* Do not reduce resources further. All objects fit with max SDs */ 5041 if (sd_needed <= max_sds) 5042 break; 5043 5044 sd_diff = sd_needed - max_sds; 5045 if (sd_diff > 128) { 5046 if (!(loop_count % 2) && qpwanted > 128) { 5047 qpwanted /= 2; 5048 } else { 5049 mrwanted /= 2; 5050 pblewanted /= 2; 5051 } 5052 continue; 5053 } 5054 if (dev->cqp->hmc_profile != IRDMA_HMC_PROFILE_FAVOR_VF && 5055 pblewanted > (512 * FPM_MULTIPLIER * sd_diff)) { 5056 pblewanted -= 256 * FPM_MULTIPLIER * sd_diff; 5057 continue; 5058 } else if (pblewanted > (100 * FPM_MULTIPLIER)) { 5059 pblewanted -= 10 * FPM_MULTIPLIER; 5060 } else if (pblewanted > FPM_MULTIPLIER) { 5061 pblewanted -= FPM_MULTIPLIER; 5062 } else if (qpwanted <= 128) { 5063 if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt > 256) 5064 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt /= 2; 5065 if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256) 5066 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2; 5067 } 5068 if (mrwanted > FPM_MULTIPLIER) 5069 mrwanted -= FPM_MULTIPLIER; 5070 if (!(loop_count % 10) && qpwanted > 128) { 5071 qpwanted /= 2; 5072 if (hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt > 256) 5073 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt /= 2; 5074 } 5075 } while (loop_count < 2000); 5076 5077 if (sd_needed > max_sds) { 5078 irdma_debug(dev, IRDMA_DEBUG_HMC, 5079 "cfg_fpm failed loop_cnt=%d, sd_needed=%d, max sd count %d\n", 5080 loop_count, sd_needed, hmc_info->sd_table.sd_cnt); 5081 return -EINVAL; 5082 } 5083 5084 if (loop_count > 1 && sd_needed < max_sds) { 5085 pblewanted += (max_sds - sd_needed) * 256 * FPM_MULTIPLIER; 5086 hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt = pblewanted; 5087 sd_needed = irdma_est_sd(dev, hmc_info); 5088 } 5089 5090 irdma_debug(dev, IRDMA_DEBUG_HMC, 5091 "loop_cnt=%d, sd_needed=%d, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d, mc=%d, ah=%d, max sd count %d, first sd index %d\n", 5092 loop_count, sd_needed, 5093 hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt, 5094 hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt, 5095 hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt, 5096 hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt, 5097 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt, 5098 hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt, 5099 hmc_info->sd_table.sd_cnt, hmc_info->first_sd_index); 5100 5101 ret_code = irdma_sc_cfg_iw_fpm(dev, dev->hmc_fn_id); 5102 if (ret_code) { 5103 irdma_debug(dev, IRDMA_DEBUG_HMC, 5104 "cfg_iw_fpm returned error_code[x%08X]\n", 5105 readl(dev->hw_regs[IRDMA_CQPERRCODES])); 5106 return ret_code; 5107 } 5108 5109 mem_size = sizeof(struct irdma_hmc_sd_entry) * 5110 (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1); 5111 virt_mem.size = mem_size; 5112 virt_mem.va = kzalloc(virt_mem.size, GFP_ATOMIC); 5113 if (!virt_mem.va) { 5114 irdma_debug(dev, IRDMA_DEBUG_HMC, 5115 "failed to allocate memory for sd_entry buffer\n"); 5116 return -ENOMEM; 5117 } 5118 hmc_info->sd_table.sd_entry = virt_mem.va; 
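	/*
	 * Note: the sd_entry array allocated above is sized for sd_cnt
	 * descriptors plus first_sd_index + 1 extra slots, which allows
	 * entries to be addressed by absolute SD index.
	 */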
5119 5120 return ret_code; 5121 } 5122 5123 /** 5124 * irdma_exec_cqp_cmd - execute cqp cmd when wqe are available 5125 * @dev: rdma device 5126 * @pcmdinfo: cqp command info 5127 */ 5128 static int 5129 irdma_exec_cqp_cmd(struct irdma_sc_dev *dev, 5130 struct cqp_cmds_info *pcmdinfo) 5131 { 5132 int status; 5133 struct irdma_dma_mem val_mem; 5134 bool alloc = false; 5135 5136 dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++; 5137 switch (pcmdinfo->cqp_cmd) { 5138 case IRDMA_OP_CEQ_DESTROY: 5139 status = irdma_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq, 5140 pcmdinfo->in.u.ceq_destroy.scratch, 5141 pcmdinfo->post_sq); 5142 break; 5143 case IRDMA_OP_AEQ_DESTROY: 5144 status = irdma_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq, 5145 pcmdinfo->in.u.aeq_destroy.scratch, 5146 pcmdinfo->post_sq); 5147 5148 break; 5149 case IRDMA_OP_CEQ_CREATE: 5150 status = irdma_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq, 5151 pcmdinfo->in.u.ceq_create.scratch, 5152 pcmdinfo->post_sq); 5153 break; 5154 case IRDMA_OP_AEQ_CREATE: 5155 status = irdma_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq, 5156 pcmdinfo->in.u.aeq_create.scratch, 5157 pcmdinfo->post_sq); 5158 break; 5159 case IRDMA_OP_QP_UPLOAD_CONTEXT: 5160 status = irdma_sc_qp_upload_context(pcmdinfo->in.u.qp_upload_context.dev, 5161 &pcmdinfo->in.u.qp_upload_context.info, 5162 pcmdinfo->in.u.qp_upload_context.scratch, 5163 pcmdinfo->post_sq); 5164 break; 5165 case IRDMA_OP_CQ_CREATE: 5166 status = irdma_sc_cq_create(pcmdinfo->in.u.cq_create.cq, 5167 pcmdinfo->in.u.cq_create.scratch, 5168 pcmdinfo->in.u.cq_create.check_overflow, 5169 pcmdinfo->post_sq); 5170 break; 5171 case IRDMA_OP_CQ_MODIFY: 5172 status = irdma_sc_cq_modify(pcmdinfo->in.u.cq_modify.cq, 5173 &pcmdinfo->in.u.cq_modify.info, 5174 pcmdinfo->in.u.cq_modify.scratch, 5175 pcmdinfo->post_sq); 5176 break; 5177 case IRDMA_OP_CQ_DESTROY: 5178 status = irdma_sc_cq_destroy(pcmdinfo->in.u.cq_destroy.cq, 5179 pcmdinfo->in.u.cq_destroy.scratch, 5180 pcmdinfo->post_sq); 5181 break; 5182 case IRDMA_OP_QP_FLUSH_WQES: 5183 status = irdma_sc_qp_flush_wqes(pcmdinfo->in.u.qp_flush_wqes.qp, 5184 &pcmdinfo->in.u.qp_flush_wqes.info, 5185 pcmdinfo->in.u.qp_flush_wqes.scratch, 5186 pcmdinfo->post_sq); 5187 break; 5188 case IRDMA_OP_GEN_AE: 5189 status = irdma_sc_gen_ae(pcmdinfo->in.u.gen_ae.qp, 5190 &pcmdinfo->in.u.gen_ae.info, 5191 pcmdinfo->in.u.gen_ae.scratch, 5192 pcmdinfo->post_sq); 5193 break; 5194 case IRDMA_OP_MANAGE_PUSH_PAGE: 5195 status = irdma_sc_manage_push_page(pcmdinfo->in.u.manage_push_page.cqp, 5196 &pcmdinfo->in.u.manage_push_page.info, 5197 pcmdinfo->in.u.manage_push_page.scratch, 5198 pcmdinfo->post_sq); 5199 break; 5200 case IRDMA_OP_UPDATE_PE_SDS: 5201 status = irdma_update_pe_sds(pcmdinfo->in.u.update_pe_sds.dev, 5202 &pcmdinfo->in.u.update_pe_sds.info, 5203 pcmdinfo->in.u.update_pe_sds.scratch); 5204 break; 5205 case IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE: 5206 /* switch to calling through the call table */ 5207 status = 5208 irdma_sc_manage_hmc_pm_func_table(pcmdinfo->in.u.manage_hmc_pm.dev->cqp, 5209 &pcmdinfo->in.u.manage_hmc_pm.info, 5210 pcmdinfo->in.u.manage_hmc_pm.scratch, 5211 true); 5212 break; 5213 case IRDMA_OP_SUSPEND: 5214 status = irdma_sc_suspend_qp(pcmdinfo->in.u.suspend_resume.cqp, 5215 pcmdinfo->in.u.suspend_resume.qp, 5216 pcmdinfo->in.u.suspend_resume.scratch); 5217 break; 5218 case IRDMA_OP_RESUME: 5219 status = irdma_sc_resume_qp(pcmdinfo->in.u.suspend_resume.cqp, 5220 pcmdinfo->in.u.suspend_resume.qp, 5221 pcmdinfo->in.u.suspend_resume.scratch); 5222 break; 5223 case 
IRDMA_OP_QUERY_FPM_VAL: 5224 val_mem.pa = pcmdinfo->in.u.query_fpm_val.fpm_val_pa; 5225 val_mem.va = pcmdinfo->in.u.query_fpm_val.fpm_val_va; 5226 status = irdma_sc_query_fpm_val(pcmdinfo->in.u.query_fpm_val.cqp, 5227 pcmdinfo->in.u.query_fpm_val.scratch, 5228 pcmdinfo->in.u.query_fpm_val.hmc_fn_id, 5229 &val_mem, true, IRDMA_CQP_WAIT_EVENT); 5230 break; 5231 case IRDMA_OP_COMMIT_FPM_VAL: 5232 val_mem.pa = pcmdinfo->in.u.commit_fpm_val.fpm_val_pa; 5233 val_mem.va = pcmdinfo->in.u.commit_fpm_val.fpm_val_va; 5234 status = irdma_sc_commit_fpm_val(pcmdinfo->in.u.commit_fpm_val.cqp, 5235 pcmdinfo->in.u.commit_fpm_val.scratch, 5236 pcmdinfo->in.u.commit_fpm_val.hmc_fn_id, 5237 &val_mem, 5238 true, 5239 IRDMA_CQP_WAIT_EVENT); 5240 break; 5241 case IRDMA_OP_STATS_ALLOCATE: 5242 alloc = true; 5243 /* fallthrough */ 5244 case IRDMA_OP_STATS_FREE: 5245 status = irdma_sc_manage_stats_inst(pcmdinfo->in.u.stats_manage.cqp, 5246 &pcmdinfo->in.u.stats_manage.info, 5247 alloc, 5248 pcmdinfo->in.u.stats_manage.scratch); 5249 break; 5250 case IRDMA_OP_STATS_GATHER: 5251 status = irdma_sc_gather_stats(pcmdinfo->in.u.stats_gather.cqp, 5252 &pcmdinfo->in.u.stats_gather.info, 5253 pcmdinfo->in.u.stats_gather.scratch); 5254 break; 5255 case IRDMA_OP_WS_MODIFY_NODE: 5256 status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp, 5257 &pcmdinfo->in.u.ws_node.info, 5258 IRDMA_MODIFY_NODE, 5259 pcmdinfo->in.u.ws_node.scratch); 5260 break; 5261 case IRDMA_OP_WS_DELETE_NODE: 5262 status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp, 5263 &pcmdinfo->in.u.ws_node.info, 5264 IRDMA_DEL_NODE, 5265 pcmdinfo->in.u.ws_node.scratch); 5266 break; 5267 case IRDMA_OP_WS_ADD_NODE: 5268 status = irdma_sc_manage_ws_node(pcmdinfo->in.u.ws_node.cqp, 5269 &pcmdinfo->in.u.ws_node.info, 5270 IRDMA_ADD_NODE, 5271 pcmdinfo->in.u.ws_node.scratch); 5272 break; 5273 case IRDMA_OP_SET_UP_MAP: 5274 status = irdma_sc_set_up_map(pcmdinfo->in.u.up_map.cqp, 5275 &pcmdinfo->in.u.up_map.info, 5276 pcmdinfo->in.u.up_map.scratch); 5277 break; 5278 case IRDMA_OP_QUERY_RDMA_FEATURES: 5279 status = irdma_sc_query_rdma_features(pcmdinfo->in.u.query_rdma.cqp, 5280 &pcmdinfo->in.u.query_rdma.query_buff_mem, 5281 pcmdinfo->in.u.query_rdma.scratch); 5282 break; 5283 case IRDMA_OP_DELETE_ARP_CACHE_ENTRY: 5284 status = irdma_sc_del_arp_cache_entry(pcmdinfo->in.u.del_arp_cache_entry.cqp, 5285 pcmdinfo->in.u.del_arp_cache_entry.scratch, 5286 pcmdinfo->in.u.del_arp_cache_entry.arp_index, 5287 pcmdinfo->post_sq); 5288 break; 5289 case IRDMA_OP_MANAGE_APBVT_ENTRY: 5290 status = irdma_sc_manage_apbvt_entry(pcmdinfo->in.u.manage_apbvt_entry.cqp, 5291 &pcmdinfo->in.u.manage_apbvt_entry.info, 5292 pcmdinfo->in.u.manage_apbvt_entry.scratch, 5293 pcmdinfo->post_sq); 5294 break; 5295 case IRDMA_OP_MANAGE_QHASH_TABLE_ENTRY: 5296 status = irdma_sc_manage_qhash_table_entry(pcmdinfo->in.u.manage_qhash_table_entry.cqp, 5297 &pcmdinfo->in.u.manage_qhash_table_entry.info, 5298 pcmdinfo->in.u.manage_qhash_table_entry.scratch, 5299 pcmdinfo->post_sq); 5300 break; 5301 case IRDMA_OP_QP_MODIFY: 5302 status = irdma_sc_qp_modify(pcmdinfo->in.u.qp_modify.qp, 5303 &pcmdinfo->in.u.qp_modify.info, 5304 pcmdinfo->in.u.qp_modify.scratch, 5305 pcmdinfo->post_sq); 5306 break; 5307 case IRDMA_OP_QP_CREATE: 5308 status = irdma_sc_qp_create(pcmdinfo->in.u.qp_create.qp, 5309 &pcmdinfo->in.u.qp_create.info, 5310 pcmdinfo->in.u.qp_create.scratch, 5311 pcmdinfo->post_sq); 5312 break; 5313 case IRDMA_OP_QP_DESTROY: 5314 status = irdma_sc_qp_destroy(pcmdinfo->in.u.qp_destroy.qp, 5315 
    case IRDMA_OP_QP_DESTROY:
        status = irdma_sc_qp_destroy(pcmdinfo->in.u.qp_destroy.qp,
            pcmdinfo->in.u.qp_destroy.scratch,
            pcmdinfo->in.u.qp_destroy.remove_hash_idx,
            pcmdinfo->in.u.qp_destroy.ignore_mw_bnd, pcmdinfo->post_sq);
        break;
    case IRDMA_OP_ALLOC_STAG:
        status = irdma_sc_alloc_stag(pcmdinfo->in.u.alloc_stag.dev,
            &pcmdinfo->in.u.alloc_stag.info,
            pcmdinfo->in.u.alloc_stag.scratch, pcmdinfo->post_sq);
        break;
    case IRDMA_OP_MR_REG_NON_SHARED:
        status = irdma_sc_mr_reg_non_shared(pcmdinfo->in.u.mr_reg_non_shared.dev,
            &pcmdinfo->in.u.mr_reg_non_shared.info,
            pcmdinfo->in.u.mr_reg_non_shared.scratch, pcmdinfo->post_sq);
        break;
    case IRDMA_OP_DEALLOC_STAG:
        status = irdma_sc_dealloc_stag(pcmdinfo->in.u.dealloc_stag.dev,
            &pcmdinfo->in.u.dealloc_stag.info,
            pcmdinfo->in.u.dealloc_stag.scratch, pcmdinfo->post_sq);
        break;
    case IRDMA_OP_MW_ALLOC:
        status = irdma_sc_mw_alloc(pcmdinfo->in.u.mw_alloc.dev,
            &pcmdinfo->in.u.mw_alloc.info,
            pcmdinfo->in.u.mw_alloc.scratch, pcmdinfo->post_sq);
        break;
    case IRDMA_OP_ADD_ARP_CACHE_ENTRY:
        status = irdma_sc_add_arp_cache_entry(pcmdinfo->in.u.add_arp_cache_entry.cqp,
            &pcmdinfo->in.u.add_arp_cache_entry.info,
            pcmdinfo->in.u.add_arp_cache_entry.scratch, pcmdinfo->post_sq);
        break;
    case IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY:
        status = irdma_sc_alloc_local_mac_entry(pcmdinfo->in.u.alloc_local_mac_entry.cqp,
            pcmdinfo->in.u.alloc_local_mac_entry.scratch, pcmdinfo->post_sq);
        break;
    case IRDMA_OP_ADD_LOCAL_MAC_ENTRY:
        status = irdma_sc_add_local_mac_entry(pcmdinfo->in.u.add_local_mac_entry.cqp,
            &pcmdinfo->in.u.add_local_mac_entry.info,
            pcmdinfo->in.u.add_local_mac_entry.scratch, pcmdinfo->post_sq);
        break;
    case IRDMA_OP_DELETE_LOCAL_MAC_ENTRY:
        status = irdma_sc_del_local_mac_entry(pcmdinfo->in.u.del_local_mac_entry.cqp,
            pcmdinfo->in.u.del_local_mac_entry.scratch,
            pcmdinfo->in.u.del_local_mac_entry.entry_idx,
            pcmdinfo->in.u.del_local_mac_entry.ignore_ref_count,
            pcmdinfo->post_sq);
        break;
    case IRDMA_OP_AH_CREATE:
        status = irdma_sc_create_ah(pcmdinfo->in.u.ah_create.cqp,
            &pcmdinfo->in.u.ah_create.info, pcmdinfo->in.u.ah_create.scratch);
        break;
    case IRDMA_OP_AH_DESTROY:
        status = irdma_sc_destroy_ah(pcmdinfo->in.u.ah_destroy.cqp,
            &pcmdinfo->in.u.ah_destroy.info, pcmdinfo->in.u.ah_destroy.scratch);
        break;
    case IRDMA_OP_MC_CREATE:
        status = irdma_sc_create_mcast_grp(pcmdinfo->in.u.mc_create.cqp,
            &pcmdinfo->in.u.mc_create.info, pcmdinfo->in.u.mc_create.scratch);
        break;
    case IRDMA_OP_MC_DESTROY:
        status = irdma_sc_destroy_mcast_grp(pcmdinfo->in.u.mc_destroy.cqp,
            &pcmdinfo->in.u.mc_destroy.info, pcmdinfo->in.u.mc_destroy.scratch);
        break;
    case IRDMA_OP_MC_MODIFY:
        status = irdma_sc_modify_mcast_grp(pcmdinfo->in.u.mc_modify.cqp,
            &pcmdinfo->in.u.mc_modify.info, pcmdinfo->in.u.mc_modify.scratch);
        break;
    default:
        status = -EOPNOTSUPP;
        break;
    }

    return status;
}
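/*
 * Illustrative sketch (not part of the driver): how a caller might fill a
 * struct cqp_cmds_info for one of the ops dispatched above and submit it via
 * irdma_process_cqp_cmd() below.  Field names are taken from the switch in
 * irdma_exec_cqp_cmd(); the cq, scratch and check_overflow values are
 * placeholders.  Because irdma_process_cqp_cmd() may queue the request on
 * dev->cqp_cmd_head when the CQP ring is full, a real caller would use
 * storage that outlives its own stack frame.
 *
 *	struct cqp_cmds_info cmd = {0};
 *
 *	cmd.cqp_cmd = IRDMA_OP_CQ_CREATE;
 *	cmd.post_sq = 1;
 *	cmd.in.u.cq_create.cq = cq;
 *	cmd.in.u.cq_create.scratch = (uintptr_t)cq;
 *	cmd.in.u.cq_create.check_overflow = true;
 *	status = irdma_process_cqp_cmd(dev, &cmd);
 */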
/**
 * irdma_process_cqp_cmd - process all cqp commands
 * @dev: sc device struct
 * @pcmdinfo: cqp command info
 */
int
irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
    struct cqp_cmds_info *pcmdinfo)
{
    int status = 0;
    unsigned long flags;

    if (dev->no_cqp)
        return -EFAULT;

    spin_lock_irqsave(&dev->cqp_lock, flags);
    if (list_empty(&dev->cqp_cmd_head) && !irdma_cqp_ring_full(dev->cqp))
        status = irdma_exec_cqp_cmd(dev, pcmdinfo);
    else
        list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
    spin_unlock_irqrestore(&dev->cqp_lock, flags);
    return status;
}

/**
 * irdma_process_bh - called from tasklet for cqp list
 * @dev: sc device struct
 */
int
irdma_process_bh(struct irdma_sc_dev *dev)
{
    int status = 0;
    struct cqp_cmds_info *pcmdinfo;
    unsigned long flags;

    spin_lock_irqsave(&dev->cqp_lock, flags);
    while (!list_empty(&dev->cqp_cmd_head) &&
        !irdma_cqp_ring_full(dev->cqp)) {
        pcmdinfo = (struct cqp_cmds_info *)irdma_remove_cqp_head(dev);
        status = irdma_exec_cqp_cmd(dev, pcmdinfo);
        if (status)
            break;
    }
    spin_unlock_irqrestore(&dev->cqp_lock, flags);
    return status;
}
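/*
 * Note on the deferred path (illustrative, not part of the driver): commands
 * that irdma_process_cqp_cmd() cannot execute immediately stay on
 * dev->cqp_cmd_head until space frees up in the CQP ring.  A completion
 * handler would then drain the backlog in submission order, for example:
 *
 *	if (irdma_process_bh(dev))
 *		irdma_debug(dev, IRDMA_DEBUG_DEV,
 *			    "CQP backlog drain failed\n");
 *
 * The exact call site is driver-specific; only the draining call itself is
 * taken from irdma_process_bh() above.
 */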
/**
 * irdma_cfg_aeq - Configure AEQ interrupt
 * @dev: pointer to the device structure
 * @idx: vector index
 * @enable: True to enable, False to disable
 */
void
irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable)
{
    u32 reg_val;

    reg_val = enable ? IRDMA_PFINT_AEQCTL_CAUSE_ENA_M : 0;
    reg_val |= (idx << IRDMA_PFINT_AEQCTL_MSIX_INDX_S) |
        IRDMA_PFINT_AEQCTL_ITR_INDX_M;
    writel(reg_val, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
}

/**
 * sc_vsi_update_stats - Update statistics
 * @vsi: sc_vsi instance to update
 */
void
sc_vsi_update_stats(struct irdma_sc_vsi *vsi)
{
    struct irdma_gather_stats *gather_stats;
    struct irdma_gather_stats *last_gather_stats;

    gather_stats = vsi->pestat->gather_info.gather_stats_va;
    last_gather_stats = vsi->pestat->gather_info.last_gather_stats_va;
    irdma_update_stats(&vsi->pestat->hw_stats, gather_stats,
        last_gather_stats, vsi->dev->hw_stats_map,
        vsi->dev->hw_attrs.max_stat_idx);
}

/**
 * irdma_wait_pe_ready - Check if firmware is ready
 * @dev: provides access to registers
 */
static int
irdma_wait_pe_ready(struct irdma_sc_dev *dev)
{
    u32 statuscpu0;
    u32 statuscpu1;
    u32 statuscpu2;
    u32 retrycount = 0;

    do {
        statuscpu0 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS0]);
        statuscpu1 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS1]);
        statuscpu2 = readl(dev->hw_regs[IRDMA_GLPE_CPUSTATUS2]);
        if (statuscpu0 == 0x80 && statuscpu1 == 0x80 &&
            statuscpu2 == 0x80)
            return 0;
        mdelay(1000);
    } while (retrycount++ < dev->hw_attrs.max_pe_ready_count);

    return -1;
}

static inline void
irdma_sc_init_hw(struct irdma_sc_dev *dev)
{
    switch (dev->hw_attrs.uk_attrs.hw_rev) {
    case IRDMA_GEN_2:
        icrdma_init_hw(dev);
        break;
    }
}

/**
 * irdma_sc_dev_init - Initialize control part of device
 * @ver: version
 * @dev: Device pointer
 * @info: Device init info
 */
int
irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
    struct irdma_device_init_info *info)
{
    u32 val;
    int ret_code = 0;
    u8 db_size;

    INIT_LIST_HEAD(&dev->cqp_cmd_head);	/* for CQP command backlog */
    mutex_init(&dev->ws_mutex);
    dev->debug_mask = info->debug_mask;
    dev->hmc_fn_id = info->hmc_fn_id;
    dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
    dev->fpm_query_buf = info->fpm_query_buf;
    dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
    dev->fpm_commit_buf = info->fpm_commit_buf;
    dev->hw = info->hw;
    dev->hw->hw_addr = info->bar0;
    /* Setup the hardware limits, hmc may limit further */
    dev->hw_attrs.min_hw_qp_id = IRDMA_MIN_IW_QP_ID;
    dev->hw_attrs.min_hw_aeq_size = IRDMA_MIN_AEQ_ENTRIES;
    dev->hw_attrs.max_hw_aeq_size = IRDMA_MAX_AEQ_ENTRIES;
    dev->hw_attrs.min_hw_ceq_size = IRDMA_MIN_CEQ_ENTRIES;
    dev->hw_attrs.max_hw_ceq_size = IRDMA_MAX_CEQ_ENTRIES;
    dev->hw_attrs.uk_attrs.min_hw_cq_size = IRDMA_MIN_CQ_SIZE;
    dev->hw_attrs.uk_attrs.max_hw_cq_size = IRDMA_MAX_CQ_SIZE;
    dev->hw_attrs.max_hw_outbound_msg_size = IRDMA_MAX_OUTBOUND_MSG_SIZE;
    dev->hw_attrs.max_mr_size = IRDMA_MAX_MR_SIZE;
    dev->hw_attrs.max_hw_inbound_msg_size = IRDMA_MAX_INBOUND_MSG_SIZE;
    dev->hw_attrs.max_hw_device_pages = IRDMA_MAX_PUSH_PAGE_COUNT;
    dev->hw_attrs.uk_attrs.max_hw_inline = IRDMA_MAX_INLINE_DATA_SIZE;
    dev->hw_attrs.max_hw_wqes = IRDMA_MAX_WQ_ENTRIES;
    dev->hw_attrs.max_qp_wr = IRDMA_MAX_QP_WRS(IRDMA_MAX_QUANTA_PER_WR);

    dev->hw_attrs.uk_attrs.max_hw_rq_quanta = IRDMA_QP_SW_MAX_RQ_QUANTA;
    dev->hw_attrs.uk_attrs.max_hw_wq_quanta = IRDMA_QP_SW_MAX_WQ_QUANTA;
    dev->hw_attrs.max_hw_pds = IRDMA_MAX_PDS;
    dev->hw_attrs.max_hw_ena_vf_count = IRDMA_MAX_PE_ENA_VF_COUNT;

    dev->hw_attrs.max_pe_ready_count = 14;
    dev->hw_attrs.max_done_count = IRDMA_DONE_COUNT;
    dev->hw_attrs.max_sleep_count = IRDMA_SLEEP_COUNT;
    dev->hw_attrs.max_cqp_compl_wait_time_ms = CQP_COMPL_WAIT_TIME_MS;

    dev->hw_attrs.uk_attrs.hw_rev = ver;
    irdma_sc_init_hw(dev);

    if (irdma_wait_pe_ready(dev))
        return -ETIMEDOUT;

    val = readl(dev->hw_regs[IRDMA_GLPCI_LBARCTRL]);
    db_size = (u8)RS_32(val, IRDMA_GLPCI_LBARCTRL_PE_DB_SIZE);
    if (db_size != IRDMA_PE_DB_SIZE_4M && db_size != IRDMA_PE_DB_SIZE_8M) {
        irdma_debug(dev, IRDMA_DEBUG_DEV,
                    "RDMA PE doorbell is not enabled in CSR val 0x%x db_size=%d\n",
                    val, db_size);
        return -ENODEV;
    }
    dev->db_addr = dev->hw->hw_addr + (uintptr_t)dev->hw_regs[IRDMA_DB_ADDR_OFFSET];

    return ret_code;
}

/**
 * irdma_stat_val - Extract HW counter value from statistics buffer
 * @stats_val: pointer to statistics buffer
 * @byteoff: byte offset of counter value in the buffer (8B-aligned)
 * @bitoff: bit offset of counter value within 8B entry
 * @bitmask: maximum counter value (e.g. 0xffffff for 24-bit counter)
 */
static inline u64
irdma_stat_val(const u64 *stats_val, u16 byteoff, u8 bitoff, u64 bitmask)
{
    u16 idx = byteoff / sizeof(*stats_val);

    return (stats_val[idx] >> bitoff) & bitmask;
}
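/*
 * Worked example for irdma_stat_val() (illustrative values, not taken from a
 * real stat map): byteoff = 24 selects stats_val[3], bitoff = 32 shifts that
 * 8-byte entry right by 32 bits, and bitmask = 0xffffff then keeps the low
 * 24 bits, i.e. a 24-bit counter stored in bits 32..55 of the fourth 64-bit
 * word of the gather buffer.
 */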
/**
 * irdma_stat_delta - Calculate counter delta
 * @new_val: updated counter value
 * @old_val: last counter value
 * @max_val: maximum counter value (e.g. 0xffffff for 24-bit counter)
 */
static inline u64
irdma_stat_delta(u64 new_val, u64 old_val, u64 max_val)
{
    if (new_val >= old_val)
        return new_val - old_val;
    else
        /* roll-over case */
        return max_val - old_val + new_val + 1;
}

/**
 * irdma_update_stats - Update statistics
 * @hw_stats: hw_stats instance to update
 * @gather_stats: updated stat counters
 * @last_gather_stats: last stat counters
 * @map: HW stat map (hw_stats => gather_stats)
 * @max_stat_idx: number of HW stats
 */
void
irdma_update_stats(struct irdma_dev_hw_stats *hw_stats,
    struct irdma_gather_stats *gather_stats,
    struct irdma_gather_stats *last_gather_stats,
    const struct irdma_hw_stat_map *map,
    u16 max_stat_idx)
{
    u64 *stats_val = hw_stats->stats_val;
    u16 i;

    for (i = 0; i < max_stat_idx; i++) {
        u64 new_val = irdma_stat_val(gather_stats->val,
            map[i].byteoff, map[i].bitoff, map[i].bitmask);
        u64 last_val = irdma_stat_val(last_gather_stats->val,
            map[i].byteoff, map[i].bitoff, map[i].bitmask);

        stats_val[i] += irdma_stat_delta(new_val, last_val,
            map[i].bitmask);
    }

    irdma_memcpy(last_gather_stats, gather_stats,
        sizeof(*last_gather_stats));
}
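/*
 * Worked example for the roll-over branch of irdma_stat_delta()
 * (illustrative numbers): for a 24-bit counter (max_val = 0xffffff) that
 * last read old_val = 0xfffffe and now reads new_val = 0x000001, the counter
 * wrapped, so the delta is max_val - old_val + new_val + 1 = 1 + 1 + 1 = 3,
 * matching the three increments 0xfffffe -> 0xffffff -> 0x000000 -> 0x000001.
 */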