1 /* 2 * Copyright (c) 2018-2019 Cavium, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 /* 29 * File : ecore_ll2.c 30 */ 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include "bcm_osal.h" 35 36 #include "ecore.h" 37 #include "ecore_status.h" 38 #include "ecore_ll2.h" 39 #include "reg_addr.h" 40 #include "ecore_int.h" 41 #include "ecore_cxt.h" 42 #include "ecore_sp_commands.h" 43 #include "ecore_hw.h" 44 #include "reg_addr.h" 45 #include "ecore_dev_api.h" 46 #include "ecore_iro.h" 47 #include "ecore_gtt_reg_addr.h" 48 #include "ecore_ooo.h" 49 #include "ecore_hw.h" 50 #include "ecore_mcp.h" 51 52 #define ECORE_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registred) 53 #define ECORE_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registred) 54 55 #ifdef _NTDDK_ 56 #pragma warning(push) 57 #pragma warning(disable : 28167) 58 #pragma warning(disable : 28123) 59 #pragma warning(disable : 28121) 60 #endif 61 62 static struct ecore_ll2_info * 63 __ecore_ll2_handle_sanity(struct ecore_hwfn *p_hwfn, 64 u8 connection_handle, 65 bool b_lock, bool b_only_active) 66 { 67 struct ecore_ll2_info *p_ll2_conn, *p_ret = OSAL_NULL; 68 69 if (connection_handle >= ECORE_MAX_NUM_OF_LL2_CONNECTIONS) 70 return OSAL_NULL; 71 72 if (!p_hwfn->p_ll2_info) 73 return OSAL_NULL; 74 75 /* TODO - is there really need for the locked vs. unlocked 76 * variant? I simply used what was already there. 77 */ 78 p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle]; 79 80 if (b_only_active) { 81 if (b_lock) 82 OSAL_MUTEX_ACQUIRE(&p_ll2_conn->mutex); 83 if (p_ll2_conn->b_active) 84 p_ret = p_ll2_conn; 85 if (b_lock) 86 OSAL_MUTEX_RELEASE(&p_ll2_conn->mutex); 87 } else { 88 p_ret = p_ll2_conn; 89 } 90 91 return p_ret; 92 } 93 94 static struct ecore_ll2_info * 95 ecore_ll2_handle_sanity(struct ecore_hwfn *p_hwfn, 96 u8 connection_handle) 97 { 98 return __ecore_ll2_handle_sanity(p_hwfn, connection_handle, 99 false, true); 100 } 101 102 static struct ecore_ll2_info * 103 ecore_ll2_handle_sanity_lock(struct ecore_hwfn *p_hwfn, 104 u8 connection_handle) 105 { 106 return __ecore_ll2_handle_sanity(p_hwfn, connection_handle, 107 true, true); 108 } 109 110 static struct ecore_ll2_info * 111 ecore_ll2_handle_sanity_inactive(struct ecore_hwfn *p_hwfn, 112 u8 connection_handle) 113 { 114 return __ecore_ll2_handle_sanity(p_hwfn, connection_handle, 115 false, false); 116 } 117 118 #ifndef LINUX_REMOVE 119 /* TODO - is this really been used by anyone? Is it a on future todo list? */ 120 enum _ecore_status_t 121 ecore_ll2_get_fragment_of_tx_packet(struct ecore_hwfn *p_hwfn, 122 u8 connection_handle, 123 dma_addr_t *p_addr, 124 bool *b_last_fragment) 125 { 126 struct ecore_ll2_tx_packet *p_pkt; 127 struct ecore_ll2_info *p_ll2_conn; 128 u16 cur_frag_idx = 0; 129 130 p_ll2_conn = ecore_ll2_handle_sanity(p_hwfn, connection_handle); 131 if (p_ll2_conn == OSAL_NULL) 132 return ECORE_INVAL; 133 p_pkt = &p_ll2_conn->tx_queue.cur_completing_packet; 134 135 if (!p_ll2_conn->tx_queue.b_completing_packet || !p_addr) 136 return ECORE_INVAL; 137 138 if (p_ll2_conn->tx_queue.cur_completing_bd_idx == p_pkt->bd_used) 139 return ECORE_INVAL; 140 141 /* Packet is available and has at least one more frag - provide it */ 142 cur_frag_idx = p_ll2_conn->tx_queue.cur_completing_bd_idx++; 143 *p_addr = p_pkt->bds_set[cur_frag_idx].tx_frag; 144 if (b_last_fragment) 145 *b_last_fragment = p_pkt->bd_used == 146 p_ll2_conn->tx_queue.cur_completing_bd_idx; 147 148 return ECORE_SUCCESS; 149 } 150 #endif 151 152 static void ecore_ll2_txq_flush(struct ecore_hwfn *p_hwfn, 153 u8 connection_handle) 154 { 155 bool b_last_packet = false, b_last_frag = false; 156 struct ecore_ll2_tx_packet *p_pkt = OSAL_NULL; 157 struct ecore_ll2_info *p_ll2_conn; 158 struct ecore_ll2_tx_queue *p_tx; 159 unsigned long flags = 0; 160 dma_addr_t tx_frag; 161 162 p_ll2_conn = ecore_ll2_handle_sanity_inactive(p_hwfn, 163 connection_handle); 164 if (p_ll2_conn == OSAL_NULL) 165 return; 166 p_tx = &p_ll2_conn->tx_queue; 167 168 OSAL_SPIN_LOCK_IRQSAVE(&p_tx->lock, flags); 169 while (!OSAL_LIST_IS_EMPTY(&p_tx->active_descq)) { 170 p_pkt = OSAL_LIST_FIRST_ENTRY(&p_tx->active_descq, 171 struct ecore_ll2_tx_packet, 172 list_entry); 173 174 if (p_pkt == OSAL_NULL) 175 break; 176 177 #if defined(_NTDDK_) 178 #pragma warning(suppress : 6011 28182) 179 #endif 180 OSAL_LIST_REMOVE_ENTRY(&p_pkt->list_entry, 181 &p_tx->active_descq); 182 b_last_packet = OSAL_LIST_IS_EMPTY(&p_tx->active_descq); 183 OSAL_LIST_PUSH_TAIL(&p_pkt->list_entry, 184 &p_tx->free_descq); 185 OSAL_SPIN_UNLOCK_IRQSAVE(&p_tx->lock, flags); 186 if (p_ll2_conn->input.conn_type == ECORE_LL2_TYPE_OOO) { 187 struct ecore_ooo_buffer *p_buffer; 188 189 p_buffer = (struct ecore_ooo_buffer *)p_pkt->cookie; 190 ecore_ooo_put_free_buffer(p_hwfn->p_ooo_info, p_buffer); 191 } else { 192 p_tx->cur_completing_packet = *p_pkt; 193 p_tx->cur_completing_bd_idx = 1; 194 b_last_frag = p_tx->cur_completing_bd_idx == 195 p_pkt->bd_used; 196 197 tx_frag = p_pkt->bds_set[0].tx_frag; 198 p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie, 199 p_ll2_conn->my_id, 200 p_pkt->cookie, 201 tx_frag, 202 b_last_frag, 203 b_last_packet); 204 } 205 OSAL_SPIN_LOCK_IRQSAVE(&p_tx->lock, flags); 206 } 207 OSAL_SPIN_UNLOCK_IRQSAVE(&p_tx->lock, flags); 208 } 209 210 static enum _ecore_status_t 211 ecore_ll2_txq_completion(struct ecore_hwfn *p_hwfn, 212 void *p_cookie) 213 { 214 struct ecore_ll2_info *p_ll2_conn = (struct ecore_ll2_info*)p_cookie; 215 struct ecore_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; 216 u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0; 217 struct ecore_ll2_tx_packet *p_pkt; 218 bool b_last_frag = false; 219 unsigned long flags; 220 enum _ecore_status_t rc = ECORE_INVAL; 221 222 OSAL_SPIN_LOCK_IRQSAVE(&p_tx->lock, flags); 223 if (p_tx->b_completing_packet) { 224 /* TODO - this looks completely unnecessary to me - the only 225 * way we can re-enter is by the DPC calling us again, but this 226 * would only happen AFTER we return, and we unset this at end 227 * of the function. 228 */ 229 rc = ECORE_BUSY; 230 goto out; 231 } 232 233 new_idx = OSAL_LE16_TO_CPU(*p_tx->p_fw_cons); 234 num_bds = ((s16)new_idx - (s16)p_tx->bds_idx); 235 while (num_bds) { 236 if (OSAL_LIST_IS_EMPTY(&p_tx->active_descq)) 237 goto out; 238 239 p_pkt = OSAL_LIST_FIRST_ENTRY(&p_tx->active_descq, 240 struct ecore_ll2_tx_packet, 241 list_entry); 242 if (!p_pkt) 243 goto out; 244 245 p_tx->b_completing_packet = true; 246 p_tx->cur_completing_packet = *p_pkt; 247 num_bds_in_packet = p_pkt->bd_used; 248 #if defined(_NTDDK_) 249 #pragma warning(suppress : 6011 28182) 250 #endif 251 OSAL_LIST_REMOVE_ENTRY(&p_pkt->list_entry, 252 &p_tx->active_descq); 253 254 if (num_bds < num_bds_in_packet) { 255 DP_NOTICE(p_hwfn, true, 256 "Rest of BDs does not cover whole packet\n"); 257 goto out; 258 } 259 260 num_bds -= num_bds_in_packet; 261 p_tx->bds_idx += num_bds_in_packet; 262 while (num_bds_in_packet--) 263 ecore_chain_consume(&p_tx->txq_chain); 264 265 p_tx->cur_completing_bd_idx = 1; 266 b_last_frag = p_tx->cur_completing_bd_idx == 267 p_pkt->bd_used; 268 OSAL_LIST_PUSH_TAIL(&p_pkt->list_entry, 269 &p_tx->free_descq); 270 271 OSAL_SPIN_UNLOCK_IRQSAVE(&p_tx->lock, flags); 272 273 p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie, 274 p_ll2_conn->my_id, 275 p_pkt->cookie, 276 p_pkt->bds_set[0].tx_frag, 277 b_last_frag, 278 !num_bds); 279 280 OSAL_SPIN_LOCK_IRQSAVE(&p_tx->lock, flags); 281 } 282 283 p_tx->b_completing_packet = false; 284 rc = ECORE_SUCCESS; 285 out: 286 OSAL_SPIN_UNLOCK_IRQSAVE(&p_tx->lock, flags); 287 return rc; 288 } 289 290 static void ecore_ll2_rxq_parse_gsi(union core_rx_cqe_union *p_cqe, 291 struct ecore_ll2_comp_rx_data *data) 292 { 293 data->parse_flags = 294 OSAL_LE16_TO_CPU(p_cqe->rx_cqe_gsi.parse_flags.flags); 295 data->length.data_length = 296 OSAL_LE16_TO_CPU(p_cqe->rx_cqe_gsi.data_length); 297 data->vlan = 298 OSAL_LE16_TO_CPU(p_cqe->rx_cqe_gsi.vlan); 299 data->opaque_data_0 = 300 OSAL_LE32_TO_CPU(p_cqe->rx_cqe_gsi.src_mac_addrhi); 301 data->opaque_data_1 = 302 OSAL_LE16_TO_CPU(p_cqe->rx_cqe_gsi.src_mac_addrlo); 303 data->u.data_length_error = 304 p_cqe->rx_cqe_gsi.data_length_error; 305 data->qp_id = OSAL_LE16_TO_CPU(p_cqe->rx_cqe_gsi.qp_id); 306 307 data->src_qp = OSAL_LE32_TO_CPU(p_cqe->rx_cqe_gsi.src_qp); 308 } 309 310 static void ecore_ll2_rxq_parse_reg(union core_rx_cqe_union *p_cqe, 311 struct ecore_ll2_comp_rx_data *data) 312 { 313 data->parse_flags = 314 OSAL_LE16_TO_CPU(p_cqe->rx_cqe_fp.parse_flags.flags); 315 data->err_flags = 316 OSAL_LE16_TO_CPU(p_cqe->rx_cqe_fp.err_flags.flags); 317 data->length.packet_length = 318 OSAL_LE16_TO_CPU(p_cqe->rx_cqe_fp.packet_length); 319 data->vlan = 320 OSAL_LE16_TO_CPU(p_cqe->rx_cqe_fp.vlan); 321 data->opaque_data_0 = 322 OSAL_LE32_TO_CPU(p_cqe->rx_cqe_fp.opaque_data.data[0]); 323 data->opaque_data_1 = 324 OSAL_LE32_TO_CPU(p_cqe->rx_cqe_fp.opaque_data.data[1]); 325 data->u.placement_offset = 326 p_cqe->rx_cqe_fp.placement_offset; 327 } 328 329 #if defined(_NTDDK_) 330 #pragma warning(suppress : 28167 26110) 331 #endif 332 static enum _ecore_status_t 333 ecore_ll2_handle_slowpath(struct ecore_hwfn *p_hwfn, 334 struct ecore_ll2_info *p_ll2_conn, 335 union core_rx_cqe_union *p_cqe, 336 unsigned long *p_lock_flags) 337 { 338 struct ecore_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; 339 struct core_rx_slow_path_cqe *sp_cqe; 340 341 sp_cqe = &p_cqe->rx_cqe_sp; 342 if (sp_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) { 343 DP_NOTICE(p_hwfn, true, 344 "LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n", 345 sp_cqe->ramrod_cmd_id); 346 return ECORE_INVAL; 347 } 348 349 if (p_ll2_conn->cbs.slowpath_cb == OSAL_NULL) { 350 DP_NOTICE(p_hwfn, true, 351 "LL2 - received RX_QUEUE_FLUSH but no callback was provided\n"); 352 return ECORE_INVAL; 353 } 354 355 OSAL_SPIN_UNLOCK_IRQSAVE(&p_rx->lock, *p_lock_flags); 356 357 p_ll2_conn->cbs.slowpath_cb(p_ll2_conn->cbs.cookie, 358 p_ll2_conn->my_id, 359 OSAL_LE32_TO_CPU(sp_cqe->opaque_data.data[0]), 360 OSAL_LE32_TO_CPU(sp_cqe->opaque_data.data[1])); 361 362 OSAL_SPIN_LOCK_IRQSAVE(&p_rx->lock, *p_lock_flags); 363 364 return ECORE_SUCCESS; 365 } 366 367 static enum _ecore_status_t 368 ecore_ll2_rxq_handle_completion(struct ecore_hwfn *p_hwfn, 369 struct ecore_ll2_info *p_ll2_conn, 370 union core_rx_cqe_union *p_cqe, 371 unsigned long *p_lock_flags, 372 bool b_last_cqe) 373 { 374 struct ecore_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; 375 struct ecore_ll2_rx_packet *p_pkt = OSAL_NULL; 376 struct ecore_ll2_comp_rx_data data; 377 378 if (!OSAL_LIST_IS_EMPTY(&p_rx->active_descq)) 379 p_pkt = OSAL_LIST_FIRST_ENTRY(&p_rx->active_descq, 380 struct ecore_ll2_rx_packet, 381 list_entry); 382 if (!p_pkt) { 383 DP_NOTICE(p_hwfn, false, 384 "[%d] LL2 Rx completion but active_descq is empty\n", 385 p_ll2_conn->input.conn_type); 386 387 return ECORE_IO; 388 } 389 390 OSAL_LIST_REMOVE_ENTRY(&p_pkt->list_entry, &p_rx->active_descq); 391 392 if (p_cqe->rx_cqe_sp.type == CORE_RX_CQE_TYPE_REGULAR) 393 ecore_ll2_rxq_parse_reg(p_cqe, &data); 394 else 395 ecore_ll2_rxq_parse_gsi(p_cqe, &data); 396 397 if (ecore_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd) { 398 DP_NOTICE(p_hwfn, false, 399 "Mismatch between active_descq and the LL2 Rx chain\n"); 400 /* TODO - didn't return error value since this wasn't handled 401 * before, but this is obviously lacking. 402 */ 403 } 404 405 OSAL_LIST_PUSH_TAIL(&p_pkt->list_entry, &p_rx->free_descq); 406 407 data.connection_handle = p_ll2_conn->my_id; 408 data.cookie = p_pkt->cookie; 409 data.rx_buf_addr = p_pkt->rx_buf_addr; 410 data.b_last_packet = b_last_cqe; 411 412 OSAL_SPIN_UNLOCK_IRQSAVE(&p_rx->lock, *p_lock_flags); 413 p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, 414 &data); 415 416 OSAL_SPIN_LOCK_IRQSAVE(&p_rx->lock, *p_lock_flags); 417 418 return ECORE_SUCCESS; 419 } 420 421 static enum _ecore_status_t ecore_ll2_rxq_completion(struct ecore_hwfn *p_hwfn, 422 void *cookie) 423 { 424 struct ecore_ll2_info *p_ll2_conn = (struct ecore_ll2_info*)cookie; 425 struct ecore_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; 426 union core_rx_cqe_union *cqe = OSAL_NULL; 427 u16 cq_new_idx = 0, cq_old_idx = 0; 428 unsigned long flags = 0; 429 enum _ecore_status_t rc = ECORE_SUCCESS; 430 431 OSAL_SPIN_LOCK_IRQSAVE(&p_rx->lock, flags); 432 cq_new_idx = OSAL_LE16_TO_CPU(*p_rx->p_fw_cons); 433 cq_old_idx = ecore_chain_get_cons_idx(&p_rx->rcq_chain); 434 435 while (cq_new_idx != cq_old_idx) { 436 bool b_last_cqe = (cq_new_idx == cq_old_idx); 437 438 cqe = (union core_rx_cqe_union *)ecore_chain_consume(&p_rx->rcq_chain); 439 cq_old_idx = ecore_chain_get_cons_idx(&p_rx->rcq_chain); 440 441 DP_VERBOSE(p_hwfn, ECORE_MSG_LL2, 442 "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n", 443 cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type); 444 445 switch (cqe->rx_cqe_sp.type) { 446 case CORE_RX_CQE_TYPE_SLOW_PATH: 447 rc = ecore_ll2_handle_slowpath(p_hwfn, p_ll2_conn, 448 cqe, &flags); 449 break; 450 case CORE_RX_CQE_TYPE_GSI_OFFLOAD: 451 case CORE_RX_CQE_TYPE_REGULAR: 452 rc = ecore_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn, 453 cqe, &flags, 454 b_last_cqe); 455 break; 456 default: 457 rc = ECORE_IO; 458 } 459 } 460 461 OSAL_SPIN_UNLOCK_IRQSAVE(&p_rx->lock, flags); 462 return rc; 463 } 464 465 static void ecore_ll2_rxq_flush(struct ecore_hwfn *p_hwfn, 466 u8 connection_handle) 467 { 468 struct ecore_ll2_info *p_ll2_conn = OSAL_NULL; 469 struct ecore_ll2_rx_packet *p_pkt = OSAL_NULL; 470 struct ecore_ll2_rx_queue *p_rx; 471 unsigned long flags = 0; 472 473 p_ll2_conn = ecore_ll2_handle_sanity_inactive(p_hwfn, 474 connection_handle); 475 if (p_ll2_conn == OSAL_NULL) 476 return; 477 p_rx = &p_ll2_conn->rx_queue; 478 479 OSAL_SPIN_LOCK_IRQSAVE(&p_rx->lock, flags); 480 while (!OSAL_LIST_IS_EMPTY(&p_rx->active_descq)) { 481 bool b_last; 482 p_pkt = OSAL_LIST_FIRST_ENTRY(&p_rx->active_descq, 483 struct ecore_ll2_rx_packet, 484 list_entry); 485 if (p_pkt == OSAL_NULL) 486 break; 487 #if defined(_NTDDK_) 488 #pragma warning(suppress : 6011 28182) 489 #endif 490 OSAL_LIST_REMOVE_ENTRY(&p_pkt->list_entry, 491 &p_rx->active_descq); 492 OSAL_LIST_PUSH_TAIL(&p_pkt->list_entry, 493 &p_rx->free_descq); 494 b_last = OSAL_LIST_IS_EMPTY(&p_rx->active_descq); 495 OSAL_SPIN_UNLOCK_IRQSAVE(&p_rx->lock, flags); 496 497 if (p_ll2_conn->input.conn_type == ECORE_LL2_TYPE_OOO) { 498 struct ecore_ooo_buffer *p_buffer; 499 500 p_buffer = (struct ecore_ooo_buffer *)p_pkt->cookie; 501 ecore_ooo_put_free_buffer(p_hwfn->p_ooo_info, p_buffer); 502 } else { 503 dma_addr_t rx_buf_addr = p_pkt->rx_buf_addr; 504 void *cookie = p_pkt->cookie; 505 506 p_ll2_conn->cbs.rx_release_cb(p_ll2_conn->cbs.cookie, 507 p_ll2_conn->my_id, 508 cookie, 509 rx_buf_addr, 510 b_last); 511 } 512 OSAL_SPIN_LOCK_IRQSAVE(&p_rx->lock, flags); 513 } 514 OSAL_SPIN_UNLOCK_IRQSAVE(&p_rx->lock, flags); 515 } 516 517 static bool 518 ecore_ll2_lb_rxq_handler_slowpath(struct ecore_hwfn *p_hwfn, 519 struct core_rx_slow_path_cqe *p_cqe) 520 { 521 struct ooo_opaque *iscsi_ooo; 522 u32 cid; 523 524 if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) 525 return false; 526 527 iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data; 528 if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES) 529 return false; 530 531 /* Need to make a flush */ 532 cid = OSAL_LE32_TO_CPU(iscsi_ooo->cid); 533 ecore_ooo_release_connection_isles(p_hwfn->p_ooo_info, cid); 534 535 return true; 536 } 537 538 static enum _ecore_status_t 539 ecore_ll2_lb_rxq_handler(struct ecore_hwfn *p_hwfn, 540 struct ecore_ll2_info *p_ll2_conn) 541 { 542 struct ecore_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; 543 u16 packet_length = 0, parse_flags = 0, vlan = 0; 544 struct ecore_ll2_rx_packet *p_pkt = OSAL_NULL; 545 u32 num_ooo_add_to_peninsula = 0, cid; 546 union core_rx_cqe_union *cqe = OSAL_NULL; 547 u16 cq_new_idx = 0, cq_old_idx = 0; 548 struct ecore_ooo_buffer *p_buffer; 549 struct ooo_opaque *iscsi_ooo; 550 u8 placement_offset = 0; 551 u8 cqe_type; 552 553 cq_new_idx = OSAL_LE16_TO_CPU(*p_rx->p_fw_cons); 554 cq_old_idx = ecore_chain_get_cons_idx(&p_rx->rcq_chain); 555 if (cq_new_idx == cq_old_idx) 556 return ECORE_SUCCESS; 557 558 while (cq_new_idx != cq_old_idx) { 559 struct core_rx_fast_path_cqe *p_cqe_fp; 560 561 cqe = (union core_rx_cqe_union *)ecore_chain_consume(&p_rx->rcq_chain); 562 cq_old_idx = ecore_chain_get_cons_idx(&p_rx->rcq_chain); 563 cqe_type = cqe->rx_cqe_sp.type; 564 565 if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH) 566 if (ecore_ll2_lb_rxq_handler_slowpath(p_hwfn, 567 &cqe->rx_cqe_sp)) 568 continue; 569 570 if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) { 571 DP_NOTICE(p_hwfn, true, 572 "Got a non-regular LB LL2 completion [type 0x%02x]\n", 573 cqe_type); 574 return ECORE_INVAL; 575 } 576 p_cqe_fp = &cqe->rx_cqe_fp; 577 578 placement_offset = p_cqe_fp->placement_offset; 579 parse_flags = OSAL_LE16_TO_CPU(p_cqe_fp->parse_flags.flags); 580 packet_length = OSAL_LE16_TO_CPU(p_cqe_fp->packet_length); 581 vlan = OSAL_LE16_TO_CPU(p_cqe_fp->vlan); 582 iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data; 583 ecore_ooo_save_history_entry(p_hwfn->p_ooo_info, iscsi_ooo); 584 cid = OSAL_LE32_TO_CPU(iscsi_ooo->cid); 585 586 /* Process delete isle first*/ 587 if (iscsi_ooo->drop_size) 588 ecore_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid, 589 iscsi_ooo->drop_isle, 590 iscsi_ooo->drop_size); 591 592 if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP) 593 continue; 594 595 /* Now process create/add/join isles */ 596 if (OSAL_LIST_IS_EMPTY(&p_rx->active_descq)) { 597 DP_NOTICE(p_hwfn, true, 598 "LL2 OOO RX chain has no submitted buffers\n"); 599 return ECORE_IO; 600 } 601 602 p_pkt = OSAL_LIST_FIRST_ENTRY(&p_rx->active_descq, 603 struct ecore_ll2_rx_packet, 604 list_entry); 605 606 if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) || 607 (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) || 608 (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) || 609 (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) || 610 (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) { 611 if (!p_pkt) { 612 DP_NOTICE(p_hwfn, true, 613 "LL2 OOO RX packet is not valid\n"); 614 return ECORE_IO; 615 } 616 #if defined(_NTDDK_) 617 #pragma warning(suppress : 6011 28182) 618 #endif 619 OSAL_LIST_REMOVE_ENTRY(&p_pkt->list_entry, 620 &p_rx->active_descq); 621 p_buffer = (struct ecore_ooo_buffer *)p_pkt->cookie; 622 p_buffer->packet_length = packet_length; 623 p_buffer->parse_flags = parse_flags; 624 p_buffer->vlan = vlan; 625 p_buffer->placement_offset = placement_offset; 626 if (ecore_chain_consume(&p_rx->rxq_chain) != 627 p_pkt->rxq_bd) { 628 /**/ 629 } 630 ecore_ooo_dump_rx_event(p_hwfn, iscsi_ooo, p_buffer); 631 OSAL_LIST_PUSH_TAIL(&p_pkt->list_entry, 632 &p_rx->free_descq); 633 634 switch (iscsi_ooo->ooo_opcode) { 635 case TCP_EVENT_ADD_NEW_ISLE: 636 ecore_ooo_add_new_isle(p_hwfn, 637 p_hwfn->p_ooo_info, 638 cid, 639 iscsi_ooo->ooo_isle, 640 p_buffer); 641 break; 642 case TCP_EVENT_ADD_ISLE_RIGHT: 643 ecore_ooo_add_new_buffer(p_hwfn, 644 p_hwfn->p_ooo_info, 645 cid, 646 iscsi_ooo->ooo_isle, 647 p_buffer, 648 ECORE_OOO_RIGHT_BUF); 649 break; 650 case TCP_EVENT_ADD_ISLE_LEFT: 651 ecore_ooo_add_new_buffer(p_hwfn, 652 p_hwfn->p_ooo_info, 653 cid, 654 iscsi_ooo->ooo_isle, 655 p_buffer, 656 ECORE_OOO_LEFT_BUF); 657 break; 658 case TCP_EVENT_JOIN: 659 ecore_ooo_add_new_buffer(p_hwfn, 660 p_hwfn->p_ooo_info, 661 cid, 662 iscsi_ooo->ooo_isle + 663 1, 664 p_buffer, 665 ECORE_OOO_LEFT_BUF); 666 ecore_ooo_join_isles(p_hwfn, 667 p_hwfn->p_ooo_info, 668 cid, 669 iscsi_ooo->ooo_isle); 670 break; 671 case TCP_EVENT_ADD_PEN: 672 num_ooo_add_to_peninsula++; 673 ecore_ooo_put_ready_buffer(p_hwfn->p_ooo_info, 674 p_buffer, true); 675 break; 676 } 677 } else { 678 DP_NOTICE(p_hwfn, true, 679 "Unexpected event (%d) TX OOO completion\n", 680 iscsi_ooo->ooo_opcode); 681 } 682 } 683 684 return ECORE_SUCCESS; 685 } 686 687 static void 688 ecore_ooo_submit_tx_buffers(struct ecore_hwfn *p_hwfn, 689 struct ecore_ll2_info *p_ll2_conn) 690 { 691 struct ecore_ll2_tx_pkt_info tx_pkt; 692 struct ecore_ooo_buffer *p_buffer; 693 dma_addr_t first_frag; 694 u16 l4_hdr_offset_w; 695 u8 bd_flags; 696 enum _ecore_status_t rc; 697 698 /* Submit Tx buffers here */ 699 while ((p_buffer = ecore_ooo_get_ready_buffer(p_hwfn->p_ooo_info))) { 700 l4_hdr_offset_w = 0; 701 bd_flags = 0; 702 703 first_frag = p_buffer->rx_buffer_phys_addr + 704 p_buffer->placement_offset; 705 SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1); 706 SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1); 707 708 OSAL_MEM_ZERO(&tx_pkt, sizeof(tx_pkt)); 709 tx_pkt.num_of_bds = 1; 710 tx_pkt.vlan = p_buffer->vlan; 711 tx_pkt.bd_flags = bd_flags; 712 tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w; 713 tx_pkt.tx_dest = (enum ecore_ll2_tx_dest)p_ll2_conn->tx_dest; 714 tx_pkt.first_frag = first_frag; 715 tx_pkt.first_frag_len = p_buffer->packet_length; 716 tx_pkt.cookie = p_buffer; 717 718 rc = ecore_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 719 &tx_pkt, true); 720 if (rc != ECORE_SUCCESS) { 721 ecore_ooo_put_ready_buffer(p_hwfn->p_ooo_info, 722 p_buffer, false); 723 break; 724 } 725 } 726 } 727 728 static void 729 ecore_ooo_submit_rx_buffers(struct ecore_hwfn *p_hwfn, 730 struct ecore_ll2_info *p_ll2_conn) 731 { 732 struct ecore_ooo_buffer *p_buffer; 733 enum _ecore_status_t rc; 734 735 while ((p_buffer = ecore_ooo_get_free_buffer(p_hwfn->p_ooo_info))) { 736 rc = ecore_ll2_post_rx_buffer(p_hwfn, 737 p_ll2_conn->my_id, 738 p_buffer->rx_buffer_phys_addr, 739 0, p_buffer, true); 740 if (rc != ECORE_SUCCESS) { 741 ecore_ooo_put_free_buffer(p_hwfn->p_ooo_info, p_buffer); 742 break; 743 } 744 } 745 } 746 747 static enum _ecore_status_t 748 ecore_ll2_lb_rxq_completion(struct ecore_hwfn *p_hwfn, 749 void *p_cookie) 750 { 751 struct ecore_ll2_info *p_ll2_conn = (struct ecore_ll2_info *)p_cookie; 752 enum _ecore_status_t rc; 753 754 rc = ecore_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn); 755 if (rc != ECORE_SUCCESS) 756 return rc; 757 758 ecore_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn); 759 ecore_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn); 760 761 return 0; 762 } 763 764 static enum _ecore_status_t 765 ecore_ll2_lb_txq_completion(struct ecore_hwfn *p_hwfn, 766 void *p_cookie) 767 { 768 struct ecore_ll2_info *p_ll2_conn = (struct ecore_ll2_info *)p_cookie; 769 struct ecore_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; 770 struct ecore_ll2_tx_packet *p_pkt = OSAL_NULL; 771 struct ecore_ooo_buffer *p_buffer; 772 bool b_dont_submit_rx = false; 773 u16 new_idx = 0, num_bds = 0; 774 enum _ecore_status_t rc; 775 776 new_idx = OSAL_LE16_TO_CPU(*p_tx->p_fw_cons); 777 num_bds = ((s16)new_idx - (s16)p_tx->bds_idx); 778 779 if (!num_bds) 780 return ECORE_SUCCESS; 781 782 while (num_bds) { 783 784 if (OSAL_LIST_IS_EMPTY(&p_tx->active_descq)) 785 return ECORE_INVAL; 786 787 p_pkt = OSAL_LIST_FIRST_ENTRY(&p_tx->active_descq, 788 struct ecore_ll2_tx_packet, 789 list_entry); 790 if (!p_pkt) 791 return ECORE_INVAL; 792 793 if (p_pkt->bd_used != 1) { 794 DP_NOTICE(p_hwfn, true, 795 "Unexpectedly many BDs(%d) in TX OOO completion\n", 796 p_pkt->bd_used); 797 return ECORE_INVAL; 798 } 799 800 OSAL_LIST_REMOVE_ENTRY(&p_pkt->list_entry, 801 &p_tx->active_descq); 802 803 num_bds--; 804 p_tx->bds_idx++; 805 ecore_chain_consume(&p_tx->txq_chain); 806 807 p_buffer = (struct ecore_ooo_buffer *)p_pkt->cookie; 808 OSAL_LIST_PUSH_TAIL(&p_pkt->list_entry, 809 &p_tx->free_descq); 810 811 if (b_dont_submit_rx) { 812 ecore_ooo_put_free_buffer(p_hwfn->p_ooo_info, p_buffer); 813 continue; 814 } 815 816 rc = ecore_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id, 817 p_buffer->rx_buffer_phys_addr, 0, 818 p_buffer, true); 819 if (rc != ECORE_SUCCESS) { 820 ecore_ooo_put_free_buffer(p_hwfn->p_ooo_info, p_buffer); 821 b_dont_submit_rx = true; 822 } 823 } 824 825 ecore_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn); 826 827 return ECORE_SUCCESS; 828 } 829 830 static enum _ecore_status_t ecore_sp_ll2_rx_queue_start(struct ecore_hwfn *p_hwfn, 831 struct ecore_ll2_info *p_ll2_conn, 832 u8 action_on_error) 833 { 834 enum ecore_ll2_conn_type conn_type = p_ll2_conn->input.conn_type; 835 struct ecore_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; 836 struct core_rx_start_ramrod_data *p_ramrod = OSAL_NULL; 837 struct ecore_spq_entry *p_ent = OSAL_NULL; 838 struct ecore_sp_init_data init_data; 839 u16 cqe_pbl_size; 840 enum _ecore_status_t rc = ECORE_SUCCESS; 841 842 /* Get SPQ entry */ 843 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 844 init_data.cid = p_ll2_conn->cid; 845 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 846 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 847 848 rc = ecore_sp_init_request(p_hwfn, &p_ent, 849 CORE_RAMROD_RX_QUEUE_START, 850 PROTOCOLID_CORE, &init_data); 851 if (rc != ECORE_SUCCESS) 852 return rc; 853 854 p_ramrod = &p_ent->ramrod.core_rx_queue_start; 855 856 p_ramrod->sb_id = OSAL_CPU_TO_LE16(ecore_int_get_sp_sb_id(p_hwfn)); 857 p_ramrod->sb_index = p_rx->rx_sb_index; 858 p_ramrod->complete_event_flg = 1; 859 860 p_ramrod->mtu = OSAL_CPU_TO_LE16(p_ll2_conn->input.mtu); 861 DMA_REGPAIR_LE(p_ramrod->bd_base, 862 p_rx->rxq_chain.p_phys_addr); 863 cqe_pbl_size = (u16)ecore_chain_get_page_cnt(&p_rx->rcq_chain); 864 p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size); 865 DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, 866 ecore_chain_get_pbl_phys(&p_rx->rcq_chain)); 867 868 p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg; 869 p_ramrod->inner_vlan_stripping_en = 870 p_ll2_conn->input.rx_vlan_removal_en; 871 872 if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits) && 873 (p_ll2_conn->input.conn_type == ECORE_LL2_TYPE_FCOE)) 874 p_ramrod->report_outer_vlan = 1; 875 p_ramrod->queue_id = p_ll2_conn->queue_id; 876 p_ramrod->main_func_queue = p_ll2_conn->main_func_queue; 877 878 if (OSAL_TEST_BIT(ECORE_MF_LL2_NON_UNICAST, 879 &p_hwfn->p_dev->mf_bits) && 880 p_ramrod->main_func_queue && 881 ((conn_type != ECORE_LL2_TYPE_ROCE) && 882 (conn_type != ECORE_LL2_TYPE_IWARP))) { 883 p_ramrod->mf_si_bcast_accept_all = 1; 884 p_ramrod->mf_si_mcast_accept_all = 1; 885 } else { 886 p_ramrod->mf_si_bcast_accept_all = 0; 887 p_ramrod->mf_si_mcast_accept_all = 0; 888 } 889 890 p_ramrod->action_on_error.error_type = action_on_error; 891 p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable; 892 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 893 } 894 895 static enum _ecore_status_t ecore_sp_ll2_tx_queue_start(struct ecore_hwfn *p_hwfn, 896 struct ecore_ll2_info *p_ll2_conn) 897 { 898 enum ecore_ll2_conn_type conn_type = p_ll2_conn->input.conn_type; 899 struct ecore_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; 900 struct core_tx_start_ramrod_data *p_ramrod = OSAL_NULL; 901 struct ecore_spq_entry *p_ent = OSAL_NULL; 902 struct ecore_sp_init_data init_data; 903 u16 pq_id = 0, pbl_size; 904 enum _ecore_status_t rc = ECORE_NOTIMPL; 905 906 if (!ECORE_LL2_TX_REGISTERED(p_ll2_conn)) 907 return ECORE_SUCCESS; 908 909 if (p_ll2_conn->input.conn_type == ECORE_LL2_TYPE_OOO) 910 p_ll2_conn->tx_stats_en = 0; 911 else 912 p_ll2_conn->tx_stats_en = 1; 913 914 /* Get SPQ entry */ 915 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 916 init_data.cid = p_ll2_conn->cid; 917 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 918 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 919 920 rc = ecore_sp_init_request(p_hwfn, &p_ent, 921 CORE_RAMROD_TX_QUEUE_START, 922 PROTOCOLID_CORE, &init_data); 923 if (rc != ECORE_SUCCESS) 924 return rc; 925 926 p_ramrod = &p_ent->ramrod.core_tx_queue_start; 927 928 p_ramrod->sb_id = OSAL_CPU_TO_LE16(ecore_int_get_sp_sb_id(p_hwfn)); 929 p_ramrod->sb_index = p_tx->tx_sb_index; 930 p_ramrod->mtu = OSAL_CPU_TO_LE16(p_ll2_conn->input.mtu); 931 p_ramrod->stats_en = p_ll2_conn->tx_stats_en; 932 p_ramrod->stats_id = p_ll2_conn->tx_stats_id; 933 934 DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, 935 ecore_chain_get_pbl_phys(&p_tx->txq_chain)); 936 pbl_size = (u16)ecore_chain_get_page_cnt(&p_tx->txq_chain); 937 p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size); 938 939 /* TODO RESC_ALLOC pq for ll2 */ 940 switch (p_ll2_conn->input.tx_tc) { 941 case PURE_LB_TC: 942 pq_id = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB); 943 break; 944 case PKT_LB_TC: 945 pq_id = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO); 946 break; 947 default: 948 pq_id = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); 949 } 950 951 p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id); 952 953 switch (conn_type) { 954 case ECORE_LL2_TYPE_FCOE: 955 p_ramrod->conn_type = PROTOCOLID_FCOE; 956 break; 957 case ECORE_LL2_TYPE_ISCSI: 958 p_ramrod->conn_type = PROTOCOLID_ISCSI; 959 break; 960 case ECORE_LL2_TYPE_ROCE: 961 p_ramrod->conn_type = PROTOCOLID_ROCE; 962 break; 963 case ECORE_LL2_TYPE_IWARP: 964 p_ramrod->conn_type = PROTOCOLID_IWARP; 965 break; 966 case ECORE_LL2_TYPE_OOO: 967 if (p_hwfn->hw_info.personality == ECORE_PCI_ISCSI) { 968 p_ramrod->conn_type = PROTOCOLID_ISCSI; 969 } else { 970 p_ramrod->conn_type = PROTOCOLID_IWARP; 971 } 972 break; 973 default: 974 p_ramrod->conn_type = PROTOCOLID_ETH; 975 DP_NOTICE(p_hwfn, false, "Unknown connection type: %d\n", 976 conn_type); 977 } 978 979 p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable; 980 981 rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 982 if (rc != ECORE_SUCCESS) 983 return rc; 984 985 rc = ecore_db_recovery_add(p_hwfn->p_dev, p_tx->doorbell_addr, 986 &p_tx->db_msg, DB_REC_WIDTH_32B, 987 DB_REC_KERNEL); 988 return rc; 989 } 990 991 static enum _ecore_status_t ecore_sp_ll2_rx_queue_stop(struct ecore_hwfn *p_hwfn, 992 struct ecore_ll2_info *p_ll2_conn) 993 { 994 struct core_rx_stop_ramrod_data *p_ramrod = OSAL_NULL; 995 struct ecore_spq_entry *p_ent = OSAL_NULL; 996 struct ecore_sp_init_data init_data; 997 enum _ecore_status_t rc = ECORE_NOTIMPL; 998 999 /* Get SPQ entry */ 1000 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 1001 init_data.cid = p_ll2_conn->cid; 1002 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 1003 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 1004 1005 rc = ecore_sp_init_request(p_hwfn, &p_ent, 1006 CORE_RAMROD_RX_QUEUE_STOP, 1007 PROTOCOLID_CORE, &init_data); 1008 if (rc != ECORE_SUCCESS) 1009 return rc; 1010 1011 p_ramrod = &p_ent->ramrod.core_rx_queue_stop; 1012 1013 p_ramrod->complete_event_flg = 1; 1014 p_ramrod->queue_id = p_ll2_conn->queue_id; 1015 1016 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 1017 } 1018 1019 static enum _ecore_status_t ecore_sp_ll2_tx_queue_stop(struct ecore_hwfn *p_hwfn, 1020 struct ecore_ll2_info *p_ll2_conn) 1021 { 1022 struct ecore_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; 1023 struct ecore_spq_entry *p_ent = OSAL_NULL; 1024 struct ecore_sp_init_data init_data; 1025 enum _ecore_status_t rc = ECORE_NOTIMPL; 1026 1027 ecore_db_recovery_del(p_hwfn->p_dev, p_tx->doorbell_addr, 1028 &p_tx->db_msg); 1029 1030 /* Get SPQ entry */ 1031 OSAL_MEMSET(&init_data, 0, sizeof(init_data)); 1032 init_data.cid = p_ll2_conn->cid; 1033 init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 1034 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; 1035 1036 rc = ecore_sp_init_request(p_hwfn, &p_ent, 1037 CORE_RAMROD_TX_QUEUE_STOP, 1038 PROTOCOLID_CORE, &init_data); 1039 if (rc != ECORE_SUCCESS) 1040 return rc; 1041 1042 return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); 1043 } 1044 1045 static enum _ecore_status_t 1046 ecore_ll2_acquire_connection_rx(struct ecore_hwfn *p_hwfn, 1047 struct ecore_ll2_info *p_ll2_info) 1048 { 1049 struct ecore_ll2_rx_packet *p_descq; 1050 u32 capacity; 1051 enum _ecore_status_t rc = ECORE_SUCCESS; 1052 1053 if (!p_ll2_info->input.rx_num_desc) 1054 goto out; 1055 1056 rc = ecore_chain_alloc(p_hwfn->p_dev, 1057 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 1058 ECORE_CHAIN_MODE_NEXT_PTR, 1059 ECORE_CHAIN_CNT_TYPE_U16, 1060 p_ll2_info->input.rx_num_desc, 1061 sizeof(struct core_rx_bd), 1062 &p_ll2_info->rx_queue.rxq_chain, OSAL_NULL); 1063 if (rc) { 1064 DP_NOTICE(p_hwfn, false, 1065 "Failed to allocate ll2 rxq chain\n"); 1066 goto out; 1067 } 1068 1069 capacity = ecore_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain); 1070 p_descq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 1071 capacity * sizeof(struct ecore_ll2_rx_packet)); 1072 if (!p_descq) { 1073 rc = ECORE_NOMEM; 1074 DP_NOTICE(p_hwfn, false, 1075 "Failed to allocate ll2 Rx desc\n"); 1076 goto out; 1077 } 1078 p_ll2_info->rx_queue.descq_array = p_descq; 1079 1080 rc = ecore_chain_alloc(p_hwfn->p_dev, 1081 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 1082 ECORE_CHAIN_MODE_PBL, 1083 ECORE_CHAIN_CNT_TYPE_U16, 1084 p_ll2_info->input.rx_num_desc, 1085 sizeof(struct core_rx_fast_path_cqe), 1086 &p_ll2_info->rx_queue.rcq_chain, OSAL_NULL); 1087 if (rc != ECORE_SUCCESS) { 1088 DP_NOTICE(p_hwfn, false, 1089 "Failed to allocate ll2 rcq chain\n"); 1090 goto out; 1091 } 1092 1093 DP_VERBOSE(p_hwfn, ECORE_MSG_LL2, 1094 "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n", 1095 p_ll2_info->input.conn_type, 1096 p_ll2_info->input.rx_num_desc); 1097 1098 out: 1099 return rc; 1100 } 1101 1102 static enum _ecore_status_t 1103 ecore_ll2_acquire_connection_tx(struct ecore_hwfn *p_hwfn, 1104 struct ecore_ll2_info *p_ll2_info) 1105 { 1106 struct ecore_ll2_tx_packet *p_descq; 1107 u32 capacity; 1108 enum _ecore_status_t rc = ECORE_SUCCESS; 1109 u32 desc_size; 1110 1111 if (!p_ll2_info->input.tx_num_desc) 1112 goto out; 1113 1114 rc = ecore_chain_alloc(p_hwfn->p_dev, 1115 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 1116 ECORE_CHAIN_MODE_PBL, 1117 ECORE_CHAIN_CNT_TYPE_U16, 1118 p_ll2_info->input.tx_num_desc, 1119 sizeof(struct core_tx_bd), 1120 &p_ll2_info->tx_queue.txq_chain, OSAL_NULL); 1121 if (rc != ECORE_SUCCESS) 1122 goto out; 1123 1124 capacity = ecore_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain); 1125 desc_size = (sizeof(*p_descq) + 1126 (p_ll2_info->input.tx_max_bds_per_packet - 1) * 1127 sizeof(p_descq->bds_set)); 1128 1129 p_descq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 1130 capacity * desc_size); 1131 if (!p_descq) { 1132 rc = ECORE_NOMEM; 1133 goto out; 1134 } 1135 p_ll2_info->tx_queue.descq_array = p_descq; 1136 1137 DP_VERBOSE(p_hwfn, ECORE_MSG_LL2, 1138 "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n", 1139 p_ll2_info->input.conn_type, 1140 p_ll2_info->input.tx_num_desc); 1141 1142 out: 1143 if (rc != ECORE_SUCCESS) 1144 DP_NOTICE(p_hwfn, false, 1145 "Can't allocate memory for Tx LL2 with 0x%08x buffers\n", 1146 p_ll2_info->input.tx_num_desc); 1147 return rc; 1148 } 1149 1150 static enum _ecore_status_t 1151 ecore_ll2_acquire_connection_ooo(struct ecore_hwfn *p_hwfn, 1152 struct ecore_ll2_info *p_ll2_info, u16 mtu) 1153 { 1154 struct ecore_ooo_buffer *p_buf = OSAL_NULL; 1155 u32 rx_buffer_size = 0; 1156 void *p_virt; 1157 u16 buf_idx; 1158 enum _ecore_status_t rc = ECORE_SUCCESS; 1159 1160 if (p_ll2_info->input.conn_type != ECORE_LL2_TYPE_OOO) 1161 return rc; 1162 1163 /* Correct number of requested OOO buffers if needed */ 1164 if (!p_ll2_info->input.rx_num_ooo_buffers) { 1165 u16 num_desc = p_ll2_info->input.rx_num_desc; 1166 1167 if (!num_desc) 1168 return ECORE_INVAL; 1169 p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2; 1170 } 1171 1172 /* TODO - use some defines for buffer size */ 1173 rx_buffer_size = mtu + 14 + 4 + 8 + ETH_CACHE_LINE_SIZE; 1174 rx_buffer_size = (rx_buffer_size + ETH_CACHE_LINE_SIZE - 1) & 1175 ~(ETH_CACHE_LINE_SIZE - 1); 1176 1177 for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers; 1178 buf_idx++) { 1179 p_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_buf)); 1180 if (!p_buf) { 1181 DP_NOTICE(p_hwfn, false, 1182 "Failed to allocate ooo descriptor\n"); 1183 rc = ECORE_NOMEM; 1184 goto out; 1185 } 1186 1187 p_buf->rx_buffer_size = rx_buffer_size; 1188 p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, 1189 &p_buf->rx_buffer_phys_addr, 1190 p_buf->rx_buffer_size); 1191 if (!p_virt) { 1192 DP_NOTICE(p_hwfn, false, 1193 "Failed to allocate ooo buffer\n"); 1194 OSAL_FREE(p_hwfn->p_dev, p_buf); 1195 rc = ECORE_NOMEM; 1196 goto out; 1197 } 1198 p_buf->rx_buffer_virt_addr = p_virt; 1199 ecore_ooo_put_free_buffer(p_hwfn->p_ooo_info, p_buf); 1200 } 1201 1202 DP_VERBOSE(p_hwfn, ECORE_MSG_LL2, 1203 "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n", 1204 p_ll2_info->input.rx_num_ooo_buffers, rx_buffer_size); 1205 1206 out: 1207 return rc; 1208 } 1209 1210 static enum _ecore_status_t 1211 ecore_ll2_set_cbs(struct ecore_ll2_info *p_ll2_info, 1212 const struct ecore_ll2_cbs *cbs) 1213 { 1214 if (!cbs || (!cbs->rx_comp_cb || 1215 !cbs->rx_release_cb || 1216 !cbs->tx_comp_cb || 1217 !cbs->tx_release_cb || 1218 !cbs->cookie)) 1219 return ECORE_INVAL; 1220 1221 p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb; 1222 p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb; 1223 p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb; 1224 p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb; 1225 p_ll2_info->cbs.slowpath_cb = cbs->slowpath_cb; 1226 p_ll2_info->cbs.cookie = cbs->cookie; 1227 1228 return ECORE_SUCCESS; 1229 } 1230 1231 static enum core_error_handle 1232 ecore_ll2_get_error_choice(enum ecore_ll2_error_handle err) 1233 { 1234 switch (err) { 1235 case ECORE_LL2_DROP_PACKET: 1236 return LL2_DROP_PACKET; 1237 case ECORE_LL2_DO_NOTHING: 1238 return LL2_DO_NOTHING; 1239 case ECORE_LL2_ASSERT: 1240 return LL2_ASSERT; 1241 default: 1242 return LL2_DO_NOTHING; 1243 } 1244 } 1245 1246 enum _ecore_status_t 1247 ecore_ll2_acquire_connection(void *cxt, 1248 struct ecore_ll2_acquire_data *data) 1249 { 1250 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)cxt; 1251 ecore_int_comp_cb_t comp_rx_cb, comp_tx_cb; 1252 struct ecore_ll2_info *p_ll2_info = OSAL_NULL; 1253 enum _ecore_status_t rc; 1254 u8 i, *p_tx_max; 1255 1256 if (!data->p_connection_handle || !p_hwfn->p_ll2_info) { 1257 DP_NOTICE(p_hwfn, false, "Invalid connection handle, ll2_info not allocated\n"); 1258 return ECORE_INVAL; 1259 } 1260 1261 /* Find a free connection to be used */ 1262 for (i = 0; (i < ECORE_MAX_NUM_OF_LL2_CONNECTIONS); i++) { 1263 OSAL_MUTEX_ACQUIRE(&p_hwfn->p_ll2_info[i].mutex); 1264 if (p_hwfn->p_ll2_info[i].b_active) { 1265 OSAL_MUTEX_RELEASE(&p_hwfn->p_ll2_info[i].mutex); 1266 continue; 1267 } 1268 1269 p_hwfn->p_ll2_info[i].b_active = true; 1270 p_ll2_info = &p_hwfn->p_ll2_info[i]; 1271 OSAL_MUTEX_RELEASE(&p_hwfn->p_ll2_info[i].mutex); 1272 break; 1273 } 1274 if (p_ll2_info == OSAL_NULL) { 1275 DP_NOTICE(p_hwfn, false, "No available ll2 connection\n"); 1276 return ECORE_BUSY; 1277 } 1278 1279 OSAL_MEMCPY(&p_ll2_info->input, &data->input, 1280 sizeof(p_ll2_info->input)); 1281 1282 switch (data->input.tx_dest) { 1283 case ECORE_LL2_TX_DEST_NW: 1284 p_ll2_info->tx_dest = CORE_TX_DEST_NW; 1285 break; 1286 case ECORE_LL2_TX_DEST_LB: 1287 p_ll2_info->tx_dest = CORE_TX_DEST_LB; 1288 break; 1289 case ECORE_LL2_TX_DEST_DROP: 1290 p_ll2_info->tx_dest = CORE_TX_DEST_DROP; 1291 break; 1292 default: 1293 return ECORE_INVAL; 1294 } 1295 1296 if ((data->input.conn_type == ECORE_LL2_TYPE_OOO) || 1297 data->input.secondary_queue) 1298 p_ll2_info->main_func_queue = false; 1299 else 1300 p_ll2_info->main_func_queue = true; 1301 1302 /* Correct maximum number of Tx BDs */ 1303 p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet; 1304 if (*p_tx_max == 0) 1305 *p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET; 1306 else 1307 *p_tx_max = OSAL_MIN_T(u8, *p_tx_max, 1308 CORE_LL2_TX_MAX_BDS_PER_PACKET); 1309 1310 rc = ecore_ll2_set_cbs(p_ll2_info, data->cbs); 1311 if (rc) { 1312 DP_NOTICE(p_hwfn, false, "Invalid callback functions\n"); 1313 goto q_allocate_fail; 1314 } 1315 1316 rc = ecore_ll2_acquire_connection_rx(p_hwfn, p_ll2_info); 1317 if (rc != ECORE_SUCCESS) { 1318 DP_NOTICE(p_hwfn, false, "ll2 acquire rx connection failed\n"); 1319 goto q_allocate_fail; 1320 } 1321 1322 rc = ecore_ll2_acquire_connection_tx(p_hwfn, p_ll2_info); 1323 if (rc != ECORE_SUCCESS) { 1324 DP_NOTICE(p_hwfn, false, "ll2 acquire tx connection failed\n"); 1325 goto q_allocate_fail; 1326 } 1327 1328 rc = ecore_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info, 1329 data->input.mtu); 1330 if (rc != ECORE_SUCCESS) { 1331 DP_NOTICE(p_hwfn, false, "ll2 acquire ooo connection failed\n"); 1332 goto q_allocate_fail; 1333 } 1334 1335 /* Register callbacks for the Rx/Tx queues */ 1336 if (data->input.conn_type == ECORE_LL2_TYPE_OOO) { 1337 comp_rx_cb = ecore_ll2_lb_rxq_completion; 1338 comp_tx_cb = ecore_ll2_lb_txq_completion; 1339 1340 } else { 1341 comp_rx_cb = ecore_ll2_rxq_completion; 1342 comp_tx_cb = ecore_ll2_txq_completion; 1343 } 1344 1345 if (data->input.rx_num_desc) { 1346 ecore_int_register_cb(p_hwfn, comp_rx_cb, 1347 &p_hwfn->p_ll2_info[i], 1348 &p_ll2_info->rx_queue.rx_sb_index, 1349 &p_ll2_info->rx_queue.p_fw_cons); 1350 p_ll2_info->rx_queue.b_cb_registred = true; 1351 } 1352 1353 if (data->input.tx_num_desc) { 1354 ecore_int_register_cb(p_hwfn, 1355 comp_tx_cb, 1356 &p_hwfn->p_ll2_info[i], 1357 &p_ll2_info->tx_queue.tx_sb_index, 1358 &p_ll2_info->tx_queue.p_fw_cons); 1359 p_ll2_info->tx_queue.b_cb_registred = true; 1360 } 1361 1362 *(data->p_connection_handle) = i; 1363 return rc; 1364 1365 q_allocate_fail: 1366 ecore_ll2_release_connection(p_hwfn, i); 1367 return ECORE_NOMEM; 1368 } 1369 1370 static enum _ecore_status_t ecore_ll2_establish_connection_rx(struct ecore_hwfn *p_hwfn, 1371 struct ecore_ll2_info *p_ll2_conn) 1372 { 1373 enum ecore_ll2_error_handle error_input; 1374 enum core_error_handle error_mode; 1375 u8 action_on_error = 0; 1376 1377 if (!ECORE_LL2_RX_REGISTERED(p_ll2_conn)) 1378 return ECORE_SUCCESS; 1379 1380 DIRECT_REG_WR(p_hwfn, p_ll2_conn->rx_queue.set_prod_addr, 0x0); 1381 error_input = p_ll2_conn->input.ai_err_packet_too_big; 1382 error_mode = ecore_ll2_get_error_choice(error_input); 1383 SET_FIELD(action_on_error, 1384 CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode); 1385 error_input = p_ll2_conn->input.ai_err_no_buf; 1386 error_mode = ecore_ll2_get_error_choice(error_input); 1387 SET_FIELD(action_on_error, 1388 CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode); 1389 1390 return ecore_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error); 1391 } 1392 1393 static void 1394 ecore_ll2_establish_connection_ooo(struct ecore_hwfn *p_hwfn, 1395 struct ecore_ll2_info *p_ll2_conn) 1396 { 1397 if (p_ll2_conn->input.conn_type != ECORE_LL2_TYPE_OOO) 1398 return; 1399 1400 ecore_ooo_release_all_isles(p_hwfn->p_ooo_info); 1401 ecore_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn); 1402 } 1403 1404 enum _ecore_status_t ecore_ll2_establish_connection(void *cxt, 1405 u8 connection_handle) 1406 { 1407 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)cxt; 1408 struct e4_core_conn_context *p_cxt; 1409 struct ecore_ll2_info *p_ll2_conn; 1410 struct ecore_cxt_info cxt_info; 1411 struct ecore_ll2_rx_queue *p_rx; 1412 struct ecore_ll2_tx_queue *p_tx; 1413 struct ecore_ll2_tx_packet *p_pkt; 1414 struct ecore_ptt *p_ptt; 1415 enum _ecore_status_t rc = ECORE_NOTIMPL; 1416 u32 i, capacity; 1417 u32 desc_size; 1418 u8 qid; 1419 1420 p_ptt = ecore_ptt_acquire(p_hwfn); 1421 if (!p_ptt) 1422 return ECORE_AGAIN; 1423 1424 p_ll2_conn = ecore_ll2_handle_sanity_lock(p_hwfn, connection_handle); 1425 if (p_ll2_conn == OSAL_NULL) { 1426 rc = ECORE_INVAL; 1427 goto out; 1428 } 1429 1430 p_rx = &p_ll2_conn->rx_queue; 1431 p_tx = &p_ll2_conn->tx_queue; 1432 1433 ecore_chain_reset(&p_rx->rxq_chain); 1434 ecore_chain_reset(&p_rx->rcq_chain); 1435 OSAL_LIST_INIT(&p_rx->active_descq); 1436 OSAL_LIST_INIT(&p_rx->free_descq); 1437 OSAL_LIST_INIT(&p_rx->posting_descq); 1438 OSAL_SPIN_LOCK_INIT(&p_rx->lock); 1439 capacity = ecore_chain_get_capacity(&p_rx->rxq_chain); 1440 for (i = 0; i < capacity; i++) 1441 OSAL_LIST_PUSH_TAIL(&p_rx->descq_array[i].list_entry, 1442 &p_rx->free_descq); 1443 *p_rx->p_fw_cons = 0; 1444 1445 ecore_chain_reset(&p_tx->txq_chain); 1446 OSAL_LIST_INIT(&p_tx->active_descq); 1447 OSAL_LIST_INIT(&p_tx->free_descq); 1448 OSAL_LIST_INIT(&p_tx->sending_descq); 1449 OSAL_SPIN_LOCK_INIT(&p_tx->lock); 1450 capacity = ecore_chain_get_capacity(&p_tx->txq_chain); 1451 /* The size of the element in descq_array is flexible */ 1452 desc_size = (sizeof(*p_pkt) + 1453 (p_ll2_conn->input.tx_max_bds_per_packet - 1) * 1454 sizeof(p_pkt->bds_set)); 1455 1456 for (i = 0; i < capacity; i++) { 1457 p_pkt = (struct ecore_ll2_tx_packet *)((u8 *)p_tx->descq_array + 1458 desc_size*i); 1459 OSAL_LIST_PUSH_TAIL(&p_pkt->list_entry, 1460 &p_tx->free_descq); 1461 } 1462 p_tx->cur_completing_bd_idx = 0; 1463 p_tx->bds_idx = 0; 1464 p_tx->b_completing_packet = false; 1465 p_tx->cur_send_packet = OSAL_NULL; 1466 p_tx->cur_send_frag_num = 0; 1467 p_tx->cur_completing_frag_num = 0; 1468 *p_tx->p_fw_cons = 0; 1469 1470 rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid); 1471 if (rc) 1472 goto out; 1473 cxt_info.iid = p_ll2_conn->cid; 1474 rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info); 1475 if (rc) { 1476 DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n", 1477 p_ll2_conn->cid); 1478 goto out; 1479 } 1480 1481 p_cxt = cxt_info.p_cxt; 1482 1483 /* @@@TBD we zero the context until we have ilt_reset implemented. */ 1484 OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt)); 1485 1486 qid = ecore_ll2_handle_to_queue_id(p_hwfn, connection_handle); 1487 p_ll2_conn->queue_id = qid; 1488 p_ll2_conn->tx_stats_id = qid; 1489 p_rx->set_prod_addr = (u8 OSAL_IOMEM*)p_hwfn->regview + 1490 GTT_BAR0_MAP_REG_TSDM_RAM + 1491 TSTORM_LL2_RX_PRODS_OFFSET(qid); 1492 p_tx->doorbell_addr = (u8 OSAL_IOMEM*)p_hwfn->doorbells + 1493 DB_ADDR(p_ll2_conn->cid, 1494 DQ_DEMS_LEGACY); 1495 1496 /* prepare db data */ 1497 SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM); 1498 SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_CMD, 1499 DB_AGG_CMD_SET); 1500 SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_VAL_SEL, 1501 DQ_XCM_CORE_TX_BD_PROD_CMD); 1502 p_tx->db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD; 1503 1504 rc = ecore_ll2_establish_connection_rx(p_hwfn, p_ll2_conn); 1505 if (rc) 1506 goto out; 1507 1508 rc = ecore_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn); 1509 if (rc) 1510 goto out; 1511 1512 if (!ECORE_IS_RDMA_PERSONALITY(p_hwfn)) 1513 ecore_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1); 1514 1515 ecore_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn); 1516 1517 if (p_ll2_conn->input.conn_type == ECORE_LL2_TYPE_FCOE) { 1518 if (!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, 1519 &p_hwfn->p_dev->mf_bits)) 1520 ecore_llh_add_protocol_filter(p_hwfn->p_dev, 0, 1521 ECORE_LLH_FILTER_ETHERTYPE, 1522 0x8906, 0); 1523 ecore_llh_add_protocol_filter(p_hwfn->p_dev, 0, 1524 ECORE_LLH_FILTER_ETHERTYPE, 1525 0x8914, 0); 1526 } 1527 1528 out: 1529 ecore_ptt_release(p_hwfn, p_ptt); 1530 1531 return rc; 1532 } 1533 1534 static void ecore_ll2_post_rx_buffer_notify_fw(struct ecore_hwfn *p_hwfn, 1535 struct ecore_ll2_rx_queue *p_rx, 1536 struct ecore_ll2_rx_packet *p_curp) 1537 { 1538 struct ecore_ll2_rx_packet *p_posting_packet = OSAL_NULL; 1539 struct core_ll2_rx_prod rx_prod = {0, 0, 0}; 1540 bool b_notify_fw = false; 1541 u16 bd_prod, cq_prod; 1542 1543 /* This handles the flushing of already posted buffers */ 1544 while (!OSAL_LIST_IS_EMPTY(&p_rx->posting_descq)) { 1545 p_posting_packet = OSAL_LIST_FIRST_ENTRY(&p_rx->posting_descq, 1546 struct ecore_ll2_rx_packet, 1547 list_entry); 1548 #if defined(_NTDDK_) 1549 #pragma warning(suppress : 6011 28182) 1550 #endif 1551 OSAL_LIST_REMOVE_ENTRY(&p_posting_packet->list_entry, &p_rx->posting_descq); 1552 OSAL_LIST_PUSH_TAIL(&p_posting_packet->list_entry, &p_rx->active_descq); 1553 b_notify_fw = true; 1554 } 1555 1556 /* This handles the supplied packet [if there is one] */ 1557 if (p_curp) { 1558 OSAL_LIST_PUSH_TAIL(&p_curp->list_entry, 1559 &p_rx->active_descq); 1560 b_notify_fw = true; 1561 } 1562 1563 if (!b_notify_fw) 1564 return; 1565 1566 bd_prod = ecore_chain_get_prod_idx(&p_rx->rxq_chain); 1567 cq_prod = ecore_chain_get_prod_idx(&p_rx->rcq_chain); 1568 rx_prod.bd_prod = OSAL_CPU_TO_LE16(bd_prod); 1569 rx_prod.cqe_prod = OSAL_CPU_TO_LE16(cq_prod); 1570 DIRECT_REG_WR(p_hwfn, p_rx->set_prod_addr, *((u32 *)&rx_prod)); 1571 } 1572 1573 enum _ecore_status_t ecore_ll2_post_rx_buffer(void *cxt, 1574 u8 connection_handle, 1575 dma_addr_t addr, 1576 u16 buf_len, 1577 void *cookie, 1578 u8 notify_fw) 1579 { 1580 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)cxt; 1581 struct core_rx_bd_with_buff_len *p_curb = OSAL_NULL; 1582 struct ecore_ll2_rx_packet *p_curp = OSAL_NULL; 1583 struct ecore_ll2_info *p_ll2_conn; 1584 struct ecore_ll2_rx_queue *p_rx; 1585 unsigned long flags; 1586 void *p_data; 1587 enum _ecore_status_t rc = ECORE_SUCCESS; 1588 1589 p_ll2_conn = ecore_ll2_handle_sanity(p_hwfn, connection_handle); 1590 if (p_ll2_conn == OSAL_NULL) 1591 return ECORE_INVAL; 1592 p_rx = &p_ll2_conn->rx_queue; 1593 if (p_rx->set_prod_addr == OSAL_NULL) 1594 return ECORE_IO; 1595 1596 OSAL_SPIN_LOCK_IRQSAVE(&p_rx->lock, flags); 1597 if (!OSAL_LIST_IS_EMPTY(&p_rx->free_descq)) 1598 p_curp = OSAL_LIST_FIRST_ENTRY(&p_rx->free_descq, 1599 struct ecore_ll2_rx_packet, 1600 list_entry); 1601 if (p_curp) { 1602 if (ecore_chain_get_elem_left(&p_rx->rxq_chain) && 1603 ecore_chain_get_elem_left(&p_rx->rcq_chain)) { 1604 p_data = ecore_chain_produce(&p_rx->rxq_chain); 1605 p_curb = (struct core_rx_bd_with_buff_len *)p_data; 1606 ecore_chain_produce(&p_rx->rcq_chain); 1607 } 1608 } 1609 1610 /* If we're lacking entires, let's try to flush buffers to FW */ 1611 if (!p_curp || !p_curb) { 1612 rc = ECORE_BUSY; 1613 p_curp = OSAL_NULL; 1614 goto out_notify; 1615 } 1616 1617 /* We have an Rx packet we can fill */ 1618 DMA_REGPAIR_LE(p_curb->addr, addr); 1619 p_curb->buff_length = OSAL_CPU_TO_LE16(buf_len); 1620 p_curp->rx_buf_addr = addr; 1621 p_curp->cookie = cookie; 1622 p_curp->rxq_bd = p_curb; 1623 p_curp->buf_length = buf_len; 1624 OSAL_LIST_REMOVE_ENTRY(&p_curp->list_entry, 1625 &p_rx->free_descq); 1626 1627 /* Check if we only want to enqueue this packet without informing FW */ 1628 if (!notify_fw) { 1629 OSAL_LIST_PUSH_TAIL(&p_curp->list_entry, 1630 &p_rx->posting_descq); 1631 goto out; 1632 } 1633 1634 out_notify: 1635 ecore_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp); 1636 out: 1637 OSAL_SPIN_UNLOCK_IRQSAVE(&p_rx->lock, flags); 1638 return rc; 1639 } 1640 1641 static void ecore_ll2_prepare_tx_packet_set(struct ecore_ll2_tx_queue *p_tx, 1642 struct ecore_ll2_tx_packet *p_curp, 1643 struct ecore_ll2_tx_pkt_info *pkt, 1644 u8 notify_fw) 1645 { 1646 OSAL_LIST_REMOVE_ENTRY(&p_curp->list_entry, 1647 &p_tx->free_descq); 1648 p_curp->cookie = pkt->cookie; 1649 p_curp->bd_used = pkt->num_of_bds; 1650 p_curp->notify_fw = notify_fw; 1651 p_tx->cur_send_packet = p_curp; 1652 p_tx->cur_send_frag_num = 0; 1653 1654 p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag; 1655 p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len; 1656 p_tx->cur_send_frag_num++; 1657 } 1658 1659 static void ecore_ll2_prepare_tx_packet_set_bd( 1660 struct ecore_hwfn *p_hwfn, 1661 struct ecore_ll2_info *p_ll2, 1662 struct ecore_ll2_tx_packet *p_curp, 1663 struct ecore_ll2_tx_pkt_info *pkt) 1664 { 1665 struct ecore_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain; 1666 u16 prod_idx = ecore_chain_get_prod_idx(p_tx_chain); 1667 struct core_tx_bd *start_bd = OSAL_NULL; 1668 enum core_roce_flavor_type roce_flavor; 1669 enum core_tx_dest tx_dest; 1670 u16 bd_data = 0, frag_idx; 1671 1672 roce_flavor = (pkt->ecore_roce_flavor == ECORE_LL2_ROCE) ? 1673 CORE_ROCE : CORE_RROCE; 1674 1675 switch (pkt->tx_dest) { 1676 case ECORE_LL2_TX_DEST_NW: 1677 tx_dest = CORE_TX_DEST_NW; 1678 break; 1679 case ECORE_LL2_TX_DEST_LB: 1680 tx_dest = CORE_TX_DEST_LB; 1681 break; 1682 case ECORE_LL2_TX_DEST_DROP: 1683 tx_dest = CORE_TX_DEST_DROP; 1684 break; 1685 default: 1686 tx_dest = CORE_TX_DEST_LB; 1687 break; 1688 } 1689 1690 start_bd = (struct core_tx_bd*)ecore_chain_produce(p_tx_chain); 1691 1692 if (ECORE_IS_IWARP_PERSONALITY(p_hwfn) && 1693 (p_ll2->input.conn_type == ECORE_LL2_TYPE_OOO)) { 1694 start_bd->nw_vlan_or_lb_echo = 1695 OSAL_CPU_TO_LE16(IWARP_LL2_IN_ORDER_TX_QUEUE); 1696 } else { 1697 start_bd->nw_vlan_or_lb_echo = OSAL_CPU_TO_LE16(pkt->vlan); 1698 if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits) && 1699 (p_ll2->input.conn_type == ECORE_LL2_TYPE_FCOE)) 1700 pkt->remove_stag = true; 1701 } 1702 1703 SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, 1704 OSAL_CPU_TO_LE16(pkt->l4_hdr_offset_w)); 1705 SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest); 1706 bd_data |= pkt->bd_flags; 1707 SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1); 1708 SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds); 1709 SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor); 1710 SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum)); 1711 SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum)); 1712 SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len)); 1713 SET_FIELD(bd_data, CORE_TX_BD_DATA_DISABLE_STAG_INSERTION, 1714 !!(pkt->remove_stag)); 1715 1716 start_bd->bd_data.as_bitfield = OSAL_CPU_TO_LE16(bd_data); 1717 DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag); 1718 start_bd->nbytes = OSAL_CPU_TO_LE16(pkt->first_frag_len); 1719 1720 DP_VERBOSE(p_hwfn, (ECORE_MSG_TX_QUEUED | ECORE_MSG_LL2), 1721 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n", 1722 p_ll2->queue_id, p_ll2->cid, p_ll2->input.conn_type, 1723 prod_idx, pkt->first_frag_len, pkt->num_of_bds, 1724 OSAL_LE32_TO_CPU(start_bd->addr.hi), 1725 OSAL_LE32_TO_CPU(start_bd->addr.lo)); 1726 1727 if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds) 1728 return; 1729 1730 /* Need to provide the packet with additional BDs for frags */ 1731 for (frag_idx = p_ll2->tx_queue.cur_send_frag_num; 1732 frag_idx < pkt->num_of_bds; frag_idx++) { 1733 struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd; 1734 1735 *p_bd = (struct core_tx_bd *)ecore_chain_produce(p_tx_chain); 1736 (*p_bd)->bd_data.as_bitfield = 0; 1737 (*p_bd)->bitfield1 = 0; 1738 p_curp->bds_set[frag_idx].tx_frag = 0; 1739 p_curp->bds_set[frag_idx].frag_len = 0; 1740 } 1741 } 1742 1743 /* This should be called while the Txq spinlock is being held */ 1744 static void ecore_ll2_tx_packet_notify(struct ecore_hwfn *p_hwfn, 1745 struct ecore_ll2_info *p_ll2_conn) 1746 { 1747 bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw; 1748 struct ecore_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; 1749 struct ecore_ll2_tx_packet *p_pkt = OSAL_NULL; 1750 u16 bd_prod; 1751 1752 /* If there are missing BDs, don't do anything now */ 1753 if (p_ll2_conn->tx_queue.cur_send_frag_num != 1754 p_ll2_conn->tx_queue.cur_send_packet->bd_used) 1755 return; 1756 1757 1758 /* Push the current packet to the list and clean after it */ 1759 OSAL_LIST_PUSH_TAIL(&p_ll2_conn->tx_queue.cur_send_packet->list_entry, 1760 &p_ll2_conn->tx_queue.sending_descq); 1761 p_ll2_conn->tx_queue.cur_send_packet = OSAL_NULL; 1762 p_ll2_conn->tx_queue.cur_send_frag_num = 0; 1763 1764 /* Notify FW of packet only if requested to */ 1765 if (!b_notify) 1766 return; 1767 1768 bd_prod = ecore_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain); 1769 1770 while (!OSAL_LIST_IS_EMPTY(&p_tx->sending_descq)) { 1771 p_pkt = OSAL_LIST_FIRST_ENTRY(&p_tx->sending_descq, 1772 struct ecore_ll2_tx_packet, 1773 list_entry); 1774 if (p_pkt == OSAL_NULL) 1775 break; 1776 #if defined(_NTDDK_) 1777 #pragma warning(suppress : 6011 28182) 1778 #endif 1779 OSAL_LIST_REMOVE_ENTRY(&p_pkt->list_entry, 1780 &p_tx->sending_descq); 1781 OSAL_LIST_PUSH_TAIL(&p_pkt->list_entry, &p_tx->active_descq); 1782 } 1783 1784 p_tx->db_msg.spq_prod = OSAL_CPU_TO_LE16(bd_prod); 1785 1786 /* Make sure the BDs data is updated before ringing the doorbell */ 1787 OSAL_WMB(p_hwfn->p_dev); 1788 1789 //DIRECT_REG_WR(p_hwfn, p_tx->doorbell_addr, *((u32 *)&p_tx->db_msg)); 1790 DIRECT_REG_WR_DB(p_hwfn, p_tx->doorbell_addr, *((u32 *)&p_tx->db_msg)); 1791 1792 DP_VERBOSE(p_hwfn, (ECORE_MSG_TX_QUEUED | ECORE_MSG_LL2), 1793 "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n", 1794 p_ll2_conn->queue_id, p_ll2_conn->cid, 1795 p_ll2_conn->input.conn_type, 1796 p_tx->db_msg.spq_prod); 1797 } 1798 1799 enum _ecore_status_t ecore_ll2_prepare_tx_packet( 1800 void *cxt, 1801 u8 connection_handle, 1802 struct ecore_ll2_tx_pkt_info *pkt, 1803 bool notify_fw) 1804 { 1805 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)cxt; 1806 struct ecore_ll2_tx_packet *p_curp = OSAL_NULL; 1807 struct ecore_ll2_info *p_ll2_conn = OSAL_NULL; 1808 struct ecore_ll2_tx_queue *p_tx; 1809 struct ecore_chain *p_tx_chain; 1810 unsigned long flags; 1811 enum _ecore_status_t rc = ECORE_SUCCESS; 1812 1813 p_ll2_conn = ecore_ll2_handle_sanity(p_hwfn, connection_handle); 1814 if (p_ll2_conn == OSAL_NULL) 1815 return ECORE_INVAL; 1816 p_tx = &p_ll2_conn->tx_queue; 1817 p_tx_chain = &p_tx->txq_chain; 1818 1819 if (pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet) 1820 return ECORE_IO; /* coalescing is requireed */ 1821 1822 OSAL_SPIN_LOCK_IRQSAVE(&p_tx->lock, flags); 1823 if (p_tx->cur_send_packet) { 1824 rc = ECORE_EXISTS; 1825 goto out; 1826 } 1827 1828 /* Get entry, but only if we have tx elements for it */ 1829 if (!OSAL_LIST_IS_EMPTY(&p_tx->free_descq)) 1830 p_curp = OSAL_LIST_FIRST_ENTRY(&p_tx->free_descq, 1831 struct ecore_ll2_tx_packet, 1832 list_entry); 1833 if (p_curp && ecore_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds) 1834 p_curp = OSAL_NULL; 1835 1836 if (!p_curp) { 1837 rc = ECORE_BUSY; 1838 goto out; 1839 } 1840 1841 /* Prepare packet and BD, and perhaps send a doorbell to FW */ 1842 ecore_ll2_prepare_tx_packet_set(p_tx, p_curp, pkt, notify_fw); 1843 1844 ecore_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, 1845 pkt); 1846 1847 ecore_ll2_tx_packet_notify(p_hwfn, p_ll2_conn); 1848 1849 out: 1850 OSAL_SPIN_UNLOCK_IRQSAVE(&p_tx->lock, flags); 1851 return rc; 1852 } 1853 1854 enum _ecore_status_t ecore_ll2_set_fragment_of_tx_packet(void *cxt, 1855 u8 connection_handle, 1856 dma_addr_t addr, 1857 u16 nbytes) 1858 { 1859 struct ecore_ll2_tx_packet *p_cur_send_packet = OSAL_NULL; 1860 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)cxt; 1861 struct ecore_ll2_info *p_ll2_conn = OSAL_NULL; 1862 u16 cur_send_frag_num = 0; 1863 struct core_tx_bd *p_bd; 1864 unsigned long flags; 1865 1866 p_ll2_conn = ecore_ll2_handle_sanity(p_hwfn, connection_handle); 1867 if (p_ll2_conn == OSAL_NULL) 1868 return ECORE_INVAL; 1869 1870 if (!p_ll2_conn->tx_queue.cur_send_packet) 1871 return ECORE_INVAL; 1872 1873 p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet; 1874 cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num; 1875 1876 if (cur_send_frag_num >= p_cur_send_packet->bd_used) 1877 return ECORE_INVAL; 1878 1879 /* Fill the BD information, and possibly notify FW */ 1880 p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd; 1881 DMA_REGPAIR_LE(p_bd->addr, addr); 1882 p_bd->nbytes = OSAL_CPU_TO_LE16(nbytes); 1883 p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr; 1884 p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes; 1885 1886 p_ll2_conn->tx_queue.cur_send_frag_num++; 1887 1888 OSAL_SPIN_LOCK_IRQSAVE(&p_ll2_conn->tx_queue.lock, flags); 1889 ecore_ll2_tx_packet_notify(p_hwfn, p_ll2_conn); 1890 OSAL_SPIN_UNLOCK_IRQSAVE(&p_ll2_conn->tx_queue.lock, flags); 1891 1892 return ECORE_SUCCESS; 1893 } 1894 1895 enum _ecore_status_t ecore_ll2_terminate_connection(void *cxt, 1896 u8 connection_handle) 1897 { 1898 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)cxt; 1899 struct ecore_ll2_info *p_ll2_conn = OSAL_NULL; 1900 enum _ecore_status_t rc = ECORE_NOTIMPL; 1901 struct ecore_ptt *p_ptt; 1902 1903 p_ptt = ecore_ptt_acquire(p_hwfn); 1904 if (!p_ptt) 1905 return ECORE_AGAIN; 1906 1907 p_ll2_conn = ecore_ll2_handle_sanity_lock(p_hwfn, connection_handle); 1908 if (p_ll2_conn == OSAL_NULL) { 1909 rc = ECORE_INVAL; 1910 goto out; 1911 } 1912 1913 /* Stop Tx & Rx of connection, if needed */ 1914 if (ECORE_LL2_TX_REGISTERED(p_ll2_conn)) { 1915 rc = ecore_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn); 1916 if (rc != ECORE_SUCCESS) 1917 goto out; 1918 ecore_ll2_txq_flush(p_hwfn, connection_handle); 1919 } 1920 1921 if (ECORE_LL2_RX_REGISTERED(p_ll2_conn)) { 1922 rc = ecore_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn); 1923 if (rc) 1924 goto out; 1925 ecore_ll2_rxq_flush(p_hwfn, connection_handle); 1926 } 1927 1928 if (p_ll2_conn->input.conn_type == ECORE_LL2_TYPE_OOO) 1929 ecore_ooo_release_all_isles(p_hwfn->p_ooo_info); 1930 1931 if (p_ll2_conn->input.conn_type == ECORE_LL2_TYPE_FCOE) { 1932 if (!OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, 1933 &p_hwfn->p_dev->mf_bits)) 1934 ecore_llh_remove_protocol_filter(p_hwfn->p_dev, 0, 1935 ECORE_LLH_FILTER_ETHERTYPE, 1936 0x8906, 0); 1937 ecore_llh_remove_protocol_filter(p_hwfn->p_dev, 0, 1938 ECORE_LLH_FILTER_ETHERTYPE, 1939 0x8914, 0); 1940 } 1941 1942 out: 1943 ecore_ptt_release(p_hwfn, p_ptt); 1944 1945 return rc; 1946 } 1947 1948 static void ecore_ll2_release_connection_ooo(struct ecore_hwfn *p_hwfn, 1949 struct ecore_ll2_info *p_ll2_conn) 1950 { 1951 struct ecore_ooo_buffer *p_buffer; 1952 1953 if (p_ll2_conn->input.conn_type != ECORE_LL2_TYPE_OOO) 1954 return; 1955 1956 ecore_ooo_release_all_isles(p_hwfn->p_ooo_info); 1957 while ((p_buffer = ecore_ooo_get_free_buffer(p_hwfn->p_ooo_info))) { 1958 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, 1959 p_buffer->rx_buffer_virt_addr, 1960 p_buffer->rx_buffer_phys_addr, 1961 p_buffer->rx_buffer_size); 1962 OSAL_FREE(p_hwfn->p_dev, p_buffer); 1963 } 1964 } 1965 1966 void ecore_ll2_release_connection(void *cxt, 1967 u8 connection_handle) 1968 { 1969 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)cxt; 1970 struct ecore_ll2_info *p_ll2_conn = OSAL_NULL; 1971 1972 p_ll2_conn = ecore_ll2_handle_sanity(p_hwfn, connection_handle); 1973 if (p_ll2_conn == OSAL_NULL) 1974 return; 1975 1976 if (ECORE_LL2_RX_REGISTERED(p_ll2_conn)) { 1977 p_ll2_conn->rx_queue.b_cb_registred = false; 1978 ecore_int_unregister_cb(p_hwfn, 1979 p_ll2_conn->rx_queue.rx_sb_index); 1980 } 1981 1982 if (ECORE_LL2_TX_REGISTERED(p_ll2_conn)) { 1983 p_ll2_conn->tx_queue.b_cb_registred = false; 1984 ecore_int_unregister_cb(p_hwfn, 1985 p_ll2_conn->tx_queue.tx_sb_index); 1986 } 1987 1988 OSAL_FREE(p_hwfn->p_dev, p_ll2_conn->tx_queue.descq_array); 1989 ecore_chain_free(p_hwfn->p_dev, &p_ll2_conn->tx_queue.txq_chain); 1990 1991 OSAL_FREE(p_hwfn->p_dev, p_ll2_conn->rx_queue.descq_array); 1992 ecore_chain_free(p_hwfn->p_dev, &p_ll2_conn->rx_queue.rxq_chain); 1993 ecore_chain_free(p_hwfn->p_dev, &p_ll2_conn->rx_queue.rcq_chain); 1994 1995 ecore_cxt_release_cid(p_hwfn, p_ll2_conn->cid); 1996 1997 ecore_ll2_release_connection_ooo(p_hwfn, p_ll2_conn); 1998 1999 OSAL_MUTEX_ACQUIRE(&p_ll2_conn->mutex); 2000 p_ll2_conn->b_active = false; 2001 OSAL_MUTEX_RELEASE(&p_ll2_conn->mutex); 2002 } 2003 2004 /* ECORE LL2: internal functions */ 2005 2006 enum _ecore_status_t ecore_ll2_alloc(struct ecore_hwfn *p_hwfn) 2007 { 2008 struct ecore_ll2_info *p_ll2_info; 2009 u8 i; 2010 2011 /* Allocate LL2's set struct */ 2012 p_ll2_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, 2013 sizeof(struct ecore_ll2_info) * 2014 ECORE_MAX_NUM_OF_LL2_CONNECTIONS); 2015 if (!p_ll2_info) { 2016 DP_NOTICE(p_hwfn, false, 2017 "Failed to allocate `struct ecore_ll2'\n"); 2018 return ECORE_NOMEM; 2019 } 2020 2021 p_hwfn->p_ll2_info = p_ll2_info; 2022 2023 for (i = 0; i < ECORE_MAX_NUM_OF_LL2_CONNECTIONS; i++) { 2024 #ifdef CONFIG_ECORE_LOCK_ALLOC 2025 if (OSAL_MUTEX_ALLOC(p_hwfn, &p_ll2_info[i].mutex)) 2026 goto handle_err; 2027 if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_ll2_info[i].rx_queue.lock)) 2028 goto handle_err; 2029 if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_ll2_info[i].tx_queue.lock)) 2030 goto handle_err; 2031 #endif 2032 p_ll2_info[i].my_id = i; 2033 } 2034 2035 return ECORE_SUCCESS; 2036 #ifdef CONFIG_ECORE_LOCK_ALLOC 2037 handle_err: 2038 ecore_ll2_free(p_hwfn); 2039 return ECORE_NOMEM; 2040 #endif 2041 } 2042 2043 void ecore_ll2_setup(struct ecore_hwfn *p_hwfn) 2044 { 2045 int i; 2046 2047 for (i = 0; i < ECORE_MAX_NUM_OF_LL2_CONNECTIONS; i++) 2048 OSAL_MUTEX_INIT(&p_hwfn->p_ll2_info[i].mutex); 2049 } 2050 2051 void ecore_ll2_free(struct ecore_hwfn *p_hwfn) 2052 { 2053 #ifdef CONFIG_ECORE_LOCK_ALLOC 2054 int i; 2055 #endif 2056 if (!p_hwfn->p_ll2_info) 2057 return; 2058 2059 #ifdef CONFIG_ECORE_LOCK_ALLOC 2060 for (i = 0; i < ECORE_MAX_NUM_OF_LL2_CONNECTIONS; i++) { 2061 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ll2_info[i].rx_queue.lock); 2062 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ll2_info[i].tx_queue.lock); 2063 OSAL_MUTEX_DEALLOC(&p_hwfn->p_ll2_info[i].mutex); 2064 } 2065 #endif 2066 OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ll2_info); 2067 p_hwfn->p_ll2_info = OSAL_NULL; 2068 } 2069 2070 static void _ecore_ll2_get_port_stats(struct ecore_hwfn *p_hwfn, 2071 struct ecore_ptt *p_ptt, 2072 struct ecore_ll2_stats *p_stats) 2073 { 2074 struct core_ll2_port_stats port_stats; 2075 2076 OSAL_MEMSET(&port_stats, 0, sizeof(port_stats)); 2077 ecore_memcpy_from(p_hwfn, p_ptt, &port_stats, 2078 BAR0_MAP_REG_TSDM_RAM + 2079 TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)), 2080 sizeof(port_stats)); 2081 2082 p_stats->gsi_invalid_hdr += 2083 HILO_64_REGPAIR(port_stats.gsi_invalid_hdr); 2084 p_stats->gsi_invalid_pkt_length += 2085 HILO_64_REGPAIR(port_stats.gsi_invalid_pkt_length); 2086 p_stats->gsi_unsupported_pkt_typ += 2087 HILO_64_REGPAIR(port_stats.gsi_unsupported_pkt_typ); 2088 p_stats->gsi_crcchksm_error += 2089 HILO_64_REGPAIR(port_stats.gsi_crcchksm_error); 2090 } 2091 2092 static void _ecore_ll2_get_tstats(struct ecore_hwfn *p_hwfn, 2093 struct ecore_ptt *p_ptt, 2094 struct ecore_ll2_info *p_ll2_conn, 2095 struct ecore_ll2_stats *p_stats) 2096 { 2097 struct core_ll2_tstorm_per_queue_stat tstats; 2098 u8 qid = p_ll2_conn->queue_id; 2099 u32 tstats_addr; 2100 2101 OSAL_MEMSET(&tstats, 0, sizeof(tstats)); 2102 tstats_addr = BAR0_MAP_REG_TSDM_RAM + 2103 CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid); 2104 ecore_memcpy_from(p_hwfn, p_ptt, &tstats, 2105 tstats_addr, 2106 sizeof(tstats)); 2107 2108 p_stats->packet_too_big_discard += 2109 HILO_64_REGPAIR(tstats.packet_too_big_discard); 2110 p_stats->no_buff_discard += 2111 HILO_64_REGPAIR(tstats.no_buff_discard); 2112 } 2113 2114 static void _ecore_ll2_get_ustats(struct ecore_hwfn *p_hwfn, 2115 struct ecore_ptt *p_ptt, 2116 struct ecore_ll2_info *p_ll2_conn, 2117 struct ecore_ll2_stats *p_stats) 2118 { 2119 struct core_ll2_ustorm_per_queue_stat ustats; 2120 u8 qid = p_ll2_conn->queue_id; 2121 u32 ustats_addr; 2122 2123 OSAL_MEMSET(&ustats, 0, sizeof(ustats)); 2124 ustats_addr = BAR0_MAP_REG_USDM_RAM + 2125 CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid); 2126 ecore_memcpy_from(p_hwfn, p_ptt, &ustats, 2127 ustats_addr, 2128 sizeof(ustats)); 2129 2130 p_stats->rcv_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes); 2131 p_stats->rcv_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes); 2132 p_stats->rcv_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes); 2133 p_stats->rcv_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts); 2134 p_stats->rcv_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts); 2135 p_stats->rcv_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts); 2136 } 2137 2138 static void _ecore_ll2_get_pstats(struct ecore_hwfn *p_hwfn, 2139 struct ecore_ptt *p_ptt, 2140 struct ecore_ll2_info *p_ll2_conn, 2141 struct ecore_ll2_stats *p_stats) 2142 { 2143 struct core_ll2_pstorm_per_queue_stat pstats; 2144 u8 stats_id = p_ll2_conn->tx_stats_id; 2145 u32 pstats_addr; 2146 2147 OSAL_MEMSET(&pstats, 0, sizeof(pstats)); 2148 pstats_addr = BAR0_MAP_REG_PSDM_RAM + 2149 CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id); 2150 ecore_memcpy_from(p_hwfn, p_ptt, &pstats, 2151 pstats_addr, 2152 sizeof(pstats)); 2153 2154 p_stats->sent_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes); 2155 p_stats->sent_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes); 2156 p_stats->sent_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes); 2157 p_stats->sent_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts); 2158 p_stats->sent_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts); 2159 p_stats->sent_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts); 2160 } 2161 2162 enum _ecore_status_t __ecore_ll2_get_stats(void *cxt, 2163 u8 connection_handle, 2164 struct ecore_ll2_stats *p_stats) 2165 { 2166 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)cxt; 2167 struct ecore_ll2_info *p_ll2_conn = OSAL_NULL; 2168 struct ecore_ptt *p_ptt; 2169 2170 if ((connection_handle >= ECORE_MAX_NUM_OF_LL2_CONNECTIONS) || 2171 !p_hwfn->p_ll2_info) { 2172 return ECORE_INVAL; 2173 } 2174 2175 p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle]; 2176 2177 p_ptt = ecore_ptt_acquire(p_hwfn); 2178 if (!p_ptt) { 2179 DP_ERR(p_hwfn, "Failed to acquire ptt\n"); 2180 return ECORE_INVAL; 2181 } 2182 2183 if (p_ll2_conn->input.gsi_enable) 2184 _ecore_ll2_get_port_stats(p_hwfn, p_ptt, p_stats); 2185 2186 _ecore_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats); 2187 2188 _ecore_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats); 2189 2190 if (p_ll2_conn->tx_stats_en) 2191 _ecore_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats); 2192 2193 ecore_ptt_release(p_hwfn, p_ptt); 2194 2195 return ECORE_SUCCESS; 2196 } 2197 2198 enum _ecore_status_t ecore_ll2_get_stats(void *cxt, 2199 u8 connection_handle, 2200 struct ecore_ll2_stats *p_stats) 2201 { 2202 OSAL_MEMSET(p_stats, 0, sizeof(*p_stats)); 2203 2204 return __ecore_ll2_get_stats(cxt, connection_handle, p_stats); 2205 } 2206 2207 /**/ 2208 2209 #ifdef _NTDDK_ 2210 #pragma warning(pop) 2211 #endif 2212