/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_chain.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define QED_BAR_ACQUIRE_TIMEOUT 1000

/* Invalid values */
#define QED_BAR_INVALID_OFFSET	(cpu_to_le32(-1))

struct qed_ptt {
	struct list_head list_entry;
	unsigned int idx;
	struct pxp_ptt_entry pxp;
};

struct qed_ptt_pool {
	struct list_head free_list;
	spinlock_t lock; /* ptt synchronized access */
	struct qed_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};

int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), GFP_KERNEL);
	int i;

	if (!p_pool)
		return -ENOMEM;

	INIT_LIST_HEAD(&p_pool->free_list);
	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
		p_pool->ptts[i].idx = i;
		p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
		p_pool->ptts[i].pxp.pretend.control = 0;

		/* The first RESERVED_PTT_MAX entries are set aside for
		 * dedicated users and never placed on the free list.
		 */
		if (i >= RESERVED_PTT_MAX)
			list_add(&p_pool->ptts[i].list_entry,
				 &p_pool->free_list);
	}

	p_hwfn->p_ptt_pool = p_pool;
	spin_lock_init(&p_pool->lock);

	return 0;
}

void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt *p_ptt;
	int i;

	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
		p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
		p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET;
	}
}

void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->p_ptt_pool);
	p_hwfn->p_ptt_pool = NULL;
}
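/* Each PTT entry maps one PF window of the external BAR onto an internal
 * bus (GRC) address, so a small BAR can reach the whole register space.
 * Callers borrow a window for the duration of a register access sequence.
 * A minimal usage sketch (illustrative only; REG_FOO_STAT is a made-up
 * register offset, not a real define):
 *
 *	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
 *
 *	if (p_ptt) {
 *		u32 val = qed_rd(p_hwfn, p_ptt, REG_FOO_STAT);
 *
 *		qed_ptt_release(p_hwfn, p_ptt);
 *	}
 */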
struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt *p_ptt;
	unsigned int i;

	/* Take a free PTT from the list */
	for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) {
		spin_lock_bh(&p_hwfn->p_ptt_pool->lock);

		if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
			p_ptt = list_first_entry(&p_hwfn->p_ptt_pool->free_list,
						 struct qed_ptt, list_entry);
			list_del(&p_ptt->list_entry);

			spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);

			DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
				   "allocated ptt %d\n", p_ptt->idx);
			return p_ptt;
		}

		spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
		usleep_range(1000, 2000);
	}

	DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");
	return NULL;
}

void qed_ptt_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
	list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
	spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
}

u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	/* The HW is using DWORDs and we need to translate it to Bytes */
	return le32_to_cpu(p_ptt->pxp.offset) << 2;
}

static u32 qed_ptt_config_addr(struct qed_ptt *p_ptt)
{
	return PXP_PF_WINDOW_ADMIN_PER_PF_START +
	       p_ptt->idx * sizeof(struct pxp_ptt_entry);
}

u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
{
	return PXP_EXTERNAL_BAR_PF_WINDOW_START +
	       p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
}

void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt, u32 new_hw_addr)
{
	u32 prev_hw_addr;

	prev_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);

	if (new_hw_addr == prev_hw_addr)
		return;

	/* Update PTT entry in admin window */
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "Updating PTT entry %d to offset 0x%x\n",
		   p_ptt->idx, new_hw_addr);

	/* The HW is using DWORDs and the address is in Bytes */
	p_ptt->pxp.offset = cpu_to_le32(new_hw_addr >> 2);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, offset),
	       le32_to_cpu(p_ptt->pxp.offset));
}

static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u32 hw_addr)
{
	u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
	u32 offset;

	offset = hw_addr - win_hw_addr;

	/* Verify the address is within the window */
	if (hw_addr < win_hw_addr ||
	    offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
		qed_ptt_set_win(p_hwfn, p_ptt, hw_addr);
		offset = 0;
	}

	return qed_ptt_get_bar_addr(p_ptt) + offset;
}

struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
				     enum reserved_ptts ptt_idx)
{
	if (ptt_idx >= RESERVED_PTT_MAX) {
		DP_NOTICE(p_hwfn,
			  "Requested PTT %d is out of range\n", ptt_idx);
		return NULL;
	}

	return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
}

void qed_wr(struct qed_hwfn *p_hwfn,
	    struct qed_ptt *p_ptt,
	    u32 hw_addr, u32 val)
{
	u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);

	REG_WR(p_hwfn, bar_addr, val);
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);
}

u32 qed_rd(struct qed_hwfn *p_hwfn,
	   struct qed_ptt *p_ptt,
	   u32 hw_addr)
{
	u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
	u32 val = REG_RD(p_hwfn, bar_addr);

	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);

	return val;
}
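/* Bulk transfers reuse the same window mechanism, one window-sized chunk
 * at a time. A worked example, assuming (illustratively) a 4 KB
 * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE: copying n = 12 KB takes three
 * iterations, each re-aiming the window at hw_addr + done and then moving
 * quota / 4 = 1024 dwords with direct register accesses.
 */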
static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  void *addr, u32 hw_addr, size_t n, bool to_device)
{
	u32 dw_count, *host_addr, hw_offset;
	size_t quota, done = 0;
	u32 __iomem *reg_addr;

	while (done < n) {
		quota = min_t(size_t, n - done,
			      PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);

		if (IS_PF(p_hwfn->cdev)) {
			qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
			hw_offset = qed_ptt_get_bar_addr(p_ptt);
		} else {
			hw_offset = hw_addr + done;
		}

		dw_count = quota / 4;
		host_addr = (u32 *)((u8 *)addr + done);
		reg_addr = (u32 __iomem *)REG_ADDR(p_hwfn, hw_offset);
		if (to_device)
			while (dw_count--)
				DIRECT_REG_WR(reg_addr++, *host_addr++);
		else
			while (dw_count--)
				*host_addr++ = DIRECT_REG_RD(reg_addr++);

		done += quota;
	}
}

void qed_memcpy_from(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt, void *dest, u32 hw_addr, size_t n)
{
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "hw_addr 0x%x, dest %p, size %lu\n",
		   hw_addr, dest, (unsigned long)n);

	qed_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
}

void qed_memcpy_to(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt, u32 hw_addr, void *src, size_t n)
{
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "hw_addr 0x%x, src %p, size %lu\n",
		   hw_addr, src, (unsigned long)n);

	qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
}

void qed_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 fid)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);

	/* Every pretend undoes previous pretends, including
	 * a previous port pretend.
	 */
	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
		fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);

	p_ptt->pxp.pretend.control = cpu_to_le16(control);
	p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

void qed_port_pretend(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt, u8 port_id)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	p_ptt->pxp.pretend.control = cpu_to_le16(control);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	p_ptt->pxp.pretend.control = cpu_to_le16(control);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
{
	u32 concrete_fid = 0;

	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);

	return concrete_fid;
}
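/* A typical pretend sequence, e.g. touching a VF's registers from the PF
 * and then reverting (minimal sketch under the assumption that the caller
 * holds a PTT and restores the PF's own concrete fid afterwards; 'vfid'
 * is a made-up caller value):
 *
 *	u32 concrete = qed_vfid_to_concrete(p_hwfn, vfid);
 *
 *	qed_fid_pretend(p_hwfn, p_ptt, (u16)concrete);
 *	... GRC accesses through p_ptt now act on behalf of the VF ...
 *	qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
 */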
/* DMAE */
static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
			    const u8 is_src_type_grc,
			    const u8 is_dst_type_grc,
			    struct qed_dmae_params *p_params)
{
	u16 opcode_b = 0;
	u32 opcode = 0;

	/* Whether the source is the PCIe or the GRC.
	 * 0- The source is the PCIe
	 * 1- The source is the GRC.
	 */
	opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
				   : DMAE_CMD_SRC_MASK_PCIE) <<
		  DMAE_CMD_SRC_SHIFT;
	opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
		   DMAE_CMD_SRC_PF_ID_SHIFT);

	/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
	opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
				   : DMAE_CMD_DST_MASK_PCIE) <<
		  DMAE_CMD_DST_SHIFT;
	opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
		   DMAE_CMD_DST_PF_ID_SHIFT);

	/* Whether to write a completion word to the completion destination:
	 * 0-Do not write a completion word
	 * 1-Write the completion word
	 */
	opcode |= (DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT);

	if (p_params->flags & QED_DMAE_FLAG_COMPLETION_DST)
		opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT);

	opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT);

	opcode |= ((p_hwfn->port_id) << DMAE_CMD_PORT_ID_SHIFT);

	/* reset source address in next go */
	opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
		   DMAE_CMD_SRC_ADDR_RESET_SHIFT);

	/* reset dest address in next go */
	opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
		   DMAE_CMD_DST_ADDR_RESET_SHIFT);

	/* SRC/DST VFID: all 1's - pf, otherwise VF id */
	if (p_params->flags & QED_DMAE_FLAG_VF_SRC) {
		opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT;
		opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT;
	} else {
		opcode_b |= DMAE_CMD_SRC_VF_ID_MASK <<
			    DMAE_CMD_SRC_VF_ID_SHIFT;
	}

	if (p_params->flags & QED_DMAE_FLAG_VF_DST) {
		opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
		opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
	} else {
		opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
	}

	p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
	p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcode_b);
}

u32 qed_dmae_idx_to_go_cmd(u8 idx)
{
	/* All the DMAE 'go' registers form an array in internal memory */
	return DMAE_REG_GO_C0 + (idx << 2);
}

static int qed_dmae_post_command(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
	u8 idx_cmd = p_hwfn->dmae_info.channel, i;
	int qed_status = 0;

	/* verify address is not NULL */
	if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
	     ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
		DP_NOTICE(p_hwfn,
			  "source or destination address 0 idx_cmd=%d\n"
			  "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
			  idx_cmd,
			  le32_to_cpu(p_command->opcode),
			  le16_to_cpu(p_command->opcode_b),
			  le16_to_cpu(p_command->length_dw),
			  le32_to_cpu(p_command->src_addr_hi),
			  le32_to_cpu(p_command->src_addr_lo),
			  le32_to_cpu(p_command->dst_addr_hi),
			  le32_to_cpu(p_command->dst_addr_lo));

		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_HW,
		   "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
		   idx_cmd,
		   le32_to_cpu(p_command->opcode),
		   le16_to_cpu(p_command->opcode_b),
		   le16_to_cpu(p_command->length_dw),
		   le32_to_cpu(p_command->src_addr_hi),
		   le32_to_cpu(p_command->src_addr_lo),
		   le32_to_cpu(p_command->dst_addr_hi),
		   le32_to_cpu(p_command->dst_addr_lo));

	/* Copy the command to the DMAE command memory. This must be done
	 * before every call, since the opcode requests that the source and
	 * destination addresses be reset after each run. The first 9 DWs
	 * are the command registers, the 10th DW is the GO register, and
	 * the rest are result registers (which are read-only by the client).
	 */
	for (i = 0; i < DMAE_CMD_SIZE; i++) {
		u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
			   *(((u32 *)p_command) + i) : 0;

		qed_wr(p_hwfn, p_ptt,
		       DMAE_REG_CMD_MEM +
		       (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
		       (i * sizeof(u32)), data);
	}

	qed_wr(p_hwfn, p_ptt, qed_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);

	return qed_status;
}
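/* The per-hwfn DMAE context owns three DMA-coherent allocations: the
 * command descriptor the engine reads, a one-dword completion word the
 * engine writes when it finishes, and an intermediate bounce buffer used
 * when a caller passes host *virtual* addresses (the engine itself only
 * understands physical/GRC addresses).
 */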
int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
{
	dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
	struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
	u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
	u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;

	*p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     sizeof(u32), p_addr, GFP_KERNEL);
	if (!*p_comp)
		goto err;

	p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
	*p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    sizeof(struct dmae_cmd),
				    p_addr, GFP_KERNEL);
	if (!*p_cmd)
		goto err;

	p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	*p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     sizeof(u32) * DMAE_MAX_RW_SIZE,
				     p_addr, GFP_KERNEL);
	if (!*p_buff)
		goto err;

	p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;

	return 0;

err:
	qed_dmae_info_free(p_hwfn);
	return -ENOMEM;
}

void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
{
	dma_addr_t p_phys;

	/* Just make sure no one is in the middle */
	mutex_lock(&p_hwfn->dmae_info.mutex);

	if (p_hwfn->dmae_info.p_completion_word) {
		p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(u32),
				  p_hwfn->dmae_info.p_completion_word, p_phys);
		p_hwfn->dmae_info.p_completion_word = NULL;
	}

	if (p_hwfn->dmae_info.p_dmae_cmd) {
		p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(struct dmae_cmd),
				  p_hwfn->dmae_info.p_dmae_cmd, p_phys);
		p_hwfn->dmae_info.p_dmae_cmd = NULL;
	}

	if (p_hwfn->dmae_info.p_intermediate_buffer) {
		p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(u32) * DMAE_MAX_RW_SIZE,
				  p_hwfn->dmae_info.p_intermediate_buffer,
				  p_phys);
		p_hwfn->dmae_info.p_intermediate_buffer = NULL;
	}

	mutex_unlock(&p_hwfn->dmae_info.mutex);
}

static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
{
	u32 wait_cnt_limit = 10000, wait_cnt = 0;
	int qed_status = 0;

	barrier();
	while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
		udelay(DMAE_MIN_WAIT_TIME);
		if (++wait_cnt > wait_cnt_limit) {
			DP_NOTICE(p_hwfn->cdev,
				  "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
				  *p_hwfn->dmae_info.p_completion_word,
				  DMAE_COMPLETION_VAL);
			qed_status = -EBUSY;
			break;
		}

		/* to sync the completion_word since we are not
		 * using the volatile keyword for p_completion_word
		 */
		barrier();
	}

	if (qed_status == 0)
		*p_hwfn->dmae_info.p_completion_word = 0;

	return qed_status;
}
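/* A single DMAE sub-operation moves at most DMAE_MAX_RW_SIZE dwords.
 * Host virtual addresses cannot be handed to the engine directly, so they
 * are bounced through the coherent intermediate buffer: copied in before
 * posting for a virtual source, or copied out after completion for a
 * virtual destination.
 */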
static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  u64 src_addr,
					  u64 dst_addr,
					  u8 src_type,
					  u8 dst_type,
					  u32 length_dw)
{
	dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	int qed_status = 0;

	switch (src_type) {
	case QED_DMAE_ADDRESS_GRC:
	case QED_DMAE_ADDRESS_HOST_PHYS:
		cmd->src_addr_hi = cpu_to_le32(upper_32_bits(src_addr));
		cmd->src_addr_lo = cpu_to_le32(lower_32_bits(src_addr));
		break;
	/* for virtual source addresses we use the intermediate buffer. */
	case QED_DMAE_ADDRESS_HOST_VIRT:
		cmd->src_addr_hi = cpu_to_le32(upper_32_bits(phys));
		cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
		memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
		       (void *)(uintptr_t)src_addr,
		       length_dw * sizeof(u32));
		break;
	default:
		return -EINVAL;
	}

	switch (dst_type) {
	case QED_DMAE_ADDRESS_GRC:
	case QED_DMAE_ADDRESS_HOST_PHYS:
		cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(dst_addr));
		cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(dst_addr));
		break;
	/* for virtual destination addresses we use the intermediate buffer. */
	case QED_DMAE_ADDRESS_HOST_VIRT:
		cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(phys));
		cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(phys));
		break;
	default:
		return -EINVAL;
	}

	cmd->length_dw = cpu_to_le16((u16)length_dw);

	qed_dmae_post_command(p_hwfn, p_ptt);

	qed_status = qed_dmae_operation_wait(p_hwfn);

	if (qed_status) {
		DP_NOTICE(p_hwfn,
			  "qed_dmae_execute_sub_operation: wait failed. source_addr 0x%llx, destination_addr 0x%llx, size_in_dwords 0x%x\n",
			  src_addr, dst_addr, length_dw);
		return qed_status;
	}

	if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
		memcpy((void *)(uintptr_t)(dst_addr),
		       &p_hwfn->dmae_info.p_intermediate_buffer[0],
		       length_dw * sizeof(u32));

	return 0;
}
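/* Transfers larger than DMAE_MAX_RW_SIZE dwords are split into
 * length_limit-sized sub-operations plus a possibly-zero remainder.
 * A worked example, assuming (illustratively) DMAE_MAX_RW_SIZE = 0x2000:
 * size_in_dwords = 0x5000 gives cnt_split = 2 and length_mod = 0x1000,
 * i.e. two full chunks and one final 0x1000-dword chunk. GRC offsets
 * advance in dwords while host addresses advance in bytes (offset * 4).
 */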
static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    u64 src_addr, u64 dst_addr,
				    u8 src_type, u8 dst_type,
				    u32 size_in_dwords,
				    struct qed_dmae_params *p_params)
{
	dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
	u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	u64 src_addr_split = 0, dst_addr_split = 0;
	u16 length_limit = DMAE_MAX_RW_SIZE;
	int qed_status = 0;
	u32 offset = 0;

	qed_dmae_opcode(p_hwfn,
			(src_type == QED_DMAE_ADDRESS_GRC),
			(dst_type == QED_DMAE_ADDRESS_GRC),
			p_params);

	cmd->comp_addr_lo = cpu_to_le32(lower_32_bits(phys));
	cmd->comp_addr_hi = cpu_to_le32(upper_32_bits(phys));
	cmd->comp_val = cpu_to_le32(DMAE_COMPLETION_VAL);

	/* Split the operation into chunks of at most length_limit dwords */
	cnt_split = size_in_dwords / length_limit;
	length_mod = size_in_dwords % length_limit;

	src_addr_split = src_addr;
	dst_addr_split = dst_addr;

	for (i = 0; i <= cnt_split; i++) {
		offset = length_limit * i;

		if (!(p_params->flags & QED_DMAE_FLAG_RW_REPL_SRC)) {
			if (src_type == QED_DMAE_ADDRESS_GRC)
				src_addr_split = src_addr + offset;
			else
				src_addr_split = src_addr + (offset * 4);
		}

		if (dst_type == QED_DMAE_ADDRESS_GRC)
			dst_addr_split = dst_addr + offset;
		else
			dst_addr_split = dst_addr + (offset * 4);

		length_cur = (cnt_split == i) ? length_mod : length_limit;

		/* might be zero on last iteration */
		if (!length_cur)
			continue;

		qed_status = qed_dmae_execute_sub_operation(p_hwfn,
							    p_ptt,
							    src_addr_split,
							    dst_addr_split,
							    src_type,
							    dst_type,
							    length_cur);
		if (qed_status) {
			DP_NOTICE(p_hwfn,
				  "qed_dmae_execute_sub_operation failed with error 0x%x. source_addr 0x%llx, destination_addr 0x%llx, size_in_dwords 0x%x\n",
				  qed_status, src_addr, dst_addr, length_cur);
			break;
		}
	}

	return qed_status;
}
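/* The exported wrappers below serialize on dmae_info.mutex and select the
 * address types for the caller. A minimal usage sketch (illustrative;
 * 'fw_data', 'FW_ADDR' and 'len' are made-up caller values, not defines
 * from this driver):
 *
 *	rc = qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)fw_data,
 *			       FW_ADDR, len / sizeof(u32), 0);
 *	if (rc)
 *		DP_NOTICE(p_hwfn, "DMAE transfer failed: %d\n", rc);
 */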
int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      u64 source_addr, u32 grc_addr, u32 size_in_dwords, u32 flags)
{
	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
	struct qed_dmae_params params;
	int rc;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = flags;

	mutex_lock(&p_hwfn->dmae_info.mutex);

	rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
				      grc_addr_in_dw,
				      QED_DMAE_ADDRESS_HOST_VIRT,
				      QED_DMAE_ADDRESS_GRC,
				      size_in_dwords, &params);

	mutex_unlock(&p_hwfn->dmae_info.mutex);

	return rc;
}

int qed_dmae_grc2host(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      u32 grc_addr,
		      dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
{
	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
	struct qed_dmae_params params;
	int rc;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = flags;

	mutex_lock(&p_hwfn->dmae_info.mutex);

	rc = qed_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
				      dest_addr, QED_DMAE_ADDRESS_GRC,
				      QED_DMAE_ADDRESS_HOST_VIRT,
				      size_in_dwords, &params);

	mutex_unlock(&p_hwfn->dmae_info.mutex);

	return rc;
}

int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       dma_addr_t source_addr,
		       dma_addr_t dest_addr,
		       u32 size_in_dwords, struct qed_dmae_params *p_params)
{
	int rc;

	mutex_lock(&p_hwfn->dmae_info.mutex);

	rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
				      dest_addr,
				      QED_DMAE_ADDRESS_HOST_PHYS,
				      QED_DMAE_ADDRESS_HOST_PHYS,
				      size_in_dwords, p_params);

	mutex_unlock(&p_hwfn->dmae_info.mutex);

	return rc;
}

u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
		  enum protocol_type proto, union qed_qm_pq_params *p_params)
{
	u16 pq_id = 0;

	if ((proto == PROTOCOLID_CORE ||
	     proto == PROTOCOLID_ETH ||
	     proto == PROTOCOLID_ISCSI ||
	     proto == PROTOCOLID_ROCE) && !p_params) {
		DP_NOTICE(p_hwfn,
			  "Protocol %d received NULL PQ params\n", proto);
		return 0;
	}

	switch (proto) {
	case PROTOCOLID_CORE:
		if (p_params->core.tc == LB_TC)
			pq_id = p_hwfn->qm_info.pure_lb_pq;
		else if (p_params->core.tc == OOO_LB_TC)
			pq_id = p_hwfn->qm_info.ooo_pq;
		else
			pq_id = p_hwfn->qm_info.offload_pq;
		break;
	case PROTOCOLID_ETH:
		pq_id = p_params->eth.tc;
		if (p_params->eth.is_vf)
			pq_id += p_hwfn->qm_info.vf_queues_offset +
				 p_params->eth.vf_id;
		break;
	case PROTOCOLID_ISCSI:
		if (p_params->iscsi.q_idx == 1)
			pq_id = p_hwfn->qm_info.pure_ack_pq;
		break;
	case PROTOCOLID_ROCE:
		if (p_params->roce.dcqcn)
			pq_id = p_params->roce.qpid;
		else
			pq_id = p_hwfn->qm_info.offload_pq;
		if (pq_id > p_hwfn->qm_info.num_pf_rls)
			pq_id = p_hwfn->qm_info.offload_pq;
		break;
	case PROTOCOLID_FCOE:
		pq_id = p_hwfn->qm_info.offload_pq;
		break;
	default:
		pq_id = 0;
	}

	pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, QED_PQ);

	return pq_id;
}