/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_chain.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define QED_BAR_ACQUIRE_TIMEOUT 1000

/* Invalid values */
#define QED_BAR_INVALID_OFFSET	(cpu_to_le32(-1))

struct qed_ptt {
	struct list_head list_entry;
	unsigned int idx;
	struct pxp_ptt_entry pxp;
};

struct qed_ptt_pool {
	struct list_head free_list;
	spinlock_t lock; /* ptt synchronized access */
	struct qed_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};

int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), GFP_KERNEL);
	int i;

	if (!p_pool)
		return -ENOMEM;

	INIT_LIST_HEAD(&p_pool->free_list);
	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
		p_pool->ptts[i].idx = i;
		p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
		p_pool->ptts[i].pxp.pretend.control = 0;

		/* The first RESERVED_PTT_MAX entries are handed out by
		 * index via qed_get_reserved_ptt(); only the rest go on
		 * the free list.
		 */
		if (i >= RESERVED_PTT_MAX)
			list_add(&p_pool->ptts[i].list_entry,
				 &p_pool->free_list);
	}

	p_hwfn->p_ptt_pool = p_pool;
	spin_lock_init(&p_pool->lock);

	return 0;
}

void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt *p_ptt;
	int i;

	for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
		p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
		p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET;
	}
}

void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->p_ptt_pool);
	p_hwfn->p_ptt_pool = NULL;
}

struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt *p_ptt;
	unsigned int i;

	/* Take a free PTT from the list */
	for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) {
		spin_lock_bh(&p_hwfn->p_ptt_pool->lock);

		if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
			p_ptt = list_first_entry(&p_hwfn->p_ptt_pool->free_list,
						 struct qed_ptt, list_entry);
			list_del(&p_ptt->list_entry);

			spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);

			DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
				   "allocated ptt %d\n", p_ptt->idx);
			return p_ptt;
		}

		spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
		usleep_range(1000, 2000);
	}

	DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");
	return NULL;
}

void qed_ptt_release(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt)
{
	spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
	list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
	spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
}

u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt)
{
	/* The HW is using DWORDs and we need to translate it to Bytes */
	return le32_to_cpu(p_ptt->pxp.offset) << 2;
}

static u32 qed_ptt_config_addr(struct qed_ptt *p_ptt)
{
	return PXP_PF_WINDOW_ADMIN_PER_PF_START +
	       p_ptt->idx * sizeof(struct pxp_ptt_entry);
}
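/* Usage sketch (illustrative only; GRC_EXAMPLE_REG is a hypothetical
 * register offset, not a real definition): GRC accesses go through an
 * acquired PTT window, which must be released when the caller is done.
 *
 *	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
 *
 *	if (!p_ptt)
 *		return -EAGAIN;
 *	val = qed_rd(p_hwfn, p_ptt, GRC_EXAMPLE_REG);
 *	qed_wr(p_hwfn, p_ptt, GRC_EXAMPLE_REG, val | BIT(0));
 *	qed_ptt_release(p_hwfn, p_ptt);
 */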
u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
{
	return PXP_EXTERNAL_BAR_PF_WINDOW_START +
	       p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
}

void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     u32 new_hw_addr)
{
	u32 prev_hw_addr;

	prev_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);

	if (new_hw_addr == prev_hw_addr)
		return;

	/* Update PTT entry in admin window */
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "Updating PTT entry %d to offset 0x%x\n",
		   p_ptt->idx, new_hw_addr);

	/* The HW is using DWORDs and the address is in Bytes */
	p_ptt->pxp.offset = cpu_to_le32(new_hw_addr >> 2);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, offset),
	       le32_to_cpu(p_ptt->pxp.offset));
}

static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 hw_addr)
{
	u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
	u32 offset;

	offset = hw_addr - win_hw_addr;

	/* Verify that the address is within the window */
	if (hw_addr < win_hw_addr ||
	    offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
		qed_ptt_set_win(p_hwfn, p_ptt, hw_addr);
		offset = 0;
	}

	return qed_ptt_get_bar_addr(p_ptt) + offset;
}

struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
				     enum reserved_ptts ptt_idx)
{
	if (ptt_idx >= RESERVED_PTT_MAX) {
		DP_NOTICE(p_hwfn,
			  "Requested PTT %d is out of range\n", ptt_idx);
		return NULL;
	}

	return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
}

void qed_wr(struct qed_hwfn *p_hwfn,
	    struct qed_ptt *p_ptt,
	    u32 hw_addr, u32 val)
{
	u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);

	REG_WR(p_hwfn, bar_addr, val);
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);
}

u32 qed_rd(struct qed_hwfn *p_hwfn,
	   struct qed_ptt *p_ptt,
	   u32 hw_addr)
{
	u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
	u32 val = REG_RD(p_hwfn, bar_addr);

	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
		   bar_addr, hw_addr, val);

	return val;
}

static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  void *addr,
			  u32 hw_addr,
			  size_t n,
			  bool to_device)
{
	u32 dw_count, *host_addr, hw_offset;
	size_t quota, done = 0;
	u32 __iomem *reg_addr;

	while (done < n) {
		quota = min_t(size_t, n - done,
			      PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);

		if (IS_PF(p_hwfn->cdev)) {
			qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
			hw_offset = qed_ptt_get_bar_addr(p_ptt);
		} else {
			hw_offset = hw_addr + done;
		}

		dw_count = quota / 4;
		host_addr = (u32 *)((u8 *)addr + done);
		reg_addr = (u32 __iomem *)REG_ADDR(p_hwfn, hw_offset);
		if (to_device)
			while (dw_count--)
				DIRECT_REG_WR(reg_addr++, *host_addr++);
		else
			while (dw_count--)
				*host_addr++ = DIRECT_REG_RD(reg_addr++);

		done += quota;
	}
}

void qed_memcpy_from(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     void *dest, u32 hw_addr, size_t n)
{
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "hw_addr 0x%x, dest %p, size %lu\n",
		   hw_addr, dest, (unsigned long)n);

	qed_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
}
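/* Worked example for the windowed copy above (illustrative; the actual
 * window size is whatever PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE is on
 * the given chip): copying n bytes re-points the PTT window once per
 * chunk.
 *
 *	n = window_size + 16
 *	iteration 0: quota = window_size, window -> hw_addr
 *	iteration 1: quota = 16,          window -> hw_addr + window_size
 *
 * n is assumed to be a multiple of 4, since each chunk is transferred
 * as quota / 4 DWORDs.
 */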
void qed_memcpy_to(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt,
		   u32 hw_addr, void *src, size_t n)
{
	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "hw_addr 0x%x, src %p, size %lu\n",
		   hw_addr, src, (unsigned long)n);

	qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
}

void qed_fid_pretend(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     u16 fid)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);

	/* Every pretend undoes previous pretends, including
	 * previous port pretend.
	 */
	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
		fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);

	p_ptt->pxp.pretend.control = cpu_to_le16(control);
	p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

void qed_port_pretend(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      u8 port_id)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	p_ptt->pxp.pretend.control = cpu_to_le16(control);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

void qed_port_unpretend(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt)
{
	u16 control = 0;

	SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

	p_ptt->pxp.pretend.control = cpu_to_le16(control);

	REG_WR(p_hwfn,
	       qed_ptt_config_addr(p_ptt) +
	       offsetof(struct pxp_ptt_entry, pretend),
	       *(u32 *)&p_ptt->pxp.pretend);
}

u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
{
	u32 concrete_fid = 0;

	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
	SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);

	return concrete_fid;
}
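/* Usage sketch (illustrative only): to issue GRC accesses on behalf of
 * a VF, build its concrete FID and pretend to it, then pretend back to
 * the PF's own FID when done. p_hwfn->hw_info.concrete_fid is assumed
 * here to hold the PF's concrete FID.
 *
 *	u32 concrete = qed_vfid_to_concrete(p_hwfn, vfid);
 *
 *	qed_fid_pretend(p_hwfn, p_ptt, (u16)concrete);
 *	... GRC reads/writes now apply to the VF ...
 *	qed_fid_pretend(p_hwfn, p_ptt,
 *			(u16)p_hwfn->hw_info.concrete_fid);
 */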
/* DMAE */
static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
			    const u8 is_src_type_grc,
			    const u8 is_dst_type_grc,
			    struct qed_dmae_params *p_params)
{
	u16 opcode_b = 0;
	u32 opcode = 0;

	/* Whether the source is the PCIe or the GRC.
	 * 0- The source is the PCIe
	 * 1- The source is the GRC.
	 */
	opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
				   : DMAE_CMD_SRC_MASK_PCIE) <<
		  DMAE_CMD_SRC_SHIFT;
	opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
		   DMAE_CMD_SRC_PF_ID_SHIFT);

	/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
	opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
				   : DMAE_CMD_DST_MASK_PCIE) <<
		  DMAE_CMD_DST_SHIFT;
	opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
		   DMAE_CMD_DST_PF_ID_SHIFT);

	/* Whether to write a completion word to the completion destination:
	 * 0-Do not write a completion word
	 * 1-Write the completion word
	 */
	opcode |= (DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT);
	opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
		   DMAE_CMD_SRC_ADDR_RESET_SHIFT);

	if (p_params->flags & QED_DMAE_FLAG_COMPLETION_DST)
		opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT);

	opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT);

	opcode |= ((p_hwfn->port_id) << DMAE_CMD_PORT_ID_SHIFT);

	/* reset source address in next go */
	opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
		   DMAE_CMD_SRC_ADDR_RESET_SHIFT);

	/* reset dest address in next go */
	opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
		   DMAE_CMD_DST_ADDR_RESET_SHIFT);

	/* SRC/DST VFID: all 1's - pf, otherwise VF id */
	if (p_params->flags & QED_DMAE_FLAG_VF_SRC) {
		opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT;
		opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT;
	} else {
		opcode_b |= DMAE_CMD_SRC_VF_ID_MASK <<
			    DMAE_CMD_SRC_VF_ID_SHIFT;
	}

	if (p_params->flags & QED_DMAE_FLAG_VF_DST) {
		opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
		opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
	} else {
		opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
	}

	p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
	p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcode_b);
}

u32 qed_dmae_idx_to_go_cmd(u8 idx)
{
	/* All the DMAE 'go' registers form an array in internal memory */
	return DMAE_REG_GO_C0 + (idx << 2);
}
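/* Worked example (illustrative): the 'go' registers are consecutive
 * 32-bit registers, so channel idx maps to DMAE_REG_GO_C0 + 4 * idx,
 * e.g. idx 0 -> DMAE_REG_GO_C0 and idx 3 -> DMAE_REG_GO_C0 + 12.
 */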
static int
qed_dmae_post_command(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt)
{
	struct dmae_cmd *command = p_hwfn->dmae_info.p_dmae_cmd;
	u8 idx_cmd = p_hwfn->dmae_info.channel, i;
	int qed_status = 0;

	/* verify that neither the source nor the destination address is 0 */
	if ((((command->dst_addr_lo == 0) && (command->dst_addr_hi == 0)) ||
	     ((command->src_addr_lo == 0) && (command->src_addr_hi == 0)))) {
		DP_NOTICE(p_hwfn,
			  "source or destination address 0 idx_cmd=%d\n"
			  "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
			  idx_cmd,
			  le32_to_cpu(command->opcode),
			  le16_to_cpu(command->opcode_b),
			  le16_to_cpu(command->length),
			  le32_to_cpu(command->src_addr_hi),
			  le32_to_cpu(command->src_addr_lo),
			  le32_to_cpu(command->dst_addr_hi),
			  le32_to_cpu(command->dst_addr_lo));

		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_HW,
		   "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
		   idx_cmd,
		   le32_to_cpu(command->opcode),
		   le16_to_cpu(command->opcode_b),
		   le16_to_cpu(command->length),
		   le32_to_cpu(command->src_addr_hi),
		   le32_to_cpu(command->src_addr_lo),
		   le32_to_cpu(command->dst_addr_hi),
		   le32_to_cpu(command->dst_addr_lo));

	/* Copy the command to DMAE - this needs to be done before every
	 * call since the source/dest addresses are not reset.
	 * The first 9 DWs are the command registers, the 10th DW is the
	 * GO register, and the rest are result registers
	 * (which are read-only by the client).
	 */
	for (i = 0; i < DMAE_CMD_SIZE; i++) {
		u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
			   *(((u32 *)command) + i) : 0;

		qed_wr(p_hwfn, p_ptt,
		       DMAE_REG_CMD_MEM +
		       (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
		       (i * sizeof(u32)), data);
	}

	qed_wr(p_hwfn, p_ptt,
	       qed_dmae_idx_to_go_cmd(idx_cmd),
	       DMAE_GO_VALUE);

	return qed_status;
}

int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
{
	dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
	struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
	u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
	u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;

	*p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     sizeof(u32),
				     p_addr,
				     GFP_KERNEL);
	if (!*p_comp) {
		DP_NOTICE(p_hwfn, "Failed to allocate `p_completion_word'\n");
		goto err;
	}

	p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
	*p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    sizeof(struct dmae_cmd),
				    p_addr, GFP_KERNEL);
	if (!*p_cmd) {
		DP_NOTICE(p_hwfn, "Failed to allocate `struct dmae_cmd'\n");
		goto err;
	}

	p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	*p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				     sizeof(u32) * DMAE_MAX_RW_SIZE,
				     p_addr, GFP_KERNEL);
	if (!*p_buff) {
		DP_NOTICE(p_hwfn, "Failed to allocate `intermediate_buffer'\n");
		goto err;
	}

	p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;

	return 0;
err:
	qed_dmae_info_free(p_hwfn);
	return -ENOMEM;
}

void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
{
	dma_addr_t p_phys;

	/* Just make sure no one is in the middle */
	mutex_lock(&p_hwfn->dmae_info.mutex);

	if (p_hwfn->dmae_info.p_completion_word) {
		p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(u32),
				  p_hwfn->dmae_info.p_completion_word,
				  p_phys);
		p_hwfn->dmae_info.p_completion_word = NULL;
	}

	if (p_hwfn->dmae_info.p_dmae_cmd) {
		p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(struct dmae_cmd),
				  p_hwfn->dmae_info.p_dmae_cmd,
				  p_phys);
		p_hwfn->dmae_info.p_dmae_cmd = NULL;
	}

	if (p_hwfn->dmae_info.p_intermediate_buffer) {
		p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  sizeof(u32) * DMAE_MAX_RW_SIZE,
				  p_hwfn->dmae_info.p_intermediate_buffer,
				  p_phys);
		p_hwfn->dmae_info.p_intermediate_buffer = NULL;
	}

	mutex_unlock(&p_hwfn->dmae_info.mutex);
}
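/* Summary sketch (illustrative): each hwfn owns three coherent DMA
 * allocations that together drive one DMAE channel -
 *
 *	p_completion_word     - one DWORD the engine writes on completion
 *	p_dmae_cmd            - the command descriptor posted to the HW
 *	p_intermediate_buffer - DMAE_MAX_RW_SIZE DWORDs used as a bounce
 *	                        buffer for HOST_VIRT source/destination
 *
 * All three are protected by dmae_info.mutex for the duration of a
 * command.
 */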
static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
{
	u32 wait_cnt = 0;
	u32 wait_cnt_limit = 10000;

	int qed_status = 0;

	barrier();
	while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
		udelay(DMAE_MIN_WAIT_TIME);
		if (++wait_cnt > wait_cnt_limit) {
			DP_NOTICE(p_hwfn->cdev,
				  "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
				  *p_hwfn->dmae_info.p_completion_word,
				  DMAE_COMPLETION_VAL);
			qed_status = -EBUSY;
			break;
		}

		/* to sync the completion_word since we are not
		 * using the volatile keyword for p_completion_word
		 */
		barrier();
	}

	if (qed_status == 0)
		*p_hwfn->dmae_info.p_completion_word = 0;

	return qed_status;
}
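/* Timing sketch (illustrative): the wait loop above polls up to
 * wait_cnt_limit (10000) times with a udelay of DMAE_MIN_WAIT_TIME
 * between polls, so the total budget is roughly
 * 10000 * DMAE_MIN_WAIT_TIME microseconds before -EBUSY is returned.
 */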
static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt,
					  u64 src_addr,
					  u64 dst_addr,
					  u8 src_type,
					  u8 dst_type,
					  u32 length)
{
	dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	int qed_status = 0;

	switch (src_type) {
	case QED_DMAE_ADDRESS_GRC:
	case QED_DMAE_ADDRESS_HOST_PHYS:
		cmd->src_addr_hi = cpu_to_le32(upper_32_bits(src_addr));
		cmd->src_addr_lo = cpu_to_le32(lower_32_bits(src_addr));
		break;
	/* for virtual source addresses we use the intermediate buffer. */
	case QED_DMAE_ADDRESS_HOST_VIRT:
		cmd->src_addr_hi = cpu_to_le32(upper_32_bits(phys));
		cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
		memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
		       (void *)(uintptr_t)src_addr,
		       length * sizeof(u32));
		break;
	default:
		return -EINVAL;
	}

	switch (dst_type) {
	case QED_DMAE_ADDRESS_GRC:
	case QED_DMAE_ADDRESS_HOST_PHYS:
		cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(dst_addr));
		cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(dst_addr));
		break;
	/* for virtual destination addresses we use the intermediate buffer. */
	case QED_DMAE_ADDRESS_HOST_VIRT:
		cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(phys));
		cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(phys));
		break;
	default:
		return -EINVAL;
	}

	cmd->length = cpu_to_le16((u16)length);

	qed_dmae_post_command(p_hwfn, p_ptt);

	qed_status = qed_dmae_operation_wait(p_hwfn);

	if (qed_status) {
		DP_NOTICE(p_hwfn,
			  "DMAE wait failed. source_addr 0x%llx, dest_addr 0x%llx, size_in_dwords 0x%x\n",
			  src_addr,
			  dst_addr,
			  length);
		return qed_status;
	}

	if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
		memcpy((void *)(uintptr_t)(dst_addr),
		       &p_hwfn->dmae_info.p_intermediate_buffer[0],
		       length * sizeof(u32));

	return 0;
}
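/* Splitting sketch for qed_dmae_execute_command below (illustrative;
 * assuming length_limit = DMAE_MAX_RW_SIZE = 0x2000 DWORDs for the
 * example): a request of size_in_dwords = 0x4100 gives
 *
 *	cnt_split  = 0x4100 / 0x2000 = 2
 *	length_mod = 0x4100 % 0x2000 = 0x100
 *
 * i.e. two full 0x2000-DWORD sub-operations plus a final 0x100-DWORD
 * one; addresses advance per iteration in DWORDs for GRC and in bytes
 * (offset * 4) for host addresses.
 */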
static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    u64 src_addr, u64 dst_addr,
				    u8 src_type, u8 dst_type,
				    u32 size_in_dwords,
				    struct qed_dmae_params *p_params)
{
	dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
	u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
	struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
	u64 src_addr_split = 0, dst_addr_split = 0;
	u16 length_limit = DMAE_MAX_RW_SIZE;
	int qed_status = 0;
	u32 offset = 0;

	qed_dmae_opcode(p_hwfn,
			(src_type == QED_DMAE_ADDRESS_GRC),
			(dst_type == QED_DMAE_ADDRESS_GRC),
			p_params);

	cmd->comp_addr_lo = cpu_to_le32(lower_32_bits(phys));
	cmd->comp_addr_hi = cpu_to_le32(upper_32_bits(phys));
	cmd->comp_val = cpu_to_le32(DMAE_COMPLETION_VAL);

	/* Split the request into chunks of at most length_limit DWORDs */
	cnt_split = size_in_dwords / length_limit;
	length_mod = size_in_dwords % length_limit;

	src_addr_split = src_addr;
	dst_addr_split = dst_addr;

	for (i = 0; i <= cnt_split; i++) {
		offset = length_limit * i;

		if (!(p_params->flags & QED_DMAE_FLAG_RW_REPL_SRC)) {
			if (src_type == QED_DMAE_ADDRESS_GRC)
				src_addr_split = src_addr + offset;
			else
				src_addr_split = src_addr + (offset * 4);
		}

		if (dst_type == QED_DMAE_ADDRESS_GRC)
			dst_addr_split = dst_addr + offset;
		else
			dst_addr_split = dst_addr + (offset * 4);

		length_cur = (cnt_split == i) ? length_mod : length_limit;

		/* might be zero on last iteration */
		if (!length_cur)
			continue;

		qed_status = qed_dmae_execute_sub_operation(p_hwfn,
							    p_ptt,
							    src_addr_split,
							    dst_addr_split,
							    src_type,
							    dst_type,
							    length_cur);
		if (qed_status) {
			DP_NOTICE(p_hwfn,
				  "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
				  qed_status,
				  src_addr,
				  dst_addr,
				  length_cur);
			break;
		}
	}

	return qed_status;
}

int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      u64 source_addr,
		      u32 grc_addr,
		      u32 size_in_dwords,
		      u32 flags)
{
	u32 grc_addr_in_dw = grc_addr / sizeof(u32);
	struct qed_dmae_params params;
	int rc;

	memset(&params, 0, sizeof(struct qed_dmae_params));
	params.flags = flags;

	mutex_lock(&p_hwfn->dmae_info.mutex);

	rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
				      grc_addr_in_dw,
				      QED_DMAE_ADDRESS_HOST_VIRT,
				      QED_DMAE_ADDRESS_GRC,
				      size_in_dwords, &params);

	mutex_unlock(&p_hwfn->dmae_info.mutex);

	return rc;
}

int
qed_dmae_host2host(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt,
		   dma_addr_t source_addr,
		   dma_addr_t dest_addr,
		   u32 size_in_dwords, struct qed_dmae_params *p_params)
{
	int rc;

	mutex_lock(&p_hwfn->dmae_info.mutex);

	rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
				      dest_addr,
				      QED_DMAE_ADDRESS_HOST_PHYS,
				      QED_DMAE_ADDRESS_HOST_PHYS,
				      size_in_dwords, p_params);

	mutex_unlock(&p_hwfn->dmae_info.mutex);

	return rc;
}

u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
		  enum protocol_type proto,
		  union qed_qm_pq_params *p_params)
{
	u16 pq_id = 0;

	if ((proto == PROTOCOLID_CORE || proto == PROTOCOLID_ETH) &&
	    !p_params) {
		DP_NOTICE(p_hwfn,
			  "Protocol %d received NULL PQ params\n",
			  proto);
		return 0;
	}

	switch (proto) {
	case PROTOCOLID_CORE:
		if (p_params->core.tc == LB_TC)
			pq_id = p_hwfn->qm_info.pure_lb_pq;
		else
			pq_id = p_hwfn->qm_info.offload_pq;
		break;
	case PROTOCOLID_ETH:
		pq_id = p_params->eth.tc;
		if (p_params->eth.is_vf)
			pq_id += p_hwfn->qm_info.vf_queues_offset +
				 p_params->eth.vf_id;
		break;
	default:
		pq_id = 0;
	}

	pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, QED_PQ);

	return pq_id;
}
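/* Usage sketch (illustrative only; buf and GRC_DEST_ADDR are
 * hypothetical): copying a host buffer into GRC memory under a PTT.
 *
 *	u32 buf[16] = { ... };
 *
 *	rc = qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)buf,
 *			       GRC_DEST_ADDR, ARRAY_SIZE(buf), 0);
 *
 * Note the size is in DWORDs, not bytes, and the source is a host
 * *virtual* address - the helper bounces it through the intermediate
 * buffer internally.
 */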