// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include "ionic.h"
#include "ionic_dev.h"
#include "ionic_lif.h"

static void ionic_watchdog_cb(struct timer_list *t)
{
	struct ionic *ionic = from_timer(ionic, t, watchdog_timer);
	struct ionic_lif *lif = ionic->lif;
	int hb;

	mod_timer(&ionic->watchdog_timer,
		  round_jiffies(jiffies + ionic->watchdog_period));

	if (!lif)
		return;

	hb = ionic_heartbeat_check(ionic);
	dev_dbg(ionic->dev, "%s: hb %d running %d UP %d\n",
		__func__, hb, netif_running(lif->netdev),
		test_bit(IONIC_LIF_F_UP, lif->state));

	if (hb >= 0 &&
	    !test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
}

void ionic_init_devinfo(struct ionic *ionic)
{
	struct ionic_dev *idev = &ionic->idev;

	idev->dev_info.asic_type = ioread8(&idev->dev_info_regs->asic_type);
	idev->dev_info.asic_rev = ioread8(&idev->dev_info_regs->asic_rev);

	memcpy_fromio(idev->dev_info.fw_version,
		      idev->dev_info_regs->fw_version,
		      IONIC_DEVINFO_FWVERS_BUFLEN);

	memcpy_fromio(idev->dev_info.serial_num,
		      idev->dev_info_regs->serial_num,
		      IONIC_DEVINFO_SERIAL_BUFLEN);

	idev->dev_info.fw_version[IONIC_DEVINFO_FWVERS_BUFLEN] = 0;
	idev->dev_info.serial_num[IONIC_DEVINFO_SERIAL_BUFLEN] = 0;

	dev_dbg(ionic->dev, "fw_version %s\n", idev->dev_info.fw_version);
}

int ionic_dev_setup(struct ionic *ionic)
{
	struct ionic_dev_bar *bar = ionic->bars;
	unsigned int num_bars = ionic->num_bars;
	struct ionic_dev *idev = &ionic->idev;
	struct device *dev = ionic->dev;
	u32 sig;

	/* BAR0: dev_cmd and interrupts */
	if (num_bars < 1) {
		dev_err(dev, "No bars found, aborting\n");
		return -EFAULT;
	}

	if (bar->len < IONIC_BAR0_SIZE) {
		dev_err(dev, "Resource bar size %lu too small, aborting\n",
			bar->len);
		return -EFAULT;
	}

	idev->dev_info_regs = bar->vaddr + IONIC_BAR0_DEV_INFO_REGS_OFFSET;
	idev->dev_cmd_regs = bar->vaddr + IONIC_BAR0_DEV_CMD_REGS_OFFSET;
	idev->intr_status = bar->vaddr + IONIC_BAR0_INTR_STATUS_OFFSET;
	idev->intr_ctrl = bar->vaddr + IONIC_BAR0_INTR_CTRL_OFFSET;

	idev->hwstamp_regs = &idev->dev_info_regs->hwstamp;

	sig = ioread32(&idev->dev_info_regs->signature);
	if (sig != IONIC_DEV_INFO_SIGNATURE) {
		dev_err(dev, "Incompatible firmware signature %x", sig);
		return -EFAULT;
	}

	ionic_init_devinfo(ionic);

	/* BAR1: doorbells */
	bar++;
	if (num_bars < 2) {
		dev_err(dev, "Doorbell bar missing, aborting\n");
		return -EFAULT;
	}

	timer_setup(&ionic->watchdog_timer, ionic_watchdog_cb, 0);
	ionic->watchdog_period = IONIC_WATCHDOG_SECS * HZ;

	/* set times to ensure the first check will proceed */
	atomic_long_set(&idev->last_check_time, jiffies - 2 * HZ);
	idev->last_hb_time = jiffies - 2 * ionic->watchdog_period;
	/* init as ready, so no transition if the first check succeeds */
	idev->last_fw_hb = 0;
	idev->fw_hb_ready = true;
	idev->fw_status_ready = true;
	idev->fw_generation = IONIC_FW_STS_F_GENERATION &
			      ioread8(&idev->dev_info_regs->fw_status);

	mod_timer(&ionic->watchdog_timer,
		  round_jiffies(jiffies + ionic->watchdog_period));

	idev->db_pages = bar->vaddr;
	idev->phy_db_pages = bar->bus_addr;

	return 0;
}

/* Devcmd Interface */
int ionic_heartbeat_check(struct ionic *ionic)
{
	struct ionic_dev *idev = &ionic->idev;
	unsigned long check_time, last_check_time;
	bool fw_status_ready = true;
	bool fw_hb_ready;
	u8 fw_generation;
	u8 fw_status;
	u32 fw_hb;

	/* wait at least one second before testing again */
	check_time = jiffies;
	last_check_time = atomic_long_read(&idev->last_check_time);
do_check_time:
	if (time_before(check_time, last_check_time + HZ))
		return 0;
	if (!atomic_long_try_cmpxchg_relaxed(&idev->last_check_time,
					     &last_check_time, check_time)) {
		/* if called concurrently, only the first should proceed. */
		dev_dbg(ionic->dev, "%s: do_check_time again\n", __func__);
		goto do_check_time;
	}

	/* firmware is useful only if the running bit is set and
	 * fw_status != 0xff (bad PCI read).
	 * If fw_status is not ready, don't bother with the generation.
	 */
	fw_status = ioread8(&idev->dev_info_regs->fw_status);

	if (fw_status == 0xff || !(fw_status & IONIC_FW_STS_F_RUNNING)) {
		fw_status_ready = false;
	} else {
		fw_generation = fw_status & IONIC_FW_STS_F_GENERATION;
		if (idev->fw_generation != fw_generation) {
			dev_info(ionic->dev, "FW generation 0x%02x -> 0x%02x\n",
				 idev->fw_generation, fw_generation);

			idev->fw_generation = fw_generation;

			/* If the generation changed, the fw status is not
			 * ready so we need to trigger a fw-down cycle. After
			 * the down, the next watchdog will see the fw is up
			 * and the generation value stable, so will trigger
			 * the fw-up activity.
			 */
			fw_status_ready = false;
		}
	}

	/* is this a transition? */
	if (fw_status_ready != idev->fw_status_ready) {
		struct ionic_lif *lif = ionic->lif;
		bool trigger = false;

		idev->fw_status_ready = fw_status_ready;

		if (!fw_status_ready) {
			dev_info(ionic->dev, "FW stopped %u\n", fw_status);
			if (lif && !test_bit(IONIC_LIF_F_FW_RESET, lif->state))
				trigger = true;
		} else {
			dev_info(ionic->dev, "FW running %u\n", fw_status);
			if (lif && test_bit(IONIC_LIF_F_FW_RESET, lif->state))
				trigger = true;
		}

		if (trigger) {
			struct ionic_deferred_work *work;

			work = kzalloc(sizeof(*work), GFP_ATOMIC);
			if (work) {
				work->type = IONIC_DW_TYPE_LIF_RESET;
				work->fw_status = fw_status_ready;
				ionic_lif_deferred_enqueue(&lif->deferred, work);
			}
		}
	}

	if (!fw_status_ready)
		return -ENXIO;

	/* wait at least one watchdog period since the last heartbeat */
	last_check_time = idev->last_hb_time;
	if (time_before(check_time, last_check_time + ionic->watchdog_period))
		return 0;

	fw_hb = ioread32(&idev->dev_info_regs->fw_heartbeat);
	fw_hb_ready = fw_hb != idev->last_fw_hb;

	/* early FW version had no heartbeat, so fake it */
	if (!fw_hb_ready && !fw_hb)
		fw_hb_ready = true;

	dev_dbg(ionic->dev, "%s: fw_hb %u last_fw_hb %u ready %u\n",
		__func__, fw_hb, idev->last_fw_hb, fw_hb_ready);

	idev->last_fw_hb = fw_hb;

	/* log a transition */
	if (fw_hb_ready != idev->fw_hb_ready) {
		idev->fw_hb_ready = fw_hb_ready;
		if (!fw_hb_ready)
			dev_info(ionic->dev, "FW heartbeat stalled at %d\n", fw_hb);
		else
			dev_info(ionic->dev, "FW heartbeat restored at %d\n", fw_hb);
	}

	if (!fw_hb_ready)
		return -ENXIO;

	idev->last_hb_time = check_time;

	return 0;
}

u8 ionic_dev_cmd_status(struct ionic_dev *idev)
{
	return ioread8(&idev->dev_cmd_regs->comp.comp.status);
}

bool ionic_dev_cmd_done(struct ionic_dev *idev)
{
	return ioread32(&idev->dev_cmd_regs->done) & IONIC_DEV_CMD_DONE;
}

void ionic_dev_cmd_comp(struct ionic_dev *idev, union ionic_dev_cmd_comp *comp)
{
	memcpy_fromio(comp, &idev->dev_cmd_regs->comp, sizeof(*comp));
}

void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd)
{
	memcpy_toio(&idev->dev_cmd_regs->cmd, cmd, sizeof(*cmd));
	iowrite32(0, &idev->dev_cmd_regs->done);
	iowrite32(1, &idev->dev_cmd_regs->doorbell);
}

/* Device commands */
void ionic_dev_cmd_identify(struct ionic_dev *idev, u8 ver)
{
	union ionic_dev_cmd cmd = {
		.identify.opcode = IONIC_CMD_IDENTIFY,
		.identify.ver = ver,
	};

	ionic_dev_cmd_go(idev, &cmd);
}

void ionic_dev_cmd_init(struct ionic_dev *idev)
{
	union ionic_dev_cmd cmd = {
		.init.opcode = IONIC_CMD_INIT,
		.init.type = 0,
	};

	ionic_dev_cmd_go(idev, &cmd);
}

void ionic_dev_cmd_reset(struct ionic_dev *idev)
{
	union ionic_dev_cmd cmd = {
		.reset.opcode = IONIC_CMD_RESET,
	};

	ionic_dev_cmd_go(idev, &cmd);
}

/* Port commands */
void ionic_dev_cmd_port_identify(struct ionic_dev *idev)
{
	union ionic_dev_cmd cmd = {
		.port_init.opcode = IONIC_CMD_PORT_IDENTIFY,
		.port_init.index = 0,
	};

	ionic_dev_cmd_go(idev, &cmd);
}

void ionic_dev_cmd_port_init(struct ionic_dev *idev)
{
	union ionic_dev_cmd cmd = {
		.port_init.opcode = IONIC_CMD_PORT_INIT,
		.port_init.index = 0,
		.port_init.info_pa = cpu_to_le64(idev->port_info_pa),
	};

	ionic_dev_cmd_go(idev, &cmd);
}

void ionic_dev_cmd_port_reset(struct ionic_dev *idev)
{
	union ionic_dev_cmd cmd = {
		.port_reset.opcode = IONIC_CMD_PORT_RESET,
		.port_reset.index = 0,
	};

	ionic_dev_cmd_go(idev, &cmd);
}

void ionic_dev_cmd_port_state(struct ionic_dev *idev, u8 state)
{
	union ionic_dev_cmd cmd = {
		.port_setattr.opcode = IONIC_CMD_PORT_SETATTR,
		.port_setattr.index = 0,
		.port_setattr.attr = IONIC_PORT_ATTR_STATE,
		.port_setattr.state = state,
	};

	ionic_dev_cmd_go(idev, &cmd);
}

void ionic_dev_cmd_port_speed(struct ionic_dev *idev, u32 speed)
{
	union ionic_dev_cmd cmd = {
		.port_setattr.opcode = IONIC_CMD_PORT_SETATTR,
		.port_setattr.index = 0,
		.port_setattr.attr = IONIC_PORT_ATTR_SPEED,
		.port_setattr.speed = cpu_to_le32(speed),
	};

	ionic_dev_cmd_go(idev, &cmd);
}

void ionic_dev_cmd_port_autoneg(struct ionic_dev *idev, u8 an_enable)
{
	union ionic_dev_cmd cmd = {
		.port_setattr.opcode = IONIC_CMD_PORT_SETATTR,
		.port_setattr.index = 0,
		.port_setattr.attr = IONIC_PORT_ATTR_AUTONEG,
		.port_setattr.an_enable = an_enable,
	};

	ionic_dev_cmd_go(idev, &cmd);
}

void ionic_dev_cmd_port_fec(struct ionic_dev *idev, u8 fec_type)
{
	union ionic_dev_cmd cmd = {
		.port_setattr.opcode = IONIC_CMD_PORT_SETATTR,
		.port_setattr.index = 0,
		.port_setattr.attr = IONIC_PORT_ATTR_FEC,
		.port_setattr.fec_type = fec_type,
	};

	ionic_dev_cmd_go(idev, &cmd);
}

void ionic_dev_cmd_port_pause(struct ionic_dev *idev, u8 pause_type)
{
	union ionic_dev_cmd cmd = {
		.port_setattr.opcode = IONIC_CMD_PORT_SETATTR,
		.port_setattr.index = 0,
		.port_setattr.attr = IONIC_PORT_ATTR_PAUSE,
		.port_setattr.pause_type = pause_type,
	};

	ionic_dev_cmd_go(idev, &cmd);
}

/* VF commands */
int ionic_set_vf_config(struct ionic *ionic, int vf, u8 attr, u8 *data)
{
	union ionic_dev_cmd cmd = {
		.vf_setattr.opcode = IONIC_CMD_VF_SETATTR,
		.vf_setattr.attr = attr,
		.vf_setattr.vf_index = cpu_to_le16(vf),
	};
	int err;

	switch (attr) {
	case IONIC_VF_ATTR_SPOOFCHK:
		cmd.vf_setattr.spoofchk = *data;
		dev_dbg(ionic->dev, "%s: vf %d spoof %d\n",
			__func__, vf, *data);
		break;
	case IONIC_VF_ATTR_TRUST:
		cmd.vf_setattr.trust = *data;
		dev_dbg(ionic->dev, "%s: vf %d trust %d\n",
			__func__, vf, *data);
		break;
	case IONIC_VF_ATTR_LINKSTATE:
		cmd.vf_setattr.linkstate = *data;
		dev_dbg(ionic->dev, "%s: vf %d linkstate %d\n",
			__func__, vf, *data);
		break;
	case IONIC_VF_ATTR_MAC:
		ether_addr_copy(cmd.vf_setattr.macaddr, data);
		dev_dbg(ionic->dev, "%s: vf %d macaddr %pM\n",
			__func__, vf, data);
		break;
	case IONIC_VF_ATTR_VLAN:
		cmd.vf_setattr.vlanid = cpu_to_le16(*(u16 *)data);
		dev_dbg(ionic->dev, "%s: vf %d vlan %d\n",
			__func__, vf, *(u16 *)data);
		break;
	case IONIC_VF_ATTR_RATE:
		cmd.vf_setattr.maxrate = cpu_to_le32(*(u32 *)data);
		dev_dbg(ionic->dev, "%s: vf %d maxrate %d\n",
			__func__, vf, *(u32 *)data);
		break;
	case IONIC_VF_ATTR_STATSADDR:
		cmd.vf_setattr.stats_pa = cpu_to_le64(*(u64 *)data);
		dev_dbg(ionic->dev, "%s: vf %d stats_pa 0x%08llx\n",
			__func__, vf, *(u64 *)data);
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&ionic->dev_cmd_lock);
	ionic_dev_cmd_go(&ionic->idev, &cmd);
	err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
	mutex_unlock(&ionic->dev_cmd_lock);

	return err;
}

/* LIF commands */
void ionic_dev_cmd_queue_identify(struct ionic_dev *idev,
				  u16 lif_type, u8 qtype, u8 qver)
{
	union ionic_dev_cmd cmd = {
		.q_identify.opcode = IONIC_CMD_Q_IDENTIFY,
		.q_identify.lif_type = cpu_to_le16(lif_type),
		.q_identify.type = qtype,
		.q_identify.ver = qver,
	};

	ionic_dev_cmd_go(idev, &cmd);
}

void ionic_dev_cmd_lif_identify(struct ionic_dev *idev, u8 type, u8 ver)
{
	union ionic_dev_cmd cmd = {
		.lif_identify.opcode = IONIC_CMD_LIF_IDENTIFY,
		.lif_identify.type = type,
		.lif_identify.ver = ver,
	};

	ionic_dev_cmd_go(idev, &cmd);
}

void ionic_dev_cmd_lif_init(struct ionic_dev *idev, u16 lif_index,
			    dma_addr_t info_pa)
{
	union ionic_dev_cmd cmd = {
		.lif_init.opcode = IONIC_CMD_LIF_INIT,
		.lif_init.index = cpu_to_le16(lif_index),
		.lif_init.info_pa = cpu_to_le64(info_pa),
	};

	ionic_dev_cmd_go(idev, &cmd);
}

void ionic_dev_cmd_lif_reset(struct ionic_dev *idev, u16 lif_index)
{
	union ionic_dev_cmd cmd = {
		.lif_init.opcode = IONIC_CMD_LIF_RESET,
		.lif_init.index = cpu_to_le16(lif_index),
	};

	ionic_dev_cmd_go(idev, &cmd);
}

void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq,
			       u16 lif_index, u16 intr_index)
{
	struct ionic_queue *q = &qcq->q;
	struct ionic_cq *cq = &qcq->cq;

	union ionic_dev_cmd cmd = {
		.q_init.opcode = IONIC_CMD_Q_INIT,
		.q_init.lif_index = cpu_to_le16(lif_index),
		.q_init.type = q->type,
		.q_init.ver = qcq->q.lif->qtype_info[q->type].version,
		.q_init.index = cpu_to_le32(q->index),
		.q_init.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					    IONIC_QINIT_F_ENA),
		.q_init.pid = cpu_to_le16(q->pid),
		.q_init.intr_index = cpu_to_le16(intr_index),
		.q_init.ring_size = ilog2(q->num_descs),
		.q_init.ring_base = cpu_to_le64(q->base_pa),
		.q_init.cq_ring_base = cpu_to_le64(cq->base_pa),
	};

	ionic_dev_cmd_go(idev, &cmd);
}

int ionic_db_page_num(struct ionic_lif *lif, int pid)
{
	return (lif->hw_index * lif->dbid_count) + pid;
}

int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
		  struct ionic_intr_info *intr,
		  unsigned int num_descs, size_t desc_size)
{
	unsigned int ring_size;

	if (desc_size == 0 || !is_power_of_2(num_descs))
		return -EINVAL;

	ring_size = ilog2(num_descs);
	if (ring_size < 2 || ring_size > 16)
		return -EINVAL;

	cq->lif = lif;
	cq->bound_intr = intr;
	cq->num_descs = num_descs;
	cq->desc_size = desc_size;
	cq->tail_idx = 0;
	cq->done_color = 1;

	return 0;
}

void ionic_cq_map(struct ionic_cq *cq, void *base, dma_addr_t base_pa)
{
	struct ionic_cq_info *cur;
	unsigned int i;

	cq->base = base;
	cq->base_pa = base_pa;

	for (i = 0, cur = cq->info; i < cq->num_descs; i++, cur++)
		cur->cq_desc = base + (i * cq->desc_size);
}

void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q)
{
	cq->bound_q = q;
}

unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
			      ionic_cq_cb cb, ionic_cq_done_cb done_cb,
			      void *done_arg)
{
	struct ionic_cq_info *cq_info;
	unsigned int work_done = 0;

	if (work_to_do == 0)
		return 0;

	cq_info = &cq->info[cq->tail_idx];
	while (cb(cq, cq_info)) {
		if (cq->tail_idx == cq->num_descs - 1)
			cq->done_color = !cq->done_color;
		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
		cq_info = &cq->info[cq->tail_idx];
		DEBUG_STATS_CQE_CNT(cq);

		if (++work_done >= work_to_do)
			break;
	}

	if (work_done && done_cb)
		done_cb(done_arg);

	return work_done;
}

int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
		 struct ionic_queue *q, unsigned int index, const char *name,
		 unsigned int num_descs, size_t desc_size,
		 size_t sg_desc_size, unsigned int pid)
{
	unsigned int ring_size;

	if (desc_size == 0 || !is_power_of_2(num_descs))
		return -EINVAL;

	ring_size = ilog2(num_descs);
	if (ring_size < 2 || ring_size > 16)
		return -EINVAL;

	q->lif = lif;
	q->idev = idev;
	q->index = index;
	q->num_descs = num_descs;
	q->desc_size = desc_size;
	q->sg_desc_size = sg_desc_size;
	q->tail_idx = 0;
	q->head_idx = 0;
	q->pid = pid;

	snprintf(q->name, sizeof(q->name), "L%d-%s%u", lif->index, name, index);

	return 0;
}

void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
{
	struct ionic_desc_info *cur;
	unsigned int i;

	q->base = base;
	q->base_pa = base_pa;

	for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
		cur->desc = base + (i * q->desc_size);
}

void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
{
	struct ionic_desc_info *cur;
	unsigned int i;

	q->sg_base = base;
	q->sg_base_pa = base_pa;

	for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
		cur->sg_desc = base + (i * q->sg_desc_size);
}

void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
		  void *cb_arg)
{
	struct ionic_desc_info *desc_info;
	struct ionic_lif *lif = q->lif;
	struct device *dev = q->dev;

	desc_info = &q->info[q->head_idx];
	desc_info->cb = cb;
	desc_info->cb_arg = cb_arg;

	q->head_idx = (q->head_idx + 1) & (q->num_descs - 1);

	dev_dbg(dev, "lif=%d qname=%s qid=%d qtype=%d p_index=%d ringdb=%d\n",
		q->lif->index, q->name, q->hw_type, q->hw_index,
		q->head_idx, ring_doorbell);

	if (ring_doorbell)
		ionic_dbell_ring(lif->kern_dbpage, q->hw_type,
				 q->dbval | q->head_idx);
}

static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
{
	unsigned int mask, tail, head;

	mask = q->num_descs - 1;
	tail = q->tail_idx;
	head = q->head_idx;

	return ((pos - tail) & mask) < ((head - tail) & mask);
}

void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
		     unsigned int stop_index)
{
	struct ionic_desc_info *desc_info;
	ionic_desc_cb cb;
	void *cb_arg;
	u16 index;

	/* check for empty queue */
	if (q->tail_idx == q->head_idx)
		return;

	/* stop index must be for a descriptor that is not yet completed */
	if (unlikely(!ionic_q_is_posted(q, stop_index)))
		dev_err(q->dev,
			"ionic stop is not posted %s stop %u tail %u head %u\n",
			q->name, stop_index, q->tail_idx, q->head_idx);

	do {
		desc_info = &q->info[q->tail_idx];
		index = q->tail_idx;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

		cb = desc_info->cb;
		cb_arg = desc_info->cb_arg;

		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;

		if (cb)
			cb(q, desc_info, cq_info, cb_arg);
	} while (index != stop_index);
}
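
Two stand-alone sketches follow as editorial illustrations; they are not part of the driver source. The first models the posted-slot test used by ionic_q_is_posted() above: because num_descs is required to be a power of two, a slot lies in the in-flight window [tail, head) exactly when ((pos - tail) & mask) < ((head - tail) & mask), even after the indices wrap. The ring size and index values below are invented for the example, and the program is ordinary user-space C rather than kernel code.

/* Illustrative sketch: the power-of-two ring index arithmetic behind
 * ionic_q_is_posted().  Build with any C compiler and run in user space.
 */
#include <stdio.h>
#include <stdbool.h>

/* Same test as ionic_q_is_posted(): a slot is "posted" if it lies in the
 * half-open interval [tail, head) measured modulo the ring size.
 */
static bool is_posted(unsigned int num_descs, unsigned int tail,
		      unsigned int head, unsigned int pos)
{
	unsigned int mask = num_descs - 1;	/* num_descs must be a power of 2 */

	return ((pos - tail) & mask) < ((head - tail) & mask);
}

int main(void)
{
	/* a 16-entry ring that has wrapped: the producer (head) is at 3 and
	 * the consumer (tail) is at 14, so slots 14, 15, 0, 1, 2 are posted
	 */
	unsigned int num_descs = 16, tail = 14, head = 3;
	unsigned int pos;

	for (pos = 0; pos < num_descs; pos++)
		printf("slot %2u posted: %s\n", pos,
		       is_posted(num_descs, tail, head, pos) ? "yes" : "no");

	return 0;
}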
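The second sketch is a minimal model of the completion-ring color convention behind done_color in ionic_cq_service(): the consumer flips the color it expects each time its tail index wraps, so it can distinguish freshly written completions from stale ones without reading a producer index. The producer behavior shown here (stamping each entry with its current color and flipping on wrap) is an assumption of the sketch, not something stated in this file, and all names and sizes are invented.

/* Illustrative sketch: a single-threaded model of the done_color scheme.
 * Build with any C compiler and run in user space.
 */
#include <stdio.h>
#include <stdbool.h>

#define RING_SIZE 8			/* must be a power of two */

struct comp {
	bool color;
};

static struct comp ring[RING_SIZE];	/* entries start as color 0 (stale) */
static unsigned int prod, cons;
static bool prod_color = true;		/* assumed device-side starting color */
static bool done_color = true;		/* consumer expects color 1 first */

/* "device" side: post n completions, flipping color when the ring wraps */
static void produce(unsigned int n)
{
	while (n--) {
		ring[prod].color = prod_color;
		if (prod == RING_SIZE - 1)
			prod_color = !prod_color;
		prod = (prod + 1) & (RING_SIZE - 1);
	}
}

/* "driver" side: mirrors the tail/done_color handling in ionic_cq_service() */
static unsigned int consume(void)
{
	unsigned int done = 0;

	while (ring[cons].color == done_color) {
		if (cons == RING_SIZE - 1)
			done_color = !done_color;
		cons = (cons + 1) & (RING_SIZE - 1);
		done++;
	}
	return done;
}

int main(void)
{
	produce(6);
	printf("first pass serviced %u\n", consume());	/* prints 6 */
	produce(6);					/* wraps past the end */
	printf("second pass serviced %u\n", consume());	/* prints 6 */
	return 0;
}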