| /linux/drivers/net/ethernet/cavium/liquidio/ |
| request_manager.c |
|     51  struct octeon_instr_queue *iq;  in octeon_init_instr_queue()  [local]
|     73  iq = oct->instr_queue[iq_no];  in octeon_init_instr_queue()
|     75  iq->oct_dev = oct;  in octeon_init_instr_queue()
|     77  iq->base_addr = lio_dma_alloc(oct, q_size, &iq->base_addr_dma);  in octeon_init_instr_queue()
|     78  if (!iq->base_addr) {  in octeon_init_instr_queue()
|     84  iq->max_count = num_descs;  in octeon_init_instr_queue()
|     89  iq->request_list = vzalloc_node(array_size(num_descs, sizeof(*iq->request_list)),  in octeon_init_instr_queue()
|     91  if (!iq->request_list)  in octeon_init_instr_queue()
|     92  iq->request_list = vzalloc(array_size(num_descs, sizeof(*iq->request_list)));  in octeon_init_instr_queue()
|     93  if (!iq->request_list) {  in octeon_init_instr_queue()
|     [all …]
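The hits at lines 89-92 above show a NUMA-aware allocation fallback: octeon_init_instr_queue() first asks for the request list on the device's own node with vzalloc_node() and, if that fails, retries with plain vzalloc(). A minimal sketch of the same idiom, assuming kernel context; struct demo_entry, demo_alloc_table() and the numa_node parameter are illustrative names, not taken from the driver:

    #include <linux/types.h>
    #include <linux/vmalloc.h>
    #include <linux/overflow.h>

    /* Illustrative entry type; the real driver keeps per-descriptor request state. */
    struct demo_entry {
    	void *buf;
    	u64 reqtype;
    };

    static struct demo_entry *demo_alloc_table(u32 num_descs, int numa_node)
    {
    	struct demo_entry *table;

    	/* Prefer memory on the device's NUMA node ... */
    	table = vzalloc_node(array_size(num_descs, sizeof(*table)), numa_node);
    	if (!table)
    		/* ... but settle for any node rather than failing outright. */
    		table = vzalloc(array_size(num_descs, sizeof(*table)));

    	return table;	/* NULL only if both attempts failed */
    }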
|
| cn23xx_vf_regs.h |
|     70  #define CN23XX_VF_SLI_IQ_PKT_CONTROL64(iq) \  [argument]
|     71  (CN23XX_VF_SLI_IQ_PKT_CONTROL_START64 + ((iq) * CN23XX_VF_IQ_OFFSET))
|     73  #define CN23XX_VF_SLI_IQ_BASE_ADDR64(iq) \  [argument]
|     74  (CN23XX_VF_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN23XX_VF_IQ_OFFSET))
|     76  #define CN23XX_VF_SLI_IQ_SIZE(iq) \  [argument]
|     77  (CN23XX_VF_SLI_IQ_SIZE_START + ((iq) * CN23XX_VF_IQ_OFFSET))
|     79  #define CN23XX_VF_SLI_IQ_DOORBELL(iq) \  [argument]
|     80  (CN23XX_VF_SLI_IQ_DOORBELL_START + ((iq) * CN23XX_VF_IQ_OFFSET))
|     82  #define CN23XX_VF_SLI_IQ_INSTR_COUNT64(iq) \  [argument]
|     83  (CN23XX_VF_SLI_IQ_INSTR_COUNT_START64 + ((iq) * CN23XX_VF_IQ_OFFSET))
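The macros above all compute a per-queue CSR address the same way: a block base address plus the queue index times a fixed per-queue stride. A minimal sketch of that arithmetic, using made-up EX_* constants rather than the real CN23XX register values:

    #include <stdint.h>
    #include <stdio.h>

    /* Made-up base and stride -- for illustration only, not CN23XX values. */
    #define EX_SLI_IQ_BASE_ADDR_START64	0x10010ULL
    #define EX_IQ_OFFSET			0x20000ULL

    /* Same shape as CN23XX_VF_SLI_IQ_BASE_ADDR64(iq): base + queue * stride. */
    #define EX_SLI_IQ_BASE_ADDR64(iq) \
    	(EX_SLI_IQ_BASE_ADDR_START64 + ((uint64_t)(iq) * EX_IQ_OFFSET))

    int main(void)
    {
    	/* CSR offset of the base-address register for input queue 3. */
    	printf("IQ3 base-addr CSR: 0x%llx\n",
    	       (unsigned long long)EX_SLI_IQ_BASE_ADDR64(3));
    	return 0;
    }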
|
| cn66xx_regs.h |
|    143  #define CN6XXX_SLI_IQ_BASE_ADDR64(iq) \  [argument]
|    144  (CN6XXX_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN6XXX_IQ_OFFSET))
|    146  #define CN6XXX_SLI_IQ_SIZE(iq) \  [argument]
|    147  (CN6XXX_SLI_IQ_SIZE_START + ((iq) * CN6XXX_IQ_OFFSET))
|    149  #define CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq) \  [argument]
|    150  (CN6XXX_SLI_IQ_PKT_INSTR_HDR_START64 + ((iq) * CN6XXX_IQ_OFFSET))
|    152  #define CN6XXX_SLI_IQ_DOORBELL(iq) \  [argument]
|    153  (CN6XXX_SLI_IQ_DOORBELL_START + ((iq) * CN6XXX_IQ_OFFSET))
|    155  #define CN6XXX_SLI_IQ_INSTR_COUNT(iq) \  [argument]
|    156  (CN6XXX_SLI_IQ_INSTR_COUNT_START + ((iq) * CN6XXX_IQ_OFFSET))
|    [all …]
|
| cn23xx_vf_device.c |
|    104  struct octeon_instr_queue *iq;  in cn23xx_vf_setup_global_input_regs()  [local]
|    116  iq = oct->instr_queue[q_no];  in cn23xx_vf_setup_global_input_regs()
|    118  if (iq)  in cn23xx_vf_setup_global_input_regs()
|    119  inst_cnt_reg = iq->inst_cnt_reg;  in cn23xx_vf_setup_global_input_regs()
|    214  struct octeon_instr_queue *iq = oct->instr_queue[iq_no];  in cn23xx_setup_vf_iq_regs()  [local]
|    219  iq->base_addr_dma);  in cn23xx_setup_vf_iq_regs()
|    220  octeon_write_csr(oct, CN23XX_VF_SLI_IQ_SIZE(iq_no), iq->max_count);  in cn23xx_setup_vf_iq_regs()
|    225  iq->doorbell_reg =  in cn23xx_setup_vf_iq_regs()
|    227  iq->inst_cnt_reg =  in cn23xx_setup_vf_iq_regs()
|    230  iq_no, iq->doorbell_reg, iq->inst_cnt_reg);  in cn23xx_setup_vf_iq_regs()
|    [all …]
|
| cn23xx_pf_regs.h |
|    170  #define CN23XX_SLI_IQ_PKT_CONTROL64(iq) \  [argument]
|    171  (CN23XX_SLI_IQ_PKT_CONTROL_START64 + ((iq) * CN23XX_IQ_OFFSET))
|    173  #define CN23XX_SLI_IQ_BASE_ADDR64(iq) \  [argument]
|    174  (CN23XX_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN23XX_IQ_OFFSET))
|    176  #define CN23XX_SLI_IQ_SIZE(iq) \  [argument]
|    177  (CN23XX_SLI_IQ_SIZE_START + ((iq) * CN23XX_IQ_OFFSET))
|    179  #define CN23XX_SLI_IQ_DOORBELL(iq) \  [argument]
|    180  (CN23XX_SLI_IQ_DOORBELL_START + ((iq) * CN23XX_IQ_OFFSET))
|    182  #define CN23XX_SLI_IQ_INSTR_COUNT64(iq) \  [argument]
|    183  (CN23XX_SLI_IQ_INSTR_COUNT_START64 + ((iq) * CN23XX_IQ_OFFSET))
|
| cn66xx_device.c |
|    266  struct octeon_instr_queue *iq = oct->instr_queue[iq_no];  in lio_cn6xxx_setup_iq_regs()  [local]
|    272  iq->base_addr_dma);  in lio_cn6xxx_setup_iq_regs()
|    273  octeon_write_csr(oct, CN6XXX_SLI_IQ_SIZE(iq_no), iq->max_count);  in lio_cn6xxx_setup_iq_regs()
|    278  iq->doorbell_reg = oct->mmio[0].hw_addr + CN6XXX_SLI_IQ_DOORBELL(iq_no);  in lio_cn6xxx_setup_iq_regs()
|    279  iq->inst_cnt_reg = oct->mmio[0].hw_addr  in lio_cn6xxx_setup_iq_regs()
|    282  iq_no, iq->doorbell_reg, iq->inst_cnt_reg);  in lio_cn6xxx_setup_iq_regs()
|    287  iq->reset_instr_cnt = readl(iq->inst_cnt_reg);  in lio_cn6xxx_setup_iq_regs()
|    339  mask |= oct->io_qmask.iq;  in lio_cn6xxx_enable_io_queues()
|    357  mask ^= oct->io_qmask.iq;  in lio_cn6xxx_disable_io_queues()
|    361  mask = (u32)oct->io_qmask.iq;  in lio_cn6xxx_disable_io_queues()
|    [all …]
|
| octeon_config.h |
|    121  #define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
|    122  #define CFG_GET_IQ_MAX_Q(cfg) ((cfg)->iq.max_iqs)
|    123  #define CFG_GET_IQ_PENDING_LIST_SIZE(cfg) ((cfg)->iq.pending_list_size)
|    124  #define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type)
|    125  #define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
|    126  #define CFG_GET_IQ_DB_TIMEOUT(cfg) ((cfg)->iq.db_timeout)
|    128  #define CFG_GET_IQ_INTR_PKT(cfg) ((cfg)->iq.iq_intr_pkt)
|    129  #define CFG_SET_IQ_INTR_PKT(cfg, val) (cfg)->iq.iq_intr_pkt = val
|    410  struct octeon_iq_config iq;  [member]
|
| cn23xx_pf_device.c |
|    234  struct octeon_instr_queue *iq;  in cn23xx_pf_setup_global_input_regs()  [local]
|    277  iq = oct->instr_queue[q_no];  in cn23xx_pf_setup_global_input_regs()
|    278  if (iq)  in cn23xx_pf_setup_global_input_regs()
|    279  inst_cnt_reg = iq->inst_cnt_reg;  in cn23xx_pf_setup_global_input_regs()
|    420  struct octeon_instr_queue *iq = oct->instr_queue[iq_no];  in cn23xx_setup_iq_regs()  [local]
|    427  iq->base_addr_dma);  in cn23xx_setup_iq_regs()
|    428  octeon_write_csr(oct, CN23XX_SLI_IQ_SIZE(iq_no), iq->max_count);  in cn23xx_setup_iq_regs()
|    433  iq->doorbell_reg =  in cn23xx_setup_iq_regs()
|    435  iq->inst_cnt_reg =  in cn23xx_setup_iq_regs()
|    438  iq_no, iq->doorbell_reg, iq->inst_cnt_reg);  in cn23xx_setup_iq_regs()
|    [all …]
|
| octeon_device.c |
|     41  .iq = {
|    150  .iq = {
|    316  .iq = {
|    419  .iq = {
|    656  if (oct->io_qmask.iq & BIT_ULL(i))  in octeon_free_device_mem()
|   1286  (oct->io_qmask.iq & BIT_ULL(q_no)))  in octeon_get_tx_qsize()
|   1436  void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)  in lio_enable_irq()  [argument]
|   1449  if (iq) {  in lio_enable_irq()
|   1450  spin_lock_bh(&iq->lock);  in lio_enable_irq()
|   1451  writel(iq->pkts_processed, iq->inst_cnt_reg);  in lio_enable_irq()
|    [all …]
|
| cn68xx_regs.h |
|     32  #define CN68XX_SLI_IQ_PORT_PKIND(iq) \  [argument]
|     33  (CN68XX_SLI_IQ_PORT0_PKIND + ((iq) * CN6XXX_IQ_OFFSET))
|
| octeon_iq.h |
|    375  struct octeon_instr_queue *iq, u32 napi_budget);
|    394  octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
|
| /linux/drivers/net/ethernet/marvell/octeon_ep_vf/ |
| octep_vf_main.c |
|     62  ioq_vector->iq = oct->iq[i];  in octep_vf_alloc_ioq_vectors()
|    294  static void octep_vf_enable_ioq_irq(struct octep_vf_iq *iq, struct octep_vf_oq *oq)  in octep_vf_enable_ioq_irq()  [argument]
|    298  netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);  in octep_vf_enable_ioq_irq()
|    299  if (iq->pkts_processed) {  in octep_vf_enable_ioq_irq()
|    300  writel(iq->pkts_processed, iq->inst_cnt_reg);  in octep_vf_enable_ioq_irq()
|    301  iq->pkt_in_done -= iq->pkts_processed;  in octep_vf_enable_ioq_irq()
|    302  iq->pkts_processed = 0;  in octep_vf_enable_ioq_irq()
|    312  writeq(1UL << OCTEP_VF_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);  in octep_vf_enable_ioq_irq()
|    327  tx_pending = octep_vf_iq_process_completions(ioq_vector->iq, 64);  in octep_vf_napi_poll()
|    337  octep_vf_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);  in octep_vf_napi_poll()
|    [all …]
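octep_vf_enable_ioq_irq() above re-arms the TX-queue interrupt after a NAPI pass: it writes back the number of completions it handled, adjusts its running counter, and then writes a resend bit so the hardware re-raises the interrupt if work is still pending. A hedged sketch of that sequence, assuming kernel context; struct demo_iq and DEMO_IQ_INTR_RESEND_BIT are illustrative stand-ins, not the driver's definitions:

    #include <linux/io.h>
    #include <linux/bits.h>
    #include <linux/types.h>

    #define DEMO_IQ_INTR_RESEND_BIT	59	/* hypothetical bit position */

    struct demo_iq {
    	u32 pkts_processed;		/* completions handled in this NAPI pass */
    	u32 pkt_in_done;		/* running count mirrored from hardware */
    	u8 __iomem *inst_cnt_reg;	/* mapped instruction-count CSR */
    };

    static void demo_enable_iq_irq(struct demo_iq *iq)
    {
    	if (iq->pkts_processed) {
    		/* Acknowledge how many instructions the driver consumed. */
    		writel(iq->pkts_processed, iq->inst_cnt_reg);
    		iq->pkt_in_done -= iq->pkts_processed;
    		iq->pkts_processed = 0;
    	}
    	/* Ask the hardware to re-raise the interrupt if work is still queued. */
    	writeq(BIT_ULL(DEMO_IQ_INTR_RESEND_BIT), iq->inst_cnt_reg);
    }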
|
| octep_vf_cn9k.c |
|    146  conf->iq.num_descs = OCTEP_VF_IQ_MAX_DESCRIPTORS;  in octep_vf_init_config_cn93_vf()
|    147  conf->iq.instr_type = OCTEP_VF_64BYTE_INSTR;  in octep_vf_init_config_cn93_vf()
|    148  conf->iq.db_min = OCTEP_VF_DB_MIN;  in octep_vf_init_config_cn93_vf()
|    149  conf->iq.intr_threshold = OCTEP_VF_IQ_INTR_THRESHOLD;  in octep_vf_init_config_cn93_vf()
|    163  struct octep_vf_iq *iq = oct->iq[iq_no];  in octep_vf_setup_iq_regs_cn93()  [local]
|    181  octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_BADDR(iq_no), iq->desc_ring_dma);  in octep_vf_setup_iq_regs_cn93()
|    182  octep_vf_write_csr64(oct, CN93_VF_SDP_R_IN_INSTR_RSIZE(iq_no), iq->max_count);  in octep_vf_setup_iq_regs_cn93()
|    185  iq->doorbell_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_INSTR_DBELL(iq_no);  in octep_vf_setup_iq_regs_cn93()
|    186  iq->inst_cnt_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_CNTS(iq_no);  in octep_vf_setup_iq_regs_cn93()
|    187  iq->intr_lvl_reg = oct->mmio.hw_addr + CN93_VF_SDP_R_IN_INT_LEVELS(iq_no);  in octep_vf_setup_iq_regs_cn93()
|    [all …]
|
| octep_vf_config.h |
|     56  #define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
|     57  #define CFG_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs)
|     58  #define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type)
|     60  #define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
|     61  #define CFG_GET_IQ_INTR_THRESHOLD(cfg) ((cfg)->iq.intr_threshold)
|    149  struct octep_vf_iq_config iq;  [member]
|
| octep_vf_cnxk.c |
|    148  conf->iq.num_descs = OCTEP_VF_IQ_MAX_DESCRIPTORS;  in octep_vf_init_config_cnxk_vf()
|    149  conf->iq.instr_type = OCTEP_VF_64BYTE_INSTR;  in octep_vf_init_config_cnxk_vf()
|    150  conf->iq.db_min = OCTEP_VF_DB_MIN;  in octep_vf_init_config_cnxk_vf()
|    151  conf->iq.intr_threshold = OCTEP_VF_IQ_INTR_THRESHOLD;  in octep_vf_init_config_cnxk_vf()
|    166  struct octep_vf_iq *iq = oct->iq[iq_no];  in octep_vf_setup_iq_regs_cnxk()  [local]
|    184  octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(iq_no), iq->desc_ring_dma);  in octep_vf_setup_iq_regs_cnxk()
|    185  octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(iq_no), iq->max_count);  in octep_vf_setup_iq_regs_cnxk()
|    188  iq->doorbell_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no);  in octep_vf_setup_iq_regs_cnxk()
|    189  iq->inst_cnt_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_CNTS(iq_no);  in octep_vf_setup_iq_regs_cnxk()
|    190  iq->intr_lvl_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no);  in octep_vf_setup_iq_regs_cnxk()
|    [all …]
|
| /linux/drivers/net/ethernet/marvell/octeon_ep/ |
| octep_main.c |
|     65  ioq_vector->iq = oct->iq[i];  in octep_alloc_ioq_vectors()
|    563  static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)  in octep_enable_ioq_irq()  [argument]
|    567  netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);  in octep_enable_ioq_irq()
|    568  if (iq->pkts_processed) {  in octep_enable_ioq_irq()
|    569  writel(iq->pkts_processed, iq->inst_cnt_reg);  in octep_enable_ioq_irq()
|    570  iq->pkt_in_done -= iq->pkts_processed;  in octep_enable_ioq_irq()
|    571  iq->pkts_processed = 0;  in octep_enable_ioq_irq()
|    581  writeq(1UL << OCTEP_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);  in octep_enable_ioq_irq()
|    596  tx_pending = octep_iq_process_completions(ioq_vector->iq, budget);  in octep_napi_poll()
|    606  octep_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);  in octep_napi_poll()
|    [all …]
|
| octep_config.h |
|     60  #define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
|     61  #define CFG_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs)
|     62  #define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type)
|     64  #define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
|     65  #define CFG_GET_IQ_INTR_THRESHOLD(cfg) ((cfg)->iq.intr_threshold)
|    232  struct octep_iq_config iq;  [member]
|
| octep_cn9k_pf.c |
|    232  conf->iq.num_descs = OCTEP_IQ_MAX_DESCRIPTORS;  in octep_init_config_cn93_pf()
|    233  conf->iq.instr_type = OCTEP_64BYTE_INSTR;  in octep_init_config_cn93_pf()
|    234  conf->iq.db_min = OCTEP_DB_MIN;  in octep_init_config_cn93_pf()
|    235  conf->iq.intr_threshold = OCTEP_IQ_INTR_THRESHOLD;  in octep_init_config_cn93_pf()
|    265  struct octep_iq *iq = oct->iq[iq_no];  in octep_setup_iq_regs_cn93_pf()  [local]
|    286  iq->desc_ring_dma);  in octep_setup_iq_regs_cn93_pf()
|    288  iq->max_count);  in octep_setup_iq_regs_cn93_pf()
|    293  iq->doorbell_reg = oct->mmio[0].hw_addr +  in octep_setup_iq_regs_cn93_pf()
|    295  iq->inst_cnt_reg = oct->mmio[0].hw_addr +  in octep_setup_iq_regs_cn93_pf()
|    297  iq->intr_lvl_reg = oct->mmio[0].hw_addr +  in octep_setup_iq_regs_cn93_pf()
|    [all …]
|
| octep_cnxk_pf.c |
|    251  conf->iq.num_descs = OCTEP_IQ_MAX_DESCRIPTORS;  in octep_init_config_cnxk_pf()
|    252  conf->iq.instr_type = OCTEP_64BYTE_INSTR;  in octep_init_config_cnxk_pf()
|    253  conf->iq.db_min = OCTEP_DB_MIN;  in octep_init_config_cnxk_pf()
|    254  conf->iq.intr_threshold = OCTEP_IQ_INTR_THRESHOLD;  in octep_init_config_cnxk_pf()
|    285  struct octep_iq *iq = oct->iq[iq_no];  in octep_setup_iq_regs_cnxk_pf()  [local]
|    306  iq->desc_ring_dma);  in octep_setup_iq_regs_cnxk_pf()
|    308  iq->max_count);  in octep_setup_iq_regs_cnxk_pf()
|    313  iq->doorbell_reg = oct->mmio[0].hw_addr +  in octep_setup_iq_regs_cnxk_pf()
|    315  iq->inst_cnt_reg = oct->mmio[0].hw_addr +  in octep_setup_iq_regs_cnxk_pf()
|    317  iq->intr_lvl_reg = oct->mmio[0].hw_addr +  in octep_setup_iq_regs_cnxk_pf()
|    [all …]
|
| /linux/drivers/scsi/csiostor/ |
| csio_isr.c |
|    212  csio_scsi_isr_handler(struct csio_q *iq)  in csio_scsi_isr_handler()  [argument]
|    214  struct csio_hw *hw = (struct csio_hw *)iq->owner;  in csio_scsi_isr_handler()
|    223  if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl,  in csio_scsi_isr_handler()
|    258  struct csio_q *iq = (struct csio_q *) dev_id;  in csio_scsi_isr()  [local]
|    261  if (unlikely(!iq))  in csio_scsi_isr()
|    264  hw = (struct csio_hw *)iq->owner;  in csio_scsi_isr()
|    271  csio_scsi_isr_handler(iq);  in csio_scsi_isr()
|    288  struct csio_q *iq = priv;  in csio_scsi_intx_handler()  [local]
|    290  csio_scsi_isr_handler(iq);  in csio_scsi_intx_handler()
|
| csio_wr.h |
|    410  struct csio_iq iq;  [member]
|    463  #define csio_q_iqid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.iq.iqid)
|    465  ((__hw)->wrm.q_arr[(__idx)]->un.iq.physiqid)
|    467  ((__hw)->wrm.q_arr[(__idx)]->un.iq.flq_idx)
|    473  #define csio_iq_has_fl(__iq) ((__iq)->un.iq.flq_idx != -1)
|    476  csio_q_flid((__hw), (__hw)->wrm.q_arr[(__iq_qidx)]->un.iq.flq_idx)
|
| /linux/scripts/ |
| tags.sh |
|    265  if $1 --version 2>&1 | grep -iq universal; then
|    297  if ! $1 --list-languages | grep -iq kconfig; then
|    315  if $1 --version 2>&1 | grep -iq exuberant; then
|    317  elif $1 --version 2>&1 | grep -iq emacs; then
|
| /linux/drivers/media/tuners/ |
| r820t.c |
|   1618  static void r820t_compre_cor(struct r820t_sect_type iq[3])  in r820t_compre_cor()
|   1623  if (iq[0].value > iq[i - 1].value)  in r820t_compre_cor()
|   1624  swap(iq[0], iq[i - 1]);  in r820t_compre_cor()
|   1629  struct r820t_sect_type iq[3], u8 reg)  in r820t_compre_step()
|   1642  tmp.phase_y = iq[0].phase_y;  in r820t_compre_step()
|   1643  tmp.gain_x = iq[0].gain_x;  in r820t_compre_step()
|   1665  if (tmp.value <= iq[0].value) {  in r820t_compre_step()
|   1666  iq[0].gain_x = tmp.gain_x;  in r820t_compre_step()
|   1667  iq[0].phase_y = tmp.phase_y;  in r820t_compre_step()
|   1668  iq[0].value = tmp.value;  in r820t_compre_step()
|    [all …]
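r820t_compre_cor() above keeps the best of three I/Q calibration candidates in slot 0: whenever iq[0] has a larger (worse) imbalance value than another entry, the two are swapped. The loop bounds are not visible in the hits, so the sketch below assumes a simple pass over the three entries; struct demo_sect, DEMO_SWAP() and the sample values are made up:

    #include <stdio.h>

    struct demo_sect {
    	int gain_x;
    	int phase_y;
    	int value;	/* measured I/Q imbalance; lower is better */
    };

    #define DEMO_SWAP(a, b) \
    	do { struct demo_sect t = (a); (a) = (b); (b) = t; } while (0)

    static void demo_compre_cor(struct demo_sect iq[3])
    {
    	int i;

    	/* After the loop, iq[0] holds the entry with the lowest value. */
    	for (i = 3; i > 0; i--)
    		if (iq[0].value > iq[i - 1].value)
    			DEMO_SWAP(iq[0], iq[i - 1]);
    }

    int main(void)
    {
    	struct demo_sect iq[3] = { { 1, 4, 30 }, { 2, 5, 10 }, { 3, 6, 20 } };

    	demo_compre_cor(iq);
    	/* Prints: best: gain_x=2 phase_y=5 value=10 */
    	printf("best: gain_x=%d phase_y=%d value=%d\n",
    	       iq[0].gain_x, iq[0].phase_y, iq[0].value);
    	return 0;
    }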
|
| /linux/drivers/net/ethernet/chelsio/cxgb4/ |
| cxgb4_filter.c |
|    330  int iq;  in get_filter_steerq()  [local]
|    338  if (fs->iq)  in get_filter_steerq()
|    340  iq = 0;  in get_filter_steerq()
|    347  if (fs->iq < pi->nqsets)  in get_filter_steerq()
|    348  iq = adapter->sge.ethrxq[pi->first_qset +  in get_filter_steerq()
|    349  fs->iq].rspq.abs_id;  in get_filter_steerq()
|    351  iq = fs->iq;  in get_filter_steerq()
|    354  return iq;  in get_filter_steerq()
|    856  FW_FILTER_WR_IQ_V(f->fs.iq));  in set_filter_wr()
|   1325  RSS_QUEUE_V(f->fs.iq) |  in mk_act_open_req6()
|    [all …]
|
| sge.c |
|   4360  int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,  in t4_sge_alloc_rxq()  [argument]
|   4372  iq->size = roundup(iq->size, 16);  in t4_sge_alloc_rxq()
|   4374  iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,  in t4_sge_alloc_rxq()
|   4375  &iq->phys_addr, NULL, 0,  in t4_sge_alloc_rxq()
|   4377  if (!iq->desc)  in t4_sge_alloc_rxq()
|   4394  FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) |  in t4_sge_alloc_rxq()
|   4395  FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));  in t4_sge_alloc_rxq()
|   4396  c.iqsize = htons(iq->size);  in t4_sge_alloc_rxq()
|   4397  c.iqaddr = cpu_to_be64(iq->phys_addr);  in t4_sge_alloc_rxq()
|   4457  netif_napi_add(dev, &iq->napi, napi_rx_handler);  in t4_sge_alloc_rxq()
|    [all …]
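Line 4372 above rounds the response-queue size up to a multiple of 16 entries before t4_sge_alloc_rxq() allocates the descriptor ring. The rounding is ordinary next-multiple arithmetic, sketched below with a hypothetical demo_roundup() helper; the kernel's roundup() macro performs the same computation:

    #include <stdio.h>

    /* Round x up to the next multiple of 'multiple' (multiple > 0). */
    static unsigned int demo_roundup(unsigned int x, unsigned int multiple)
    {
    	return ((x + multiple - 1) / multiple) * multiple;
    }

    int main(void)
    {
    	/* e.g. a requested size of 1000 entries becomes 1008 (63 * 16). */
    	printf("%u -> %u\n", 1000U, demo_roundup(1000U, 16U));
    	return 0;
    }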
|