| /linux/drivers/net/ethernet/huawei/hinic/ |
| hinic_hw_eqs.c |
    27  #define GET_EQ_NUM_PAGES(eq, pg_size) \
    28          (ALIGN((eq)->q_len * (eq)->elem_size, pg_size) / (pg_size))
    30  #define GET_EQ_NUM_ELEMS_IN_PG(eq, pg_size) ((pg_size) / (eq)->elem_size)
    32  #define EQ_CONS_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \
    33          HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \
    34          HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id))
    36  #define EQ_PROD_IDX_REG_ADDR(eq) (((eq)->type == HINIC_AEQ) ? \
    37          HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \
    38          HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id))
    40  #define EQ_HI_PHYS_ADDR_REG(eq, pg_num) (((eq)->type == HINIC_AEQ) ? \
    [all …]
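The two macros at lines 27–30 size the EQ ring: the queue's byte footprint is rounded up to whole pages with ALIGN(). A minimal userspace sketch of the same arithmetic (ALIGN reimplemented locally, and the queue dimensions are made up):

```c
#include <stdio.h>

/* Userspace stand-in for the kernel's ALIGN(): round x up to a
 * multiple of a, where a is a power of two. */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    unsigned int q_len = 256;     /* hypothetical number of EQ entries */
    unsigned int elem_size = 32;  /* hypothetical bytes per entry */
    unsigned int pg_size = 4096;

    /* Same shape as GET_EQ_NUM_PAGES(): total bytes, page-aligned,
     * divided by the page size. 256 * 32 = 8192 -> exactly 2 pages. */
    unsigned int pages = ALIGN_UP(q_len * elem_size, pg_size) / pg_size;
    /* Same shape as GET_EQ_NUM_ELEMS_IN_PG(). */
    unsigned int elems_per_pg = pg_size / elem_size;

    printf("pages=%u elems_per_pg=%u\n", pages, elems_per_pg);
    return 0;
}
```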
|
| /linux/sound/pci/au88x0/ |
| au88x0_eq.c |
    56  eqhw_t *eqhw = &(vortex->eq.this04);  in vortex_EqHw_SetLeftCoefs()
    78  eqhw_t *eqhw = &(vortex->eq.this04);  in vortex_EqHw_SetRightCoefs()
   101  eqhw_t *eqhw = &(vortex->eq.this04);  in vortex_EqHw_SetLeftStates()
   118  eqhw_t *eqhw = &(vortex->eq.this04);  in vortex_EqHw_SetRightStates()
   164  eqhw_t *eqhw = &(vortex->eq.this04);  in vortex_EqHw_SetBypassGain()
   211  eqhw_t *eqhw = &(vortex->eq.this04);  in vortex_EqHw_SetLeftGainsTarget()
   221  eqhw_t *eqhw = &(vortex->eq.this04);  in vortex_EqHw_SetRightGainsTarget()
   231  eqhw_t *eqhw = &(vortex->eq.this04);  in vortex_EqHw_SetLeftGainsCurrent()
   241  eqhw_t *eqhw = &(vortex->eq.this04);  in vortex_EqHw_SetRightGainsCurrent()
   252  eqhw_t *eqhw = &(vortex->eq…
   495  eqlzr_t *eq = &(vortex->eq);  in vortex_Eqlzr_GetLeftGain()
   506  eqlzr_t *eq = &(vortex->eq);  in vortex_Eqlzr_SetLeftGain()
   520  eqlzr_t *eq = &(vortex->eq);  in vortex_Eqlzr_GetRightGain()
   531  eqlzr_t *eq = &(vortex->eq);  in vortex_Eqlzr_SetRightGain()
   568  eqlzr_t *eq = &(vortex->eq);  in vortex_Eqlzr_SetAllBandsFromActiveCoeffSet()
   579  eqlzr_t *eq = &(vortex->eq);  in vortex_Eqlzr_SetAllBands()
   597  eqlzr_t *eq = &(vortex->eq);  in vortex_Eqlzr_SetA3dBypassGain()
   613  eqlzr_t *eq = &(vortex->eq);  in vortex_Eqlzr_ProgramA3dBypassGain()
   633  eqlzr_t *eq = &(vortex->eq);  in vortex_Eqlzr_SetBypass()
   650  eqlzr_t *eq = &(vortex->eq);  in vortex_Eqlzr_ReadAndSetActiveCoefSet()
   660  eqlzr_t *eq = &(vortex->eq);  in vortex_Eqlzr_GetAllPeaks()
   679  eqlzr_t *eq = &(vortex->eq);  in vortex_Eqlzr_init()
   726  eqlzr_t *eq = &(vortex->eq);  in snd_vortex_eqtoggle_get()
   739  eqlzr_t *eq = &(vortex->eq);  in snd_vortex_eqtoggle_put()
   [all …]
| /linux/drivers/infiniband/hw/mthca/ |
| mthca_eq.c |
   173  static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
   184          mthca_write64(MTHCA_EQ_DB_SET_CI | eq->eqn, ci & (eq->nent - 1),  in tavor_set_eq_ci()
   189  static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
   194          dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8);  in arbel_set_eq_ci()
   199  static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
   202          arbel_set_eq_ci(dev, eq, ci);  in set_eq_ci()
   204          tavor_set_eq_ci(dev, eq, ci);  in set_eq_ci()
   228  static inline struct mthca_eqe *get_eqe(struct mthca_eq *eq, u32 entry)
   230          unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE;  in get_eqe()
   231          return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;  in get_eqe()
   [all …]
|
| /linux/drivers/net/ethernet/mellanox/mlx4/ |
| eq.c |
    97  static void eq_set_ci(struct mlx4_eq *eq, int req_not)
    99          __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |  in eq_set_ci()
   101                       eq->doorbell);  in eq_set_ci()
   106  static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor,
   110          unsigned long offset = (entry & (eq->nent - 1)) * eqe_size;  in get_eqe()
   118          return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % …  in get_eqe()
   121  static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor, u8 size)
   123          struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor, size);  in next_eqe_sw()
   124          return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;  in next_eqe_sw()
   241          struct mlx4_eq *eq = &priv->eq_table.eq[vec];  in mlx4_set_eq_affinity_hint()
   [all …]
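next_eqe_sw() at line 124 is the classic ownership-bit test: hardware flips bit 7 of each EQE on every lap around the ring, and software tracks its own lap parity in bit log2(nent) of the free-running cons_index; an entry is valid only when the two agree. A userspace toy mirroring that check (the struct and field names are invented):

```c
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy EQ entry: hardware writes bit 7 of 'owner' with alternating
 * meaning on each full pass around the ring. */
struct toy_eqe {
    uint8_t owner;
    uint8_t payload[31];
};

struct toy_eq {
    struct toy_eqe *ring;  /* nent entries; nent is a power of two */
    uint32_t nent;
    uint32_t cons_index;   /* free-running consumer counter */
};

/* Same parity test as mlx4's next_eqe_sw(): the entry at cons_index is
 * valid only when the hardware-written owner bit matches the software
 * "lap" parity, i.e. bit log2(nent) of the free-running index. */
static struct toy_eqe *toy_next_eqe(struct toy_eq *eq)
{
    struct toy_eqe *eqe = &eq->ring[eq->cons_index & (eq->nent - 1)];
    bool hw_owner = eqe->owner & 0x80;
    bool sw_parity = eq->cons_index & eq->nent;

    return (hw_owner != sw_parity) ? NULL : eqe;
}

int main(void)
{
    struct toy_eqe ring[4] = { { .owner = 0x00 } }; /* first lap: bit clear */
    struct toy_eq eq = { .ring = ring, .nent = 4, .cons_index = 0 };

    /* Valid on the first lap (owner 0, parity 0)... */
    printf("%s\n", toy_next_eqe(&eq) ? "valid" : "not yet written");
    /* ...but the same slot is stale once cons_index wraps past nent. */
    eq.cons_index = 4;
    printf("%s\n", toy_next_eqe(&eq) ? "valid" : "not yet written");
    return 0;
}
```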
|
| /linux/drivers/scsi/elx/efct/ |
| efct_hw_queues.c |
    14          struct hw_eq *eq = NULL;  in efct_hw_init_queues()
    34          eq = efct_hw_new_eq(hw, EFCT_HW_EQ_DEPTH);  in efct_hw_init_queues()
    35          if (!eq) {  in efct_hw_init_queues()
    40          eqs[i] = eq;  in efct_hw_init_queues()
    44          cq = efct_hw_new_cq(eq,  in efct_hw_init_queues()
    59          cq = efct_hw_new_cq(eq, hw->num_qentries[SLI4_QTYPE_CQ]);  in efct_hw_init_queues()
   130          struct hw_eq *eq = kzalloc(sizeof(*eq), GFP_KERNEL);  in efct_hw_new_eq()
   132          if (!eq)  in efct_hw_new_eq()
   135          eq->type = SLI4_QTYPE_EQ;  in efct_hw_new_eq()
   136          eq->hw = hw;  in efct_hw_new_eq()
   [all …]
|
| /linux/arch/powerpc/kernel/ |
| cpu_setup_6xx.S |
   217          cror    4*cr0+eq,4*cr0+eq,4*cr1+eq
   218          cror    4*cr0+eq,4*cr0+eq,4*cr2+eq
   371          cror    4*cr1+eq,4*cr1+eq,4*cr2+eq
   373          cror    4*cr0+eq,4*cr0+eq,4*cr3+eq
   374          cror    4*cr0+eq,4*cr0+eq,4*cr4+eq
   375          cror    4*cr0+eq,4*cr0+eq,4*cr1+eq
   376          cror    4*cr0+eq,4*cr0+eq,4*cr5+eq
   377          cror    4*cr0+eq,4*cr0+eq,4*cr7+eq
   442          cror    4*cr1+eq,4*cr1+eq,4*cr2+eq
   444          cror    4*cr0+eq,4*cr0+eq,4*cr3+eq
   [all …]
|
| /linux/drivers/pci/controller/ |
| pcie-iproc-msi.c |
    65          unsigned int eq;
   129                            unsigned int eq)  in iproc_msi_read_reg()
   133          return readl_relaxed(pcie->base + msi->reg_offsets[eq][reg]);  in iproc_msi_read_reg()
   138                             int eq, u32 val)  in iproc_msi_write_reg()
   142          writel_relaxed(val, pcie->base + msi->reg_offsets[eq][reg]);  in iproc_msi_write_reg()
   159  static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq)
   162          return eq * EQ_MEM_REGION_SIZE;  in iproc_msi_eq_offset()
   164          return eq * EQ_LEN * sizeof(u32);  in iproc_msi_eq_offset()
   304  static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head)
   310          offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32);  in decode_msi_hwirq()
   [all …]
|
| /linux/rust/syn/gen/ |
| eq.rs |
    14      fn eq(&self, other: &Self) -> bool {
    24      fn eq(&self, other: &Self) -> bool {
    34      fn eq(&self, other: &Self) -> bool {
    45      fn eq(&self, other: &Self) -> bool {
    56      fn eq(&self, other: &Self) -> bool {
    67      fn eq(&self, other: &Self) -> bool {
    81      fn eq(&self, other: &Self) -> bool {
    91      fn eq(&self, other: &Self) -> bool {
   101      fn eq(&self, other: &Self) -> bool {
   111      fn eq(&self, other: &Self) -> bool {
   [all …]
|
| /linux/drivers/net/ethernet/ibm/ehea/ |
| ehea_qmr.c |
   236          struct ehea_eq *eq;  in ehea_create_eq()
   238          eq = kzalloc(sizeof(*eq), GFP_KERNEL);  in ehea_create_eq()
   239          if (!eq)  in ehea_create_eq()
   242          eq->adapter = adapter;  in ehea_create_eq()
   243          eq->attr.type = type;  in ehea_create_eq()
   244          eq->attr.max_nr_of_eqes = max_nr_of_eqes;  in ehea_create_eq()
   245          eq->attr.eqe_gen = eqe_gen;  in ehea_create_eq()
   246          spin_lock_init(&eq->spinlock);  in ehea_create_eq()
   249                          &eq->attr, &eq->fw_handle);  in ehea_create_eq()
   255          ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,  in ehea_create_eq()
   [all …]
|
| /linux/include/linux/mlx5/ |
| eq.h |
    24  mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
    25  int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
    27  void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
    30  struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc);
    31  void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm);
    41  static inline u32 mlx5_eq_update_cc(struct mlx5_eq *eq, u32 cc)
    44          mlx5_eq_update_ci(eq, cc, 0);  in mlx5_eq_update_cc()
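These declarations compose into a consume-then-rearm polling loop; mlx5_ib_eq_pf_process() in the odp.c entry further down uses exactly this shape. A hedged kernel-context sketch of the loop (the per-event handler and the loop function itself are hypothetical):

```c
#include <linux/mlx5/device.h>
#include <linux/mlx5/eq.h>

/* Hypothetical per-event handler, just to make the loop complete. */
static void handle_one_eqe(struct mlx5_eqe *eqe)
{
}

/* Consume-then-rearm loop built from the API above: 'cc' counts
 * entries consumed since the last consumer-index update. */
static void example_eq_poll(struct mlx5_eq *eq)
{
    struct mlx5_eqe *eqe;
    u32 cc = 0;

    while ((eqe = mlx5_eq_get_eqe(eq, cc))) {
        handle_one_eqe(eqe);
        /* Folds cc into the consumer index once it grows large
         * (see mlx5_eq_update_cc() at line 41 above). */
        cc = mlx5_eq_update_cc(eq, cc);
    }
    /* Final consumer-index update; 'true' re-arms the EQ. */
    mlx5_eq_update_ci(eq, cc, true);
}
```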
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/ |
| cq.c |
   102          mlx5_core_err(cq->eq->core.dev,  in mlx5_core_cq_dummy_cb()
   114          struct mlx5_eq_comp *eq;  in mlx5_create_cq()
   117          eq = mlx5_eqn2comp_eq(dev, eqn);  in mlx5_create_cq()
   118          if (IS_ERR(eq))  in mlx5_create_cq()
   119                  return PTR_ERR(eq);  in mlx5_create_cq()
   130          cq->eq = eq;  in mlx5_create_cq()
   146          cq->tasklet_ctx.priv = &eq->tasklet_ctx;  in mlx5_create_cq()
   150          err = mlx5_eq_add_cq(&eq->core, cq);  in mlx5_create_cq()
   165          cq->irqn = eq->core.irqn;  in mlx5_create_cq()
   170          mlx5_eq_del_cq(&eq->core, cq);  in mlx5_create_cq()
   [all …]
|
| /linux/arch/hexagon/lib/ |
| memset.S |
    29          p0 = cmp.eq(r2, #0)
    59          p1 = cmp.eq(r2, #1)
    72          p1 = cmp.eq(r2, #2)
    85          p1 = cmp.eq(r2, #4)
    98          p1 = cmp.eq(r3, #1)
   114          p1 = cmp.eq(r2, #8)
   125          p1 = cmp.eq(r2, #4)
   136          p1 = cmp.eq(r2, #2)
   180          p1 = cmp.eq(r2, #1)
   196          p0 = cmp.eq(r2, #2)
   [all …]
|
| memcpy.S |
   185          p2 = cmp.eq(len, #0);           /* =0 */
   188          p1 = cmp.eq(ptr_in, ptr_out);   /* attempt to overwrite self */
   261          p1 = cmp.eq(prolog, #0);
   267          nokernel = cmp.eq(kernel,#0);
   276          p2 = cmp.eq(kernel, #1);        /* skip ovr if kernel == 0 */
   346          nokernel = cmp.eq(kernel, #0);  /* after adjustment, recheck */
   367          p3 = cmp.eq(kernel, rest);
   436          noepilog = cmp.eq(epilog,#0);
   443          p3 = cmp.eq(epilogdws, #0);
   455          p3 = cmp.eq(kernel, #0);
|
| /linux/drivers/net/ethernet/microsoft/mana/ |
| gdma_main.c |
   280          req.log2_throttle_limit = queue->eq.log2_throttle_limit;  in mana_gd_create_hw_eq()
   281          req.eq_pci_msix_index = queue->eq.msix_index;  in mana_gd_create_hw_eq()
   291          queue->eq.disable_needed = true;  in mana_gd_create_hw_eq()
   338          e.eq.id = qid;  in mana_gd_ring_doorbell()
   339          e.eq.tail_ptr = tail_ptr;  in mana_gd_ring_doorbell()
   340          e.eq.arm = num_req;  in mana_gd_ring_doorbell()
   561  static void mana_gd_process_eqe(struct gdma_queue *eq)
   563          u32 head = eq->head % (eq->queue_size / GDMA_EQE_SIZE);  in mana_gd_process_eqe()
   564          struct gdma_context *gc = eq->gdma_dev->gdma_context;  in mana_gd_process_eqe()
   565          struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr;  in mana_gd_process_eqe()
   [all …]
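mana_gd_process_eqe() at line 563 reduces the free-running head counter modulo the entry count (queue bytes divided by EQE size) to find the current slot. The same arithmetic in a standalone sketch, with made-up sizes:

```c
#include <stdint.h>
#include <stdio.h>

#define TOY_EQE_SIZE 16u  /* stand-in for GDMA_EQE_SIZE */

/* 'head' runs freely and is reduced modulo the number of entries
 * (queue bytes / entry size) to locate the current EQE slot. */
int main(void)
{
    uint32_t queue_size = 4096;                   /* bytes */
    uint32_t num_eqe = queue_size / TOY_EQE_SIZE; /* 256 entries */
    uint32_t head = 1000;                         /* free-running counter */

    printf("slot = %u\n", head % num_eqe);        /* 1000 mod 256 = 232 */
    return 0;
}
```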
|
| /linux/drivers/infiniband/hw/mlx5/ |
| odp.c |
    90          struct mlx5_ib_pf_eq *eq;
  1630          struct mlx5_ib_pf_eq *eq = pfault->eq;  in mlx5_ib_eqe_pf_action()
  1632          mlx5_ib_pfault(eq->dev, pfault);  in mlx5_ib_eqe_pf_action()
  1633          mempool_free(pfault, eq->pool);  in mlx5_ib_eqe_pf_action()
  1637  static void mlx5_ib_eq_pf_process(struct mlx5_ib_pf_eq *eq)
  1644          while ((eqe = mlx5_eq_get_eqe(eq->core, cc))) {  in mlx5_ib_eq_pf_process()
  1645                  pfault = mempool_alloc(eq->pool, GFP_ATOMIC);  in mlx5_ib_eq_pf_process()
  1647                          schedule_work(&eq->work);  in mlx5_ib_eq_pf_process()
  1673                          eq->dev,  in mlx5_ib_eq_pf_process()
  1678                  mlx5_ib_dbg(eq->dev,  in mlx5_ib_eq_pf_process()
   [all …]
|
| /linux/net/dns_resolver/ |
| dns_key.c |
   157          const char *eq;  in dns_resolver_preparse()
   168          eq = memchr(opt, '=', opt_len);  in dns_resolver_preparse()
   169          if (eq) {  in dns_resolver_preparse()
   170                  opt_nlen = eq - opt;  in dns_resolver_preparse()
   171                  eq++;  in dns_resolver_preparse()
   172                  memcpy(optval, eq, next_opt - eq);  in dns_resolver_preparse()
   173                  optval[next_opt - eq] = '\0';  in dns_resolver_preparse()
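The snippet shows how dns_resolver_preparse() splits one "name=value" option: memchr() bounds the search to the option, and everything after the '=' is copied out as the value. A userspace rendering of the same steps (the sample option and buffer sizes are stand-ins):

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *opt = "dnserror=5";          /* hypothetical option */
    size_t opt_len = strlen(opt);
    const char *next_opt = opt + opt_len;    /* end of this option */
    char optval[32];

    /* memchr() limits the '=' search to this option's bytes. */
    const char *eq = memchr(opt, '=', opt_len);
    if (eq) {
        size_t opt_nlen = eq - opt;          /* length of the name part */
        eq++;                                /* value starts after '=' */
        memcpy(optval, eq, next_opt - eq);
        optval[next_opt - eq] = '\0';
        printf("name len=%zu value=%s\n", opt_nlen, optval);
    }
    return 0;
}
```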
|
| /linux/drivers/firmware/broadcom/ |
| bcm47xx_nvram.c |
   187          char *var, *value, *end, *eq;  in bcm47xx_nvram_getenv()
   203          eq = strchr(var, '=');  in bcm47xx_nvram_getenv()
   204          if (!eq)  in bcm47xx_nvram_getenv()
   206          value = eq + 1;  in bcm47xx_nvram_getenv()
   207          if (eq - var == strlen(name) &&  in bcm47xx_nvram_getenv()
   208              strncmp(var, name, eq - var) == 0)  in bcm47xx_nvram_getenv()
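bcm47xx_nvram_getenv() walks a block of NUL-separated "var=value" strings and matches the requested name against the text before the '='. A self-contained sketch of that lookup (the helper name and sample block are invented):

```c
#include <stdio.h>
#include <string.h>

/* Look up 'name' in a block of NUL-separated "var=value" strings,
 * mirroring the strchr()-based match in bcm47xx_nvram_getenv(). */
static const char *toy_getenv(const char *block, const char *end,
                              const char *name)
{
    const char *var, *eq;

    for (var = block; var < end && *var; var += strlen(var) + 1) {
        eq = strchr(var, '=');
        if (!eq)
            continue;
        if ((size_t)(eq - var) == strlen(name) &&
            strncmp(var, name, eq - var) == 0)
            return eq + 1;  /* value starts right after '=' */
    }
    return NULL;
}

int main(void)
{
    /* Embedded NULs separate the variables, as in NVRAM images. */
    const char block[] = "boardtype=0x048e\0boardrev=0x1102\0";
    const char *v = toy_getenv(block, block + sizeof(block), "boardrev");

    printf("%s\n", v ? v : "(not found)");
    return 0;
}
```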
|
| /linux/drivers/clk/spear/ |
| spear1310_clock.c |
   252          {.xscale = 10, .yscale = 204, .eq = 0}, /* 12.29 MHz */
   253          {.xscale = 4, .yscale = 21, .eq = 0},   /* 48 MHz */
   254          {.xscale = 2, .yscale = 6, .eq = 0},    /* 83 MHz */
   255          {.xscale = 2, .yscale = 4, .eq = 0},    /* 125 MHz */
   256          {.xscale = 1, .yscale = 3, .eq = 1},    /* 166 MHz */
   257          {.xscale = 1, .yscale = 2, .eq = 1},    /* 250 MHz */
   263          {.xscale = 2, .yscale = 6, .eq = 0},    /* divided by 6 */
   264          {.xscale = 2, .yscale = 4, .eq = 0},    /* divided by 4 */
   265          {.xscale = 1, .yscale = 3, .eq = 1},    /* divided by 3 */
   266          {.xscale = 1, .yscale = 2, .eq = 1},    /* divided by 2 */
   [all …]
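Reading the "divided by N" comments against the fields, the rate these tables encode appears to be parent * xscale / (yscale * (eq ? 1 : 2)), with eq selecting whether the x/y fraction is halved once more; that interpretation is an assumption drawn from the comments, not stated in this file. A sketch that reproduces the second table (struct and function names are invented, parent rate is made up):

```c
#include <stdio.h>

/* Aux-synthesizer rate table entry, shaped like the initializers above. */
struct toy_aux_rate {
    unsigned int xscale, yscale, eq;
};

/* Assumed formula: eq = 0 adds one extra divide-by-two. */
static unsigned long toy_aux_calc(unsigned long parent,
                                  const struct toy_aux_rate *t)
{
    return parent * t->xscale / (t->yscale * (t->eq ? 1 : 2));
}

int main(void)
{
    /* The "divided by 6 ... divided by 2" table from the entry above. */
    const struct toy_aux_rate tbl[] = {
        { 2, 6, 0 },  /* /6 */
        { 2, 4, 0 },  /* /4 */
        { 1, 3, 1 },  /* /3 */
        { 1, 2, 1 },  /* /2 */
    };
    /* With a hypothetical 600 MHz parent: 100, 150, 200, 300 MHz. */
    for (unsigned int i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
        printf("%lu\n", toy_aux_calc(600000000UL, &tbl[i]));
    return 0;
}
```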
|
| /linux/arch/arc/lib/ |
| strlen.S |
    21          mov.eq  r7,r4
    24          or.eq   r12,r12,r1
    38          or.eq   r12,r12,r1
    57          mov.eq  r1,r12
    69          mov.eq  r2,r6
|
| /linux/lib/crc/arm64/ |
| crc32-core.S |
    74          csel    x3, x3, x4, eq
    75          csel    w0, w0, w8, eq
    79          csel    x3, x3, x4, eq
    80          csel    w0, w0, w8, eq
    84          csel    w3, w3, w4, eq
    85          csel    w0, w0, w8, eq
    88          csel    w0, w0, w8, eq
    92          csel    w0, w0, w8, eq
|
| /linux/arch/arm64/lib/ |
| strlen.S |
   102          ccmp    has_nul2, 0, 0, eq
   142          ccmp    has_nul2, 0, 0, eq
   176          ccmp    has_nul2, 0, 0, eq
   185          ccmp    has_nul2, 0, 0, eq
   208          csel    data1, data1, tmp4, eq
   209          csel    data2, data2, tmp2, eq
|
| /linux/drivers/infiniband/hw/hns/ |
| hns_roce_hw_v2.c |
  6260                                  struct hns_roce_eq *eq, u32 queue_num)  in hns_roce_v2_init_irq_work()
  6270          irq_work->event_type = eq->event_type;  in hns_roce_v2_init_irq_work()
  6271          irq_work->sub_type = eq->sub_type;  in hns_roce_v2_init_irq_work()
  6276  static void update_eq_db(struct hns_roce_eq *eq)
  6278          struct hns_roce_dev *hr_dev = eq->hr_dev;  in update_eq_db()
  6281          if (eq->type_flag == HNS_ROCE_AEQ) {  in update_eq_db()
  6283                  eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?  in update_eq_db()
  6287          hr_reg_write(&eq_db, EQ_DB_TAG, eq->eqn);  in update_eq_db()
  6290                  eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?  in update_eq_db()
  6295          hr_reg_write(&eq_db, EQ_DB_CI, eq->cons_index);  in update_eq_db()
   [all …]
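update_eq_db() packs the EQ number, the arm command and the consumer index into a doorbell value through hr_reg_write() field writes. A generic illustration of that packing with invented field positions (the real layout lives in the driver's hr_reg field definitions, not here):

```c
#include <stdint.h>
#include <stdio.h>

/* Invented bit positions, purely for illustration. */
#define DB_TAG_SHIFT 0   /* EQ number */
#define DB_CMD_SHIFT 16  /* arm vs. no-arm command */
#define DB_CI_SHIFT  32  /* consumer index */

/* Compose one doorbell word from the three fields that update_eq_db()
 * writes: tag (EQ number), command (arm state), consumer index. */
static uint64_t toy_eq_doorbell(uint32_t eqn, uint32_t cmd, uint32_t ci)
{
    return ((uint64_t)eqn << DB_TAG_SHIFT) |
           ((uint64_t)cmd << DB_CMD_SHIFT) |
           ((uint64_t)ci  << DB_CI_SHIFT);
}

int main(void)
{
    printf("%#llx\n", (unsigned long long)toy_eq_doorbell(3, 1, 0x100));
    return 0;
}
```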
|
| /linux/drivers/accel/habanalabs/common/ |
| irq.c |
   498          struct hl_eq *eq = arg;  in hl_irq_handler_eq()
   499          struct hl_device *hdev = eq->hdev;  in hl_irq_handler_eq()
   507          eq_base = eq->kernel_address;  in hl_irq_handler_eq()
   510          cur_eqe = le32_to_cpu(eq_base[eq->ci].hdr.ctl);  in hl_irq_handler_eq()
   518          (((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK) != cur_eqe_index)) {  in hl_irq_handler_eq()
   522                  ((eq->prev_eqe_index + 1) & EQ_CTL_INDEX_MASK),  in hl_irq_handler_eq()
   527          eq->prev_eqe_index++;  in hl_irq_handler_eq()
   529          eq_entry = &eq_base[eq->ci];  in hl_irq_handler_eq()
   561          eq->ci = hl_eq_inc_ptr(eq->ci);  in hl_irq_handler_eq()
   563          hdev->asic_funcs->update_eq_ci(hdev, eq->ci);  in hl_irq_handler_eq()
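Lines 518–527 implement a dropped-event check: each EQE carries a small wrapping sequence index, and the handler flags a gap whenever prev + 1 (masked) does not match the current index. The same test in isolation (the mask value is a stand-in for EQ_CTL_INDEX_MASK):

```c
#include <stdint.h>
#include <stdio.h>

#define TOY_INDEX_MASK 0x3ff  /* stand-in for EQ_CTL_INDEX_MASK */

/* Same continuity check as hl_irq_handler_eq(): a gap between the
 * masked prev+1 and the current index means an event was dropped. */
static int toy_eqe_skipped(uint32_t prev_index, uint32_t cur_index)
{
    return ((prev_index + 1) & TOY_INDEX_MASK) != cur_index;
}

int main(void)
{
    printf("%d\n", toy_eqe_skipped(5, 6));              /* 0: in sequence */
    printf("%d\n", toy_eqe_skipped(TOY_INDEX_MASK, 0)); /* 0: wraps cleanly */
    printf("%d\n", toy_eqe_skipped(5, 8));              /* 1: events lost */
    return 0;
}
```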
|
| /linux/drivers/nvmem/ |
| brcm_nvram.c |
   145          char *eq, *name;  in brcm_nvram_add_cells()
   147          eq = strchr(var, '=');  in brcm_nvram_add_cells()
   148          if (!eq)  in brcm_nvram_add_cells()
   150          *eq = '\0';  in brcm_nvram_add_cells()
   152          *eq = '=';  in brcm_nvram_add_cells()
   157          value = eq + 1;  in brcm_nvram_add_cells()
|
| /linux/drivers/infiniband/hw/ocrdma/ |
| ocrdma_hw.c |
   111  static inline void *ocrdma_get_eqe(struct ocrdma_eq *eq)
   113          return eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe));  in ocrdma_get_eqe()
   116  static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq)
   118          eq->q.tail = (eq->q.tail + 1) & (OCRDMA_EQ_LEN - 1);  in ocrdma_eq_inc_tail()
   433  static int ocrdma_mbx_create_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
   448          ocrdma_build_q_pages(&cmd->pa[0], cmd->num_pages, eq->q.dma,  in ocrdma_mbx_create_eq()
   453          eq->q.id = rsp->vector_eqid & 0xffff;  in ocrdma_mbx_create_eq()
   454          eq->vector = (rsp->vector_eqid >> 16) & 0xffff;  in ocrdma_mbx_create_eq()
   455          eq->q.created = true;  in ocrdma_mbx_create_eq()
   461                          struct ocrdma_eq *eq, u16 q_len)  in ocrdma_create_eq()
   [all …]
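ocrdma_eq_inc_tail() at line 118 advances the tail with a power-of-two mask, wrapping without a division; ocrdma_get_eqe() then scales the tail by the entry size to find the current EQE. A minimal sketch of the mask-based advance (the length constant here is a stand-in, not the real OCRDMA_EQ_LEN):

```c
#include <stdint.h>
#include <stdio.h>

#define TOY_EQ_LEN 4096u  /* must be a power of two */

/* Same advance as ocrdma_eq_inc_tail(): with a power-of-two ring
 * length, masking with LEN - 1 wraps the tail without a modulo. */
static uint32_t toy_inc_tail(uint32_t tail)
{
    return (tail + 1) & (TOY_EQ_LEN - 1);
}

int main(void)
{
    printf("%u\n", toy_inc_tail(0));              /* 1 */
    printf("%u\n", toy_inc_tail(TOY_EQ_LEN - 1)); /* wraps to 0 */
    return 0;
}
```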
|