Home
last modified time | relevance | path

Searched refs: eqe (Results 1 – 25 of 34) sorted by relevance

12

/linux/drivers/net/ethernet/mellanox/mlx4/
H A D eq.c 123 struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor, size); in next_eqe_sw() local
124 return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe; in next_eqe_sw()
129 struct mlx4_eqe *eqe = in next_slave_event_eqe() local
131 return (!!(eqe->owner & 0x80) ^ in next_slave_event_eqe()
133 eqe : NULL; in next_slave_event_eqe()
146 struct mlx4_eqe *eqe; in mlx4_gen_slave_eqe() local
150 for (eqe = next_slave_event_eqe(slave_eq); eqe; in mlx4_gen_slave_eqe()
151 eqe = next_slave_event_eqe(slave_eq)) { in mlx4_gen_slave_eqe()
152 slave = eqe->slave_id; in mlx4_gen_slave_eqe()
154 if (eqe->type == MLX4_EVENT_TYPE_PORT_CHANGE && in mlx4_gen_slave_eqe()
[all …]
/linux/drivers/infiniband/hw/mthca/
H A D mthca_eq.c 236 struct mthca_eqe *eqe; in next_eqe_sw() local
237 eqe = get_eqe(eq, eq->cons_index); in next_eqe_sw()
238 return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe; in next_eqe_sw()
241 static inline void set_eqe_hw(struct mthca_eqe *eqe) in set_eqe_hw() argument
243 eqe->owner = MTHCA_EQ_ENTRY_OWNER_HW; in set_eqe_hw()
262 struct mthca_eqe *eqe; in mthca_eq_int() local
267 while ((eqe = next_eqe_sw(eq))) { in mthca_eq_int()
274 switch (eqe->type) { in mthca_eq_int()
276 disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff; in mthca_eq_int()
282 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/
H A D events.c 150 struct mlx5_eqe *eqe = data; in any_notifier() local
153 eqe_type_str(eqe->type), eqe->sub_type); in any_notifier()
179 struct mlx5_eqe *eqe = data; in temp_warn() local
183 value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb); in temp_warn()
188 value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb); in temp_warn()
253 struct mlx5_eqe *eqe = data; in port_module() local
261 module_event_eqe = &eqe->data.port_module; in port_module()
344 struct mlx5_eqe *eqe = data; in pcie_core() local
346 switch (eqe->sub_type) { in pcie_core()
367 struct mlx5_eqe *eqe = data; in forward_event() local
[all …]
H A D cq.c 70 struct mlx5_eqe *eqe) in mlx5_add_cq_to_tasklet() argument
100 static void mlx5_core_cq_dummy_cb(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe) in mlx5_core_cq_dummy_cb() argument
/linux/drivers/net/ethernet/mellanox/mlxsw/
H A D pci_hw.h 382 MLXSW_ITEM32(pci, eqe, event_type, 0x0C, 24, 8);
389 MLXSW_ITEM32(pci, eqe, event_sub_type, 0x0C, 16, 8);
394 MLXSW_ITEM32(pci, eqe, cqn, 0x0C, 8, 7);
399 MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
404 MLXSW_ITEM32(pci, eqe, cmd_token, 0x00, 16, 16);
409 MLXSW_ITEM32(pci, eqe, cmd_status, 0x00, 0, 8);
414 MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x04, 0, 32);
419 MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x08, 0, 32);
H A D pci.c 1123 char *eqe; in mlxsw_pci_eq_tasklet() local
1127 while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) { in mlxsw_pci_eq_tasklet()
1128 cqn = mlxsw_pci_eqe_cqn_get(eqe); in mlxsw_pci_eq_tasklet()
/linux/include/linux/mlx5/
H A D cq.h 48 void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
56 void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
186 void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
/linux/drivers/net/ethernet/ibm/ehea/
H A D ehea_main.c 925 struct ehea_eqe *eqe; in ehea_qp_aff_irq_handler() local
931 eqe = ehea_poll_eq(port->qp_eq); in ehea_qp_aff_irq_handler()
933 while (eqe) { in ehea_qp_aff_irq_handler()
934 qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry); in ehea_qp_aff_irq_handler()
936 eqe->entry, qp_token); in ehea_qp_aff_irq_handler()
950 eqe = ehea_poll_eq(port->qp_eq); in ehea_qp_aff_irq_handler()
1138 static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe) in ehea_parse_eqe() argument
1146 ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe); in ehea_parse_eqe()
1147 portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe); in ehea_parse_eqe()
1158 if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) { in ehea_parse_eqe()
[all …]
H A D ehea_qmr.c 305 struct ehea_eqe *eqe; in ehea_poll_eq() local
309 eqe = hw_eqit_eq_get_inc_valid(&eq->hw_queue); in ehea_poll_eq()
312 return eqe; in ehea_poll_eq()
/linux/drivers/infiniband/hw/mlx5/
H A D qpc.c 93 static int dct_event_notifier(struct mlx5_ib_dev *dev, struct mlx5_eqe *eqe) in dct_event_notifier() argument
99 qpn = be32_to_cpu(eqe->data.dct.dctn) & 0xFFFFFF; in dct_event_notifier()
114 struct mlx5_eqe *eqe = data; in rsc_event_notifier() local
121 return dct_event_notifier(dev, eqe); in rsc_event_notifier()
130 rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; in rsc_event_notifier()
131 rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN); in rsc_event_notifier()
H A D odp.c 1641 struct mlx5_eqe *eqe; in mlx5_ib_eq_pf_process() local
1644 while ((eqe = mlx5_eq_get_eqe(eq->core, cc))) { in mlx5_ib_eq_pf_process()
1651 pf_eqe = &eqe->data.page_fault; in mlx5_ib_eq_pf_process()
1652 pfault->event_subtype = eqe->sub_type; in mlx5_ib_eq_pf_process()
1654 switch (eqe->sub_type) { in mlx5_ib_eq_pf_process()
1675 eqe->sub_type, pfault->bytes_committed, in mlx5_ib_eq_pf_process()
1702 eqe->sub_type, pfault->bytes_committed, in mlx5_ib_eq_pf_process()
1731 eqe->sub_type, pfault->token, in mlx5_ib_eq_pf_process()
1745 eqe->sub_type); in mlx5_ib_eq_pf_process()
H A D srq_cmd.c 728 struct mlx5_eqe *eqe; in srq_event_notifier() local
737 eqe = data; in srq_event_notifier()
738 srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; in srq_event_notifier()
749 srq->event(srq, eqe->type); in srq_event_notifier()
H A D devx.c 294 static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe) in get_event_obj_type() argument
307 return eqe->data.qp_srq.type; in get_event_obj_type()
315 return MLX5_GET(affiliated_event_header, &eqe->data, obj_type); in get_event_obj_type()
1492 static void devx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe) in devx_cq_comp() argument
1510 dispatch_event_fd(&obj_event->obj_sub_list, eqe); in devx_cq_comp()
2477 struct mlx5_eqe *eqe = data; in devx_get_obj_id_from_event() local
2491 obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; in devx_get_obj_id_from_event()
2494 obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff; in devx_get_obj_id_from_event()
2498 obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff; in devx_get_obj_id_from_event()
2501 obj_id = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff; in devx_get_obj_id_from_event()
[all …]
H A D main.c 2814 static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe, in handle_general_event() argument
2817 u32 port = (eqe->data.port.port >> 4) & 0xf; in handle_general_event()
2819 switch (eqe->sub_type) { in handle_general_event()
2830 static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe, in handle_port_change() argument
2833 u32 port = (eqe->data.port.port >> 4) & 0xf; in handle_port_change()
2837 switch (eqe->sub_type) { in handle_port_change()
2848 ibev->event = (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE) ? in handle_port_change()
/linux/drivers/scsi/be2iscsi/
H A D be_main.c 686 struct be_eq_entry *eqe; in be_isr_mcc() local
696 eqe = queue_tail_node(eq); in be_isr_mcc()
699 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] in be_isr_mcc()
701 if (((eqe->dw[offsetof(struct amap_eq_entry, in be_isr_mcc()
706 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); in be_isr_mcc()
708 eqe = queue_tail_node(eq); in be_isr_mcc()
750 struct be_eq_entry *eqe; in be_isr() local
771 eqe = queue_tail_node(eq); in be_isr()
775 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] in be_isr()
777 if (((eqe->dw[offsetof(struct amap_eq_entry, in be_isr()
[all …]
/linux/drivers/net/ethernet/microsoft/mana/
H A D gdma_main.c 571 struct gdma_eqe *eqe; in mana_gd_process_eqe() local
574 eqe = &eq_eqe_ptr[head]; in mana_gd_process_eqe()
575 eqe_info.as_uint32 = eqe->eqe_info; in mana_gd_process_eqe()
580 cq_id = eqe->details[0] & 0xFFFFFF; in mana_gd_process_eqe()
608 memcpy(&event.details, &eqe->details, GDMA_EVENT_DATA_SIZE); in mana_gd_process_eqe()
664 struct gdma_eqe *eqe; in mana_gd_process_eq_events() local
675 eqe = &eq_eqe_ptr[eq->head % num_eqe]; in mana_gd_process_eq_events()
676 eqe_info.as_uint32 = eqe->eqe_info; in mana_gd_process_eq_events()
/linux/drivers/net/ethernet/mellanox/mlx5/core/en/
H A D monitor_stats.c 72 unsigned long event, void *eqe) in mlx5e_monitor_event_handler() argument
/linux/drivers/infiniband/hw/ocrdma/
H A D ocrdma_hw.c 998 struct ocrdma_eqe eqe; in ocrdma_irq_handler() local
1006 eqe = *ptr; in ocrdma_irq_handler()
1007 ocrdma_le32_to_cpu(&eqe, sizeof(eqe)); in ocrdma_irq_handler()
1008 mcode = (eqe.id_valid & OCRDMA_EQE_MAJOR_CODE_MASK) in ocrdma_irq_handler()
1012 eq->q.id, eqe.id_valid); in ocrdma_irq_handler()
1013 if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0) in ocrdma_irq_handler()
1020 if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) { in ocrdma_irq_handler()
1021 cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT; in ocrdma_irq_handler()
/linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
H A D ipsec_offload.c 490 struct mlx5_eqe *eqe = data; in mlx5e_ipsec_event() local
496 object = &eqe->data.obj_change; in mlx5e_ipsec_event()
/linux/drivers/vfio/pci/mlx5/
H A D cmd.c 1111 struct mlx5_eqe *eqe = data; in mlx5vf_event_notifier() local
1121 queue_type = eqe->data.qp_srq.type; in mlx5vf_event_notifier()
1124 qp_num = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; in mlx5vf_event_notifier()
1131 object = &eqe->data.obj_change; in mlx5vf_event_notifier()
1144 struct mlx5_eqe *eqe) in mlx5vf_cq_complete() argument
/linux/drivers/net/ethernet/mellanox/mlx5/core/lib/
H A D clock.c 1169 struct mlx5_eqe *eqe = data; in mlx5_pps_event() local
1170 int pin = eqe->data.pps.pin; in mlx5_pps_event()
1179 be64_to_cpu(eqe->data.pps.time_stamp)) : in mlx5_pps_event()
1181 be64_to_cpu(eqe->data.pps.time_stamp)); in mlx5_pps_event()
/linux/include/linux/
H A D hisi_acc_qm.h 388 struct qm_eqe *eqe; member
/linux/drivers/crypto/hisilicon/
H A D qm.c 977 struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; in qm_get_complete_eqe_num() local
979 u32 dw0 = le32_to_cpu(eqe->dw0); in qm_get_complete_eqe_num()
1000 eqe = qm->eqe; in qm_get_complete_eqe_num()
1003 eqe++; in qm_get_complete_eqe_num()
1007 dw0 = le32_to_cpu(eqe->dw0); in qm_get_complete_eqe_num()
5706 QM_INIT_BUF(qm, eqe, qm->eq_depth); in hisi_qm_memory_init()
/linux/drivers/scsi/lpfc/
H A D lpfc_sli.c 87 struct lpfc_eqe *eqe,
446 struct lpfc_eqe *eqe; in lpfc_sli4_eq_get() local
451 eqe = lpfc_sli4_qe(q, q->host_index); in lpfc_sli4_eq_get()
454 if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid) in lpfc_sli4_eq_get()
467 return eqe; in lpfc_sli4_eq_get()
577 struct lpfc_eqe *eqe) in __lpfc_sli4_consume_eqe() argument
580 bf_set_le32(lpfc_eqe_valid, eqe, 0); in __lpfc_sli4_consume_eqe()
592 struct lpfc_eqe *eqe = NULL; in lpfc_sli4_eqcq_flush() local
599 eqe = lpfc_sli4_eq_get(eq); in lpfc_sli4_eqcq_flush()
600 while (eqe) { in lpfc_sli4_eqcq_flush()
[all …]
/linux/drivers/scsi/elx/efct/
H A D efct_hw.c 2206 u8 eqe[sizeof(struct sli4_eqe)] = { 0 }; in efct_hw_eq_process() local
2215 while (!done && !sli_eq_read(&hw->sli, eq->queue, eqe)) { in efct_hw_eq_process()
2219 rc = sli_eq_parse(&hw->sli, eqe, &cq_id); in efct_hw_eq_process()

12