Searched refs:eqe (Results 1 – 25 of 42) sorted by relevance

/linux/drivers/net/ethernet/mellanox/mlx4/
eq.c 123 struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor, size); in next_eqe_sw() local
124 return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe; in next_eqe_sw()
129 struct mlx4_eqe *eqe = in next_slave_event_eqe() local
131 return (!!(eqe->owner & 0x80) ^ in next_slave_event_eqe()
133 eqe : NULL; in next_slave_event_eqe()
146 struct mlx4_eqe *eqe; in mlx4_gen_slave_eqe() local
150 for (eqe = next_slave_event_eqe(slave_eq); eqe; in mlx4_gen_slave_eqe()
151 eqe = next_slave_event_eqe(slave_eq)) { in mlx4_gen_slave_eqe()
152 slave = eqe->slave_id; in mlx4_gen_slave_eqe()
154 if (eqe->type == MLX4_EVENT_TYPE_PORT_CHANGE && in mlx4_gen_slave_eqe()
[all …]
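The validity test above is worth unpacking: mlx4 never masks cons_index when storing it, so bit log2(nent) of the index flips exactly once per ring wrap, and hardware flips the owner bit it writes on each pass. Comparing the two parities tells software whether the slot has been rewritten since it was last consumed. A minimal userspace sketch of the same check, with invented demo_* names:

    #include <stdint.h>
    #include <stddef.h>

    struct demo_eqe {
            uint8_t owner;          /* top bit rewritten by "hardware" each pass */
    };

    struct demo_eq {
            struct demo_eqe *ring;  /* nent entries, nent a power of two */
            uint32_t nent;
            uint32_t cons_index;    /* increments forever, never masked */
    };

    static struct demo_eqe *demo_next_eqe_sw(struct demo_eq *eq)
    {
            struct demo_eqe *eqe = &eq->ring[eq->cons_index & (eq->nent - 1)];

            /* Entry is valid only when the owner bit matches the wrap
             * parity of cons_index (bit log2(nent), i.e. cons_index & nent). */
            return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ?
                    NULL : eqe;
    }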
/linux/drivers/infiniband/hw/mthca/
mthca_eq.c 236 struct mthca_eqe *eqe; in next_eqe_sw() local
237 eqe = get_eqe(eq, eq->cons_index); in next_eqe_sw()
238 return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe; in next_eqe_sw()
241 static inline void set_eqe_hw(struct mthca_eqe *eqe) in set_eqe_hw() argument
243 eqe->owner = MTHCA_EQ_ENTRY_OWNER_HW; in set_eqe_hw()
262 struct mthca_eqe *eqe; in mthca_eq_int() local
267 while ((eqe = next_eqe_sw(eq))) { in mthca_eq_int()
274 switch (eqe->type) { in mthca_eq_int()
276 disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff; in mthca_eq_int()
282 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
[all …]
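mthca uses the plainer of the two ownership schemes: an explicit owner byte that hardware clears when it posts an event, and that software sets back to hardware-owned once the entry is consumed. A self-contained sketch of that handoff, assuming the same single-bit convention (names are illustrative, not the driver's):

    #include <stdint.h>
    #include <stddef.h>

    #define OWNER_HW 0x80   /* mirrors MTHCA_EQ_ENTRY_OWNER_HW */

    struct demo_eqe { uint8_t owner; uint8_t type; };

    struct demo_eq { struct demo_eqe *ring; uint32_t nent; uint32_t cons_index; };

    static struct demo_eqe *demo_next_eqe_sw(struct demo_eq *eq)
    {
            struct demo_eqe *eqe = &eq->ring[eq->cons_index & (eq->nent - 1)];

            return (eqe->owner & OWNER_HW) ? NULL : eqe;   /* HW still owns it */
    }

    static void demo_eq_int(struct demo_eq *eq)
    {
            struct demo_eqe *eqe;

            while ((eqe = demo_next_eqe_sw(eq))) {
                    /* ... switch (eqe->type) and dispatch here ... */
                    eqe->owner = OWNER_HW;   /* hand the slot back to hardware */
                    ++eq->cons_index;
            }
    }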
/linux/drivers/net/ethernet/mellanox/mlx5/core/
events.c 149 struct mlx5_eqe *eqe = data; in any_notifier() local
152 eqe_type_str(eqe->type), eqe->sub_type); in any_notifier()
161 struct mlx5_eqe *eqe = data; in temp_warn() local
165 value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb); in temp_warn()
166 value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb); in temp_warn()
223 struct mlx5_eqe *eqe = data; in port_module() local
231 module_event_eqe = &eqe->data.port_module; in port_module()
314 struct mlx5_eqe *eqe = data; in pcie_core() local
316 switch (eqe->sub_type) { in pcie_core()
337 struct mlx5_eqe *eqe = data; in forward_event() local
[all …]
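events.c fans each asynchronous EQE out through per-type notifier chains plus a catch-all chain; the delivery side is the pair of atomic_notifier_call_chain() calls visible in the eq.c hits below. A rough userspace model of that dispatch shape, with an invented handler table standing in for the kernel's notifier chains:

    #include <stdio.h>

    #define EV_TYPES 256
    #define EV_ANY   (EV_TYPES - 1)   /* catch-all slot, like NOTIFY_ANY */

    typedef void (*ev_handler)(unsigned type, void *eqe);

    static ev_handler handlers[EV_TYPES];

    /* Deliver to the chain for this type, then to the catch-all. */
    static void dispatch(unsigned type, void *eqe)
    {
            if (handlers[type])
                    handlers[type](type, eqe);
            if (handlers[EV_ANY])
                    handlers[EV_ANY](type, eqe);
    }

    static void log_any(unsigned type, void *eqe)
    {
            (void)eqe;
            printf("async event, type %u\n", type);
    }

    int main(void)
    {
            handlers[EV_ANY] = log_any;   /* any_notifier() analogue */
            dispatch(0x13, NULL);         /* arbitrary demo type */
            return 0;
    }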
eq.c 115 struct mlx5_eqe *eqe; in mlx5_eq_comp_int() local
118 while ((eqe = next_eqe_sw(eq))) { in mlx5_eq_comp_int()
127 cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff; in mlx5_eq_comp_int()
132 cq->comp(cq, eqe); in mlx5_eq_comp_int()
201 struct mlx5_eqe *eqe; in mlx5_eq_async_int() local
212 while ((eqe = next_eqe_sw(eq))) { in mlx5_eq_async_int()
219 atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe); in mlx5_eq_async_int()
220 atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe); in mlx5_eq_async_int()
246 struct mlx5_eqe *eqe; in init_eq_buf() local
250 eqe = get_eqe(eq, i); in init_eq_buf()
[all …]
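A detail that recurs across these drivers: event payloads are big-endian on the wire, and identifiers such as the CQN occupy only the low 24 bits of a 32-bit field, hence the be32_to_cpu(...) & 0xffffff pattern above. A portable userspace equivalent (be32_to_cpu is kernel-only, so ntohl() stands in):

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    static uint32_t cqn_from_be32(uint32_t be_field)
    {
            return ntohl(be_field) & 0xffffff;   /* low 24 bits hold the CQN */
    }

    int main(void)
    {
            uint32_t wire = htonl(0xab000123);   /* top byte carries other bits */
            printf("cqn = 0x%06x\n", (unsigned)cqn_from_be32(wire));  /* 0x000123 */
            return 0;
    }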
pagealloc.c 622 struct mlx5_eqe *eqe; in req_pages_handler() local
630 eqe = data; in req_pages_handler()
632 func_id = be16_to_cpu(eqe->data.req_pages.func_id); in req_pages_handler()
633 npages = be32_to_cpu(eqe->data.req_pages.num_pages); in req_pages_handler()
634 ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK; in req_pages_handler()
635 release_all = be16_to_cpu(eqe->data.req_pages.ec_function) & in req_pages_handler()
fw_reset.c 695 static void mlx5_sync_reset_events_handle(struct mlx5_fw_reset *fw_reset, struct mlx5_eqe *eqe) in mlx5_sync_reset_events_handle() argument
700 sync_fw_update_eqe = &eqe->data.sync_fw_update; in mlx5_sync_reset_events_handle()
721 struct mlx5_eqe *eqe = data; in fw_reset_event_notifier() local
726 switch (eqe->sub_type) { in fw_reset_event_notifier()
731 mlx5_sync_reset_events_handle(fw_reset, eqe); in fw_reset_event_notifier()
/linux/drivers/net/ethernet/mellanox/mlxsw/
pci_hw.h 377 MLXSW_ITEM32(pci, eqe, event_type, 0x0C, 24, 8);
384 MLXSW_ITEM32(pci, eqe, event_sub_type, 0x0C, 16, 8);
389 MLXSW_ITEM32(pci, eqe, cqn, 0x0C, 8, 7);
394 MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
399 MLXSW_ITEM32(pci, eqe, cmd_token, 0x00, 16, 16);
404 MLXSW_ITEM32(pci, eqe, cmd_status, 0x00, 0, 8);
409 MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x04, 0, 32);
414 MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x08, 0, 32);
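pci_hw.h is a different style entirely: no hand-written accessors, just one MLXSW_ITEM32() line per field, from which macros generate typed getters and setters. A simplified sketch of that declarative-accessor idea (demo macro, assuming big-endian storage as the mlxsw wire format uses; the real macro also generates setters):

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    #define DEMO_ITEM32(_name, _offset, _shift, _width)                     \
    static inline uint32_t demo_eqe_##_name##_get(const char *buf)          \
    {                                                                       \
            uint32_t dw;                                                    \
            memcpy(&dw, buf + (_offset), sizeof(dw));                       \
            return (ntohl(dw) >> (_shift)) & ((1u << (_width)) - 1);        \
    }

    /* Field layout copied from the pci_hw.h hits above. */
    DEMO_ITEM32(event_type,     0x0C, 24, 8)
    DEMO_ITEM32(event_sub_type, 0x0C, 16, 8)
    DEMO_ITEM32(cqn,            0x0C,  8, 7)
    DEMO_ITEM32(owner,          0x0C,  0, 1)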
/linux/drivers/net/ethernet/mellanox/mlx5/core/fpga/
core.c 163 static int fpga_err_event(struct notifier_block *nb, unsigned long event, void *eqe) in fpga_err_event() argument
167 return mlx5_fpga_event(fdev, event, eqe); in fpga_err_event()
170 static int fpga_qp_err_event(struct notifier_block *nb, unsigned long event, void *eqe) in fpga_qp_err_event() argument
174 return mlx5_fpga_event(fdev, event, eqe); in fpga_qp_err_event()
334 unsigned long event, void *eqe) in mlx5_fpga_event() argument
336 void *data = ((struct mlx5_eqe *)eqe)->data.raw; in mlx5_fpga_event()
/linux/drivers/infiniband/hw/mlx4/
mad.c 63 #define GET_BLK_PTR_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.bl… argument
64 #define GET_MASK_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.tbl_e… argument
1120 struct mlx4_eqe *eqe) in propagate_pkey_ev() argument
1122 __propagate_pkey_ev(dev, port_num, GET_BLK_PTR_FROM_EQE(eqe), in propagate_pkey_ev()
1123 GET_MASK_FROM_EQE(eqe)); in propagate_pkey_ev()
1181 struct mlx4_eqe *eqe = &(ew->ib_eqe); in handle_port_mgmt_change_event() local
1182 u32 port = eqe->event.port_mgmt_change.port; in handle_port_mgmt_change_event()
1187 switch (eqe->subtype) { in handle_port_mgmt_change_event()
1189 changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr); in handle_port_mgmt_change_event()
1194 u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid); in handle_port_mgmt_change_event()
[all …]
/linux/include/linux/mlx5/
cq.h 49 void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
57 void (*comp)(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe);
/linux/drivers/infiniband/hw/mlx5/
qpc.c 91 static int dct_event_notifier(struct mlx5_ib_dev *dev, struct mlx5_eqe *eqe) in dct_event_notifier() argument
97 qpn = be32_to_cpu(eqe->data.dct.dctn) & 0xFFFFFF; in dct_event_notifier()
112 struct mlx5_eqe *eqe = data; in rsc_event_notifier() local
119 return dct_event_notifier(dev, eqe); in rsc_event_notifier()
128 rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; in rsc_event_notifier()
129 rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN); in rsc_event_notifier()
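The last two lines show how mlx5 builds one resource serial number: a 24-bit object number (the 0xffffff mask) with the resource type packed in the bits above it, which implies MLX5_USER_INDEX_LEN is 24 here. A hedged sketch of that packing with demo names:

    #include <stdint.h>

    #define OBJ_INDEX_BITS 24
    #define OBJ_INDEX_MASK ((1u << OBJ_INDEX_BITS) - 1)   /* 0xffffff */

    static inline uint32_t pack_rsn(uint8_t type, uint32_t number)
    {
            return (number & OBJ_INDEX_MASK) | ((uint32_t)type << OBJ_INDEX_BITS);
    }

    static inline uint8_t  rsn_type(uint32_t rsn)   { return rsn >> OBJ_INDEX_BITS; }
    static inline uint32_t rsn_number(uint32_t rsn) { return rsn & OBJ_INDEX_MASK; }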
odp.c 1612 struct mlx5_eqe *eqe; in mlx5_ib_eq_pf_process() local
1615 while ((eqe = mlx5_eq_get_eqe(eq->core, cc))) { in mlx5_ib_eq_pf_process()
1622 pf_eqe = &eqe->data.page_fault; in mlx5_ib_eq_pf_process()
1623 pfault->event_subtype = eqe->sub_type; in mlx5_ib_eq_pf_process()
1625 switch (eqe->sub_type) { in mlx5_ib_eq_pf_process()
1646 eqe->sub_type, pfault->bytes_committed, in mlx5_ib_eq_pf_process()
1673 eqe->sub_type, pfault->bytes_committed, in mlx5_ib_eq_pf_process()
1702 eqe->sub_type, pfault->token, in mlx5_ib_eq_pf_process()
1716 eqe->sub_type); in mlx5_ib_eq_pf_process()
srq_cmd.c 728 struct mlx5_eqe *eqe; in srq_event_notifier() local
737 eqe = data; in srq_event_notifier()
738 srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; in srq_event_notifier()
749 srq->event(srq, eqe->type); in srq_event_notifier()
/linux/drivers/net/ethernet/ibm/ehea/
ehea_main.c 925 struct ehea_eqe *eqe; in ehea_qp_aff_irq_handler() local
931 eqe = ehea_poll_eq(port->qp_eq); in ehea_qp_aff_irq_handler()
933 while (eqe) { in ehea_qp_aff_irq_handler()
934 qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry); in ehea_qp_aff_irq_handler()
936 eqe->entry, qp_token); in ehea_qp_aff_irq_handler()
950 eqe = ehea_poll_eq(port->qp_eq); in ehea_qp_aff_irq_handler()
1138 static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe) in ehea_parse_eqe() argument
1146 ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe); in ehea_parse_eqe()
1147 portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe); in ehea_parse_eqe()
1158 if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) { in ehea_parse_eqe()
[all …]
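ehea packs whole notification events into a single u64 and extracts fields with EHEA_BMASK_GET(), where each field is described by one descriptor constant. A self-contained sketch of the descriptor idea, with a simplified encoding and invented field positions (the kernel macros differ in detail):

    #include <stdint.h>
    #include <stdio.h>

    #define BMASK(shift, width)  (((uint32_t)(shift) << 8) | (width))
    #define BMASK_SHIFT(d)       ((d) >> 8)
    #define BMASK_WIDTH(d)       ((d) & 0xff)
    #define BMASK_GET(d, v) \
            (((v) >> BMASK_SHIFT(d)) & ((UINT64_C(1) << BMASK_WIDTH(d)) - 1))

    #define DEMO_EVENT_CODE BMASK(56, 8)   /* invented positions */
    #define DEMO_PORTNUM    BMASK(48, 8)

    int main(void)
    {
            uint64_t eqe = (UINT64_C(0x2a) << 56) | (UINT64_C(3) << 48);

            printf("ec=%llu port=%llu\n",
                   (unsigned long long)BMASK_GET(DEMO_EVENT_CODE, eqe),
                   (unsigned long long)BMASK_GET(DEMO_PORTNUM, eqe));
            return 0;
    }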
ehea_qmr.c 305 struct ehea_eqe *eqe; in ehea_poll_eq() local
309 eqe = hw_eqit_eq_get_inc_valid(&eq->hw_queue); in ehea_poll_eq()
312 return eqe; in ehea_poll_eq()
/linux/drivers/net/ethernet/mellanox/mlx5/core/sf/
vhca_event.c 122 struct mlx5_eqe *eqe = data; in mlx5_vhca_state_change_notifier() local
130 work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id); in mlx5_vhca_state_change_notifier()
/linux/drivers/net/ethernet/microsoft/mana/
gdma_main.c 356 struct gdma_eqe *eqe; in mana_gd_process_eqe() local
359 eqe = &eq_eqe_ptr[head]; in mana_gd_process_eqe()
360 eqe_info.as_uint32 = eqe->eqe_info; in mana_gd_process_eqe()
365 cq_id = eqe->details[0] & 0xFFFFFF; in mana_gd_process_eqe()
391 memcpy(&event.details, &eqe->details, GDMA_EVENT_DATA_SIZE); in mana_gd_process_eqe()
407 struct gdma_eqe *eqe; in mana_gd_process_eq_events() local
418 eqe = &eq_eqe_ptr[eq->head % num_eqe]; in mana_gd_process_eq_events()
419 eqe_info.as_uint32 = eqe->eqe_info; in mana_gd_process_eq_events()
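mana snapshots each entry's packed info word once (eqe_info.as_uint32 = eqe->eqe_info) and then decodes it through a union of a u32 with bit-fields. A sketch of that idiom; the field widths below are invented, not the real gdma_eqe_info layout, and bit-field order is compiler-dependent:

    #include <stdint.h>

    union demo_eqe_info {
            uint32_t as_uint32;
            struct {
                    uint32_t type       : 8;
                    uint32_t reserved   : 21;
                    uint32_t owner_bits : 3;
            } f;
    };

    static uint8_t demo_eqe_type(uint32_t raw)
    {
            union demo_eqe_info info;

            info.as_uint32 = raw;   /* one read, then field access */
            return info.f.type;
    }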
/linux/drivers/infiniband/hw/efa/
efa_com.c 1167 struct efa_admin_eqe *eqe; in efa_com_eq_comp_intr_handler() local
1174 eqe = &eeq->eqes[ci]; in efa_com_eq_comp_intr_handler()
1177 while ((READ_ONCE(eqe->common) & EFA_ADMIN_EQE_PHASE_MASK) == phase) { in efa_com_eq_comp_intr_handler()
1184 eeq->cb(eeq, eqe); in efa_com_eq_comp_intr_handler()
1195 eqe = &eeq->eqes[ci]; in efa_com_eq_comp_intr_handler()
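efa uses a third liveness convention: a phase bit that the producer inverts on every ring wrap, so the consumer compares each entry against its expected phase and flips that expectation when its own index wraps. A sketch with illustrative names (the real handler also puts a read barrier between the phase check and the payload read, omitted here):

    #include <stdint.h>

    #define PHASE_MASK 0x1u

    struct demo_eqe { uint32_t common; };   /* bit 0: phase */

    struct demo_eq {
            struct demo_eqe *eqes;
            uint32_t depth;
            uint32_t ci;        /* consumer index */
            uint8_t  phase;     /* expected phase, starts at 1 */
    };

    static void demo_eq_poll(struct demo_eq *eq, void (*cb)(struct demo_eqe *))
    {
            struct demo_eqe *eqe = &eq->eqes[eq->ci];

            while ((eqe->common & PHASE_MASK) == eq->phase) {
                    cb(eqe);
                    if (++eq->ci == eq->depth) {   /* wrap: flip expectation */
                            eq->ci = 0;
                            eq->phase = !eq->phase;
                    }
                    eqe = &eq->eqes[eq->ci];
            }
    }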
/linux/drivers/scsi/be2iscsi/
be_main.c 686 struct be_eq_entry *eqe; in be_isr_mcc() local
696 eqe = queue_tail_node(eq); in be_isr_mcc()
699 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] in be_isr_mcc()
701 if (((eqe->dw[offsetof(struct amap_eq_entry, in be_isr_mcc()
706 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0); in be_isr_mcc()
708 eqe = queue_tail_node(eq); in be_isr_mcc()
750 struct be_eq_entry *eqe; in be_isr() local
771 eqe = queue_tail_node(eq); in be_isr()
775 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] in be_isr()
777 if (((eqe->dw[offsetof(struct amap_eq_entry, in be_isr()
[all …]
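be2iscsi polls on a per-entry valid bit instead: hardware sets it when posting an event and software clears it after consuming, so no wrap parity or phase tracking is needed. A compact sketch (field names invented):

    #include <stdint.h>

    #define EQE_VALID 0x1u

    struct demo_eqe { uint32_t dw0; };   /* bit 0: valid */

    struct demo_eq { struct demo_eqe *ring; uint32_t len; uint32_t tail; };

    static unsigned demo_drain_eq(struct demo_eq *eq)
    {
            struct demo_eqe *eqe = &eq->ring[eq->tail];
            unsigned handled = 0;

            while (eqe->dw0 & EQE_VALID) {
                    /* ... decode the event payload here ... */
                    eqe->dw0 &= ~EQE_VALID;             /* consume: clear valid */
                    eq->tail = (eq->tail + 1) % eq->len;
                    eqe = &eq->ring[eq->tail];
                    handled++;
            }
            return handled;   /* caller re-arms the EQ doorbell with this count */
    }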
/linux/drivers/net/ethernet/mellanox/mlx5/core/en/
monitor_stats.c 72 unsigned long event, void *eqe) in mlx5e_monitor_event_handler() argument
/linux/drivers/infiniband/hw/ocrdma/
ocrdma_hw.c 998 struct ocrdma_eqe eqe; in ocrdma_irq_handler() local
1006 eqe = *ptr; in ocrdma_irq_handler()
1007 ocrdma_le32_to_cpu(&eqe, sizeof(eqe)); in ocrdma_irq_handler()
1008 mcode = (eqe.id_valid & OCRDMA_EQE_MAJOR_CODE_MASK) in ocrdma_irq_handler()
1012 eq->q.id, eqe.id_valid); in ocrdma_irq_handler()
1013 if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0) in ocrdma_irq_handler()
1020 if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) { in ocrdma_irq_handler()
1021 cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT; in ocrdma_irq_handler()
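ocrdma is the one little-endian device in this list: the handler copies the EQE out of the ring and byte-swaps it in place with ocrdma_le32_to_cpu() before decoding, a no-op on little-endian hosts. A userspace sketch of such a fixup (demo helper using the GCC/Clang bswap builtin):

    #include <stdint.h>
    #include <stddef.h>

    static inline uint32_t demo_le32_to_cpu(uint32_t v)
    {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
            return __builtin_bswap32(v);   /* swap only on big-endian hosts */
    #else
            return v;
    #endif
    }

    /* In-place fixup of a little-endian structure, one dword at a time. */
    static void demo_le32_fixup(void *buf, size_t bytes)
    {
            uint32_t *dw = buf;
            size_t i;

            for (i = 0; i < bytes / 4; i++)
                    dw[i] = demo_le32_to_cpu(dw[i]);
    }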
/linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/
ipsec_offload.c 491 struct mlx5_eqe *eqe = data; in mlx5e_ipsec_event() local
497 object = &eqe->data.obj_change; in mlx5e_ipsec_event()
/linux/drivers/vfio/pci/mlx5/
cmd.c 1053 struct mlx5_eqe *eqe = data; in mlx5vf_event_notifier() local
1063 queue_type = eqe->data.qp_srq.type; in mlx5vf_event_notifier()
1066 qp_num = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; in mlx5vf_event_notifier()
1073 object = &eqe->data.obj_change; in mlx5vf_event_notifier()
1086 struct mlx5_eqe *eqe) in mlx5vf_cq_complete() argument
/linux/drivers/net/ethernet/mellanox/mlx5/core/diag/
fw_tracer.c 1170 struct mlx5_eqe *eqe = data; in fw_tracer_event() local
1172 switch (eqe->sub_type) { in fw_tracer_event()
1184 eqe->sub_type); in fw_tracer_event()
/linux/drivers/crypto/hisilicon/
debugfs.c 397 if (qm->eqe && !strcmp(name, "EQE")) { in qm_eq_aeq_dump()
398 xeqe = qm->eqe + xeqe_id; in qm_eq_aeq_dump()
