
Searched refs:qpn (Results 1 – 25 of 143) sorted by relevance


/linux/drivers/net/ethernet/mellanox/mlx4/
qp.c
56 void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type) in mlx4_qp_event() argument
63 qp = __mlx4_qp_lookup(dev, qpn); in mlx4_qp_event()
70 mlx4_dbg(dev, "Async event for none existent QP %08x\n", qpn); in mlx4_qp_event()
84 *proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1; in is_master_qp0()
86 *real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn && in is_master_qp0()
87 qp->qpn <= dev->phys_caps.base_sqpn + 1; in is_master_qp0()
150 ret = mlx4_cmd(dev, 0, qp->qpn, 2, in __mlx4_qp_modify()
155 port = (qp->qpn & 1) + 1; in __mlx4_qp_modify()
179 cpu_to_be16(mlx4_qp_roce_entropy(dev, qp->qpn)); in __mlx4_qp_modify()
185 cpu_to_be32(qp->qpn); in __mlx4_qp_modify()
[all …]
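
The "port = (qp->qpn & 1) + 1" line above recovers the port number of a special (proxy) QP from nothing but the low bit of its QPN. A minimal userspace sketch of that mapping, with illustrative QPN values and an invented helper name:

    /* Sketch only: mirrors the (qpn & 1) + 1 expression in __mlx4_qp_modify above;
     * the QPN values and the helper name are illustrative, not the driver's. */
    #include <stdint.h>
    #include <stdio.h>

    static int port_from_special_qpn(uint32_t qpn)
    {
            return (qpn & 1) + 1;   /* even special QPN -> port 1, odd -> port 2 */
    }

    int main(void)
    {
            printf("qpn 0x40 -> port %d, qpn 0x41 -> port %d\n",
                   port_from_special_qpn(0x40), port_from_special_qpn(0x41));
            return 0;
    }
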
mcg.c
125 u32 qpn) in get_promisc_qp() argument
136 if (pqp->qpn == qpn) in get_promisc_qp()
149 unsigned int index, u32 qpn) in new_steering_entry() argument
176 pqp = get_promisc_qp(dev, port, steer, qpn); in new_steering_entry()
183 dqp->qpn = qpn; in new_steering_entry()
209 if (pqp->qpn == qpn) in new_steering_entry()
218 mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK); in new_steering_entry()
241 unsigned int index, u32 qpn) in existing_steering_entry() argument
253 pqp = get_promisc_qp(dev, port, steer, qpn); in existing_steering_entry()
272 if (qpn == dqp->qpn) in existing_steering_entry()
[all …]
en_resources.c
41 int is_tx, int rss, int qpn, int cqn, in mlx4_en_fill_qp_context() argument
63 context->local_qpn = cpu_to_be32(qpn); in mlx4_en_fill_qp_context()
89 en_dbg(HW, priv, "Setting RX qp %x tunnel mode to RX tunneled & non-tunneled\n", qpn); in mlx4_en_fill_qp_context()
104 ret = mlx4_update_qp(priv->mdev->dev, qp->qpn, in mlx4_en_change_mcast_lb()
resource_tracker.c
225 int qpn; member
751 u8 slave, u32 qpn) in update_vport_qp_param() argument
772 if (mlx4_is_qp_reserved(dev, qpn)) in update_vport_qp_param()
786 err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params); in update_vport_qp_param()
1172 static struct res_common *alloc_fs_rule_tr(u64 id, int qpn) in alloc_fs_rule_tr() argument
1182 ret->qpn = qpn; in alloc_fs_rule_tr()
1504 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn, in qp_res_start_move_to() argument
1514 r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn); in qp_res_start_move_to()
1775 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn) in valid_reserved() argument
1777 return mlx4_is_qp_reserved(dev, qpn) && in valid_reserved()
[all …]
/linux/drivers/infiniband/hw/mlx5/
qpc.c
95 u32 qpn; in dct_event_notifier() local
97 qpn = be32_to_cpu(eqe->data.dct.dctn) & 0xFFFFFF; in dct_event_notifier()
99 dct = xa_load(&dev->qp_table.dct_xa, qpn); in dct_event_notifier()
168 qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN), in create_resource_common()
189 qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN)); in destroy_resource_common()
202 MLX5_SET(destroy_dct_in, in, dctn, qp->qpn); in _mlx5_core_destroy_dct()
220 qp->qpn = MLX5_GET(create_dct_out, out, dctn); in mlx5_core_create_dct()
222 err = xa_err(xa_store_irq(&dev->qp_table.dct_xa, qp->qpn, dct, GFP_KERNEL)); in mlx5_core_create_dct()
246 qp->qpn = MLX5_GET(create_qp_out, out, qpn); in mlx5_qpc_create_qp()
259 MLX5_SET(destroy_qp_in, din, qpn, qp->qpn); in mlx5_qpc_create_qp()
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/ipoib/
ipoib.c
219 MLX5_SET(rst2init_qp_in, in, qpn, ipriv->qpn); in mlx5i_init_underlay_qp()
228 MLX5_SET(init2rtr_qp_in, in, qpn, ipriv->qpn); in mlx5i_init_underlay_qp()
237 MLX5_SET(rtr2rts_qp_in, in, qpn, ipriv->qpn); in mlx5i_init_underlay_qp()
249 MLX5_SET(qp_2err_in, in, qpn, ipriv->qpn); in mlx5i_init_underlay_qp()
262 MLX5_SET(qp_2rst_in, in, qpn, ipriv->qpn); in mlx5i_uninit_underlay_qp()
275 int qpn = 0; in mlx5i_create_underlay_qp() local
280 qpn = (dev_addr[1] << 16) + (dev_addr[2] << 8) + dev_addr[3]; in mlx5i_create_underlay_qp()
281 MLX5_SET(create_qp_in, in, input_qpn, qpn); in mlx5i_create_underlay_qp()
300 ipriv->qpn = MLX5_GET(create_qp_out, out, qpn); in mlx5i_create_underlay_qp()
305 void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, u32 qpn) in mlx5i_destroy_underlay_qp() argument
[all …]
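
ipoib.c line 280 above builds the underlay QPN from bytes 1..3 of the IPoIB device address. A self-contained sketch of that arithmetic, assuming a 20-byte IPoIB-style address whose layout beyond those three bytes is not taken from the snippet; the helper name is invented for illustration:

    /* Sketch: mirrors the shift-and-add in mlx5i_create_underlay_qp above.
     * Address contents and the rest of the 20-byte layout are assumptions. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t qpn_from_dev_addr(const uint8_t *dev_addr)
    {
            /* bytes 1..3 carry the 24-bit QPN, most significant byte first */
            return ((uint32_t)dev_addr[1] << 16) |
                   ((uint32_t)dev_addr[2] << 8) |
                   (uint32_t)dev_addr[3];
    }

    int main(void)
    {
            uint8_t addr[20] = { 0x80, 0x00, 0x01, 0x2a };  /* hypothetical address */

            printf("qpn = 0x%06x\n", qpn_from_dev_addr(addr));   /* 0x00012a */
            return 0;
    }
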
ipoib_vlan.c
72 u32 qpn) in mlx5i_find_qpn_to_netdev_node() argument
74 struct hlist_head *h = &buckets[hash_32(qpn, MLX5I_MAX_LOG_PKEY_SUP)]; in mlx5i_find_qpn_to_netdev_node()
78 if (node->underlay_qpn == qpn) in mlx5i_find_qpn_to_netdev_node()
85 int mlx5i_pkey_add_qpn(struct net_device *netdev, u32 qpn) in mlx5i_pkey_add_qpn() argument
89 u8 key = hash_32(qpn, MLX5I_MAX_LOG_PKEY_SUP); in mlx5i_pkey_add_qpn()
97 new_node->underlay_qpn = qpn; in mlx5i_pkey_add_qpn()
105 int mlx5i_pkey_del_qpn(struct net_device *netdev, u32 qpn) in mlx5i_pkey_del_qpn() argument
112 node = mlx5i_find_qpn_to_netdev_node(ht->buckets, qpn); in mlx5i_pkey_del_qpn()
126 struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn) in mlx5i_pkey_get_netdev() argument
131 node = mlx5i_find_qpn_to_netdev_node(ipriv->qpn_htbl->buckets, qpn); in mlx5i_pkey_get_netdev()
[all …]
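
The lookup above finds the child netdev for a given underlay QPN by hashing the QPN into a small bucket array. A sketch of the same pattern, using a hash_32-style multiplicative hash as a stand-in; the bucket count, node layout, and names are illustrative, not the driver's:

    /* Sketch of a qpn -> payload bucket lookup; hash32() stands in for the
     * kernel's hash_32(), and LOG_BUCKETS plays the role of
     * MLX5I_MAX_LOG_PKEY_SUP (value assumed). */
    #include <stddef.h>
    #include <stdint.h>

    #define LOG_BUCKETS 7
    #define NUM_BUCKETS (1u << LOG_BUCKETS)

    struct qpn_node {
            struct qpn_node *next;
            uint32_t underlay_qpn;
            void *netdev;                   /* opaque payload */
    };

    static uint32_t hash32(uint32_t val, unsigned int bits)
    {
            return (val * 0x61C88647u) >> (32 - bits);
    }

    static struct qpn_node *find_qpn_node(struct qpn_node **buckets, uint32_t qpn)
    {
            struct qpn_node *node;

            for (node = buckets[hash32(qpn, LOG_BUCKETS)]; node; node = node->next)
                    if (node->underlay_qpn == qpn)
                            return node;
            return NULL;
    }

    int main(void)
    {
            struct qpn_node *buckets[NUM_BUCKETS] = { 0 };
            struct qpn_node node = { .next = NULL, .underlay_qpn = 0x1234, .netdev = NULL };

            buckets[hash32(node.underlay_qpn, LOG_BUCKETS)] = &node;
            return find_qpn_node(buckets, 0x1234) == &node ? 0 : 1;
    }
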
ipoib.h
55 u32 qpn; member
71 void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, u32 qpn);
82 int mlx5i_pkey_add_qpn(struct net_device *netdev, u32 qpn);
83 int mlx5i_pkey_del_qpn(struct net_device *netdev, u32 qpn);
86 struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn);
/linux/drivers/net/ethernet/mellanox/mlx5/core/
mcg.c
38 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) in mlx5_core_attach_mcg() argument
44 MLX5_SET(attach_to_mcg_in, in, qpn, qpn); in mlx5_core_attach_mcg()
51 int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) in mlx5_core_detach_mcg() argument
57 MLX5_SET(detach_from_mcg_in, in, qpn, qpn); in mlx5_core_detach_mcg()
/linux/drivers/infiniband/hw/qib/
qib_qp.c
130 u32 i, offset, max_scan, qpn; in qib_alloc_qpn() local
152 qpn = qpt->last + 2; in qib_alloc_qpn()
153 if (qpn >= RVT_QPN_MAX) in qib_alloc_qpn()
154 qpn = 2; in qib_alloc_qpn()
155 if (qpt_mask && ((qpn & qpt_mask) >> 1) >= dd->n_krcv_queues) in qib_alloc_qpn()
156 qpn = (qpn | qpt_mask) + 2; in qib_alloc_qpn()
157 offset = qpn & RVT_BITS_PER_PAGE_MASK; in qib_alloc_qpn()
158 map = &qpt->map[qpn / RVT_BITS_PER_PAGE]; in qib_alloc_qpn()
168 qpt->last = qpn; in qib_alloc_qpn()
169 ret = qpn; in qib_alloc_qpn()
[all …]
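
The loop excerpted above hunts for a free QPN in a bitmap, starting at the last QPN handed out plus two and wrapping back to 2 so that QPN 0 and 1 (the special QPs) are never allocated. A simplified userspace sketch of that search, assuming a flat bitmap and made-up sizes; the real allocator also folds a receive-context mask into the candidate QPN, which is omitted here:

    /* Simplified sketch of bitmap-based QPN allocation; QPN_MAX stands in for
     * RVT_QPN_MAX and the paging of the real table is flattened away. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define QPN_MAX 4096u

    struct qpn_table {
            uint32_t last;                  /* last QPN handed out */
            uint8_t map[QPN_MAX / 8];       /* one bit per QPN */
    };

    static int alloc_qpn(struct qpn_table *qpt)
    {
            uint32_t qpn = qpt->last + 2;
            uint32_t scanned;

            if (qpn >= QPN_MAX)
                    qpn = 2;

            for (scanned = 0; scanned < QPN_MAX; scanned++) {
                    if (!(qpt->map[qpn / 8] & (1u << (qpn % 8)))) {
                            qpt->map[qpn / 8] |= 1u << (qpn % 8);
                            qpt->last = qpn;
                            return (int)qpn;
                    }
                    if (++qpn >= QPN_MAX)
                            qpn = 2;
            }
            return -1;                      /* table exhausted */
    }

    int main(void)
    {
            struct qpn_table qpt;
            int first, next;

            memset(&qpt, 0, sizeof(qpt));
            first = alloc_qpn(&qpt);
            next = alloc_qpn(&qpt);
            printf("first qpn: %d, next qpn: %d\n", first, next);   /* 2, 4 */
            return 0;
    }
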
/linux/drivers/infiniband/hw/hfi1/
trace_tid.h
28 #define OPFN_PARAM_PRN "[%s] qpn 0x%x %s OPFN: qp 0x%x, max read %u, " \
32 #define TID_FLOW_PRN "[%s] qpn 0x%x flow %d: idx %d resp_ib_psn 0x%x " \
37 #define TID_NODE_PRN "[%s] qpn 0x%x %s idx %u grp base 0x%x map 0x%x " \
40 #define RSP_INFO_PRN "[%s] qpn 0x%x state 0x%x s_state 0x%x psn 0x%x " \
47 #define SENDER_INFO_PRN "[%s] qpn 0x%x state 0x%x s_cur %u s_tail %u " \
52 #define TID_READ_SENDER_PRN "[%s] qpn 0x%x newreq %u tid_r_reqs %u " \
58 #define TID_REQ_PRN "[%s] qpn 0x%x newreq %u opcode 0x%x psn 0x%x lpsn 0x%x " \
64 #define RCV_ERR_PRN "[%s] qpn 0x%x s_flags 0x%x state 0x%x " \
69 #define TID_WRITE_RSPDR_PRN "[%s] qpn 0x%x r_tid_head %u r_tid_tail %u " \
77 #define TID_WRITE_SENDER_PRN "[%s] qpn
[all …]
trace_rc.h
22 __field(u32, qpn)
33 __entry->qpn = qp->ibqp.qp_num;
45 __entry->qpn,
84 __field(u32, qpn)
93 __entry->qpn = qp->ibqp.qp_num;
103 __entry->qpn,
trace_ibhdrs.h
77 u16 *pkey, u32 *psn, u32 *qpn);
84 u32 *psn, u32 *qpn);
100 u8 tver, u16 pkey, u32 psn, u32 qpn,
143 __field(u32, qpn)
185 &__entry->qpn);
209 &__entry->qpn);
256 __entry->qpn,
302 __field(u32, qpn)
350 &__entry->qpn);
377 &__entry->qpn);
[all …]
ipoib_main.c
58 u32 qpn = qpn_from_mac(priv->netdev->dev_addr); in hfi1_ipoib_dev_open() local
61 qp = rvt_lookup_qpn(ib_to_rvt(priv->device), &ibp->rvp, qpn); in hfi1_ipoib_dev_open()
109 u32 qpn = (u32)qpn_from_mac(priv->netdev->dev_addr); in hfi1_ipoib_mcast_attach() local
116 qp = rvt_lookup_qpn(ib_to_rvt(priv->device), &ibp->rvp, qpn); in hfi1_ipoib_mcast_attach()
139 u32 qpn = (u32)qpn_from_mac(priv->netdev->dev_addr); in hfi1_ipoib_mcast_detach() local
146 qp = rvt_lookup_qpn(ib_to_rvt(priv->device), &ibp->rvp, qpn); in hfi1_ipoib_mcast_detach()
trace_iowait.h
23 __field(u32, qpn)
29 __entry->qpn = iowait_to_qp(wait)->ibqp.qp_num;
34 __entry->qpn,
/linux/drivers/infiniband/sw/rdmavt/
trace_qp.h
22 __field(u32, qpn)
27 __entry->qpn = qp->ibqp.qp_num;
33 __entry->qpn,
52 __field(u32, qpn)
59 __entry->qpn = qp->ibqp.qp_num;
67 __entry->qpn,
trace_tx.h
55 __field(u32, qpn)
75 __entry->qpn = qp->ibqp.qp_num;
97 __entry->qpn,
122 __field(u32, qpn)
134 __entry->qpn = qp->ibqp.qp_num;
145 __entry->qpn,
trace_rc.h
22 __field(u32, qpn)
33 __entry->qpn = qp->ibqp.qp_num;
45 __entry->qpn,
/linux/drivers/infiniband/sw/rxe/
rxe_hdr.h
65 __be32 qpn; member
180 return BTH_QPN_MASK & be32_to_cpu(bth->qpn); in __bth_qpn()
183 static inline void __bth_set_qpn(void *arg, u32 qpn) in __bth_set_qpn() argument
186 u32 resvqpn = be32_to_cpu(bth->qpn); in __bth_set_qpn()
188 bth->qpn = cpu_to_be32((BTH_QPN_MASK & qpn) | in __bth_set_qpn()
196 return 0 != (cpu_to_be32(BTH_FECN_MASK) & bth->qpn); in __bth_fecn()
204 bth->qpn |= cpu_to_be32(BTH_FECN_MASK); in __bth_set_fecn()
206 bth->qpn &= ~cpu_to_be32(BTH_FECN_MASK); in __bth_set_fecn()
213 return 0 != (cpu_to_be32(BTH_BECN_MASK) & bth->qpn); in __bth_becn()
221 bth->qpn |= cpu_to_be32(BTH_BECN_MASK); in __bth_set_becn()
[all …]
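
In the BTH accessors above the destination QPN occupies the low 24 bits of a big-endian dword, with the FECN and BECN congestion bits sharing the same word. A userspace sketch of that packing, using the conventional BTH mask values; the names and demo values are local to the sketch:

    /* Sketch of BTH qpn-dword handling; masks follow the usual BTH layout and
     * the helpers are illustrative, not the rxe driver's own. */
    #include <arpa/inet.h>                  /* htonl / ntohl */
    #include <stdint.h>
    #include <stdio.h>

    #define BTH_QPN_MASK  0x00ffffffu
    #define BTH_FECN_MASK 0x80000000u
    #define BTH_BECN_MASK 0x40000000u

    static uint32_t bth_qpn(uint32_t qpn_be)
    {
            return ntohl(qpn_be) & BTH_QPN_MASK;
    }

    static uint32_t bth_set_qpn(uint32_t qpn_be, uint32_t qpn)
    {
            uint32_t resv = ntohl(qpn_be) & ~BTH_QPN_MASK;  /* keep FECN/BECN/reserved */

            return htonl(resv | (qpn & BTH_QPN_MASK));
    }

    int main(void)
    {
            uint32_t dword = htonl(BTH_FECN_MASK);          /* FECN set, QPN 0 */

            dword = bth_set_qpn(dword, 0x12345);
            printf("qpn 0x%06x, fecn %d\n", bth_qpn(dword),
                   !!(ntohl(dword) & BTH_FECN_MASK));
            return 0;
    }
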
rxe_recv.c
76 u32 qpn, struct rxe_qp *qp) in check_keys() argument
89 u32 qkey = (qpn == 1) ? GSI_QKEY : qp->attr.qkey; in check_keys()
140 u32 qpn = bth_qpn(pkt); in hdr_check() local
147 if (unlikely(qpn == 0)) in hdr_check()
150 if (qpn != IB_MULTICAST_QPN) { in hdr_check()
151 index = (qpn == 1) ? port->qp_gsi_index : qpn; in hdr_check()
165 err = check_keys(rxe, pkt, qpn, qp); in hdr_check()
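
check_keys() above picks the expected Q_Key from the QPN: packets addressed to QPN 1 (the GSI QP) must carry the well-known GSI Q_Key, anything else is checked against the destination QP's own Q_Key. A trimmed sketch of that selection, with the structures reduced to just what the check needs:

    /* Sketch of per-QPN Q_Key selection; GSI_QKEY is the IB well-known value,
     * the rest of the structures and names are illustrative. */
    #include <stdint.h>
    #include <stdio.h>

    #define GSI_QKEY 0x80010000u

    struct qp_attr {
            uint32_t qkey;
    };

    static int qkey_ok(uint32_t qpn, uint32_t pkt_qkey, const struct qp_attr *qp)
    {
            uint32_t expected = (qpn == 1) ? GSI_QKEY : qp->qkey;

            return pkt_qkey == expected;
    }

    int main(void)
    {
            struct qp_attr qp = { .qkey = 0x11111111u };

            printf("to GSI: %d, to qp: %d\n",
                   qkey_ok(1, GSI_QKEY, &qp), qkey_ok(0x100, 0x11111111u, &qp));
            return 0;
    }
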
/linux/drivers/infiniband/hw/mthca/
mthca_qp.c
198 return qp->qpn >= dev->qp_table.sqp_start && in is_sqp()
199 qp->qpn <= dev->qp_table.sqp_start + 3; in is_sqp()
204 return qp->qpn >= dev->qp_table.sqp_start && in is_qp0()
205 qp->qpn <= dev->qp_table.sqp_start + 1; in is_qp0()
238 void mthca_qp_event(struct mthca_dev *dev, u32 qpn, in mthca_qp_event() argument
245 qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); in mthca_qp_event()
252 event_type, qpn); in mthca_qp_event()
454 err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox); in mthca_query_qp()
630 qp_context->local_qpn = cpu_to_be32(qp->qpn); in __mthca_modify_qp()
771 ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE << in __mthca_modify_qp()
[all …]
/linux/include/uapi/rdma/
ib_user_mad.h
79 __be32 qpn; member
123 __be32 qpn; member
188 __u8 qpn; member
223 __u32 qpn; member
/linux/drivers/infiniband/hw/hns/
hns_roce_qp.c
96 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type) in hns_roce_qp_event() argument
102 qp = __hns_roce_qp_lookup(hr_dev, qpn); in hns_roce_qp_event()
108 dev_warn(dev, "async event for bogus QP %08x\n", qpn); in hns_roce_qp_event()
166 type, hr_qp->qpn); in hns_roce_ib_qp_event()
207 unsigned long *qpn) in alloc_qpn_with_bankid() argument
223 *qpn = (id << 3) | bankid; in alloc_qpn_with_bankid()
254 hr_qp->qpn = num; in alloc_qpn()
289 if (!hr_qp->qpn) in hns_roce_qp_store()
292 ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL)); in hns_roce_qp_store()
309 if (!hr_qp->qpn) in alloc_qpc()
[all …]
/linux/include/linux/mlx4/
device.h
757 int qpn; member
924 __be32 qpn; member
1052 static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn) in mlx4_is_qp_reserved() argument
1054 return (qpn < dev->phys_caps.base_sqpn + 8 + in mlx4_is_qp_reserved()
1056 qpn >= dev->phys_caps.base_sqpn) || in mlx4_is_qp_reserved()
1057 (qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]); in mlx4_is_qp_reserved()
1060 static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn) in mlx4_is_guest_proxy() argument
1064 if (qpn >= guest_proxy_base && qpn < guest_proxy_base + 8) in mlx4_is_guest_proxy()
1155 int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
1282 u32 qpn; member
[all …]
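
mlx4_is_qp_reserved() above treats a QPN as reserved when it falls inside the special/proxy window that starts at base_sqpn, or below the firmware-reserved count. A sketch of that range test follows; the window sizing and capability values are illustrative, since the driver's exact window depends on MLX4_MFUNC_MAX and on whether the function is the master:

    /* Sketch of the reserved-QPN range check; struct caps and its values are
     * stand-ins for the driver's capability fields. */
    #include <stdint.h>
    #include <stdio.h>

    struct caps {
            uint32_t base_sqpn;             /* first special QPN */
            uint32_t num_vfs;               /* functions sharing proxy QPs (assumed) */
            uint32_t reserved_fw_qps;       /* firmware-reserved QPN count */
    };

    static int is_qp_reserved(const struct caps *c, uint32_t qpn)
    {
            uint32_t special_end = c->base_sqpn + 8 + 16 * c->num_vfs;

            return (qpn >= c->base_sqpn && qpn < special_end) ||
                   qpn < c->reserved_fw_qps;
    }

    int main(void)
    {
            struct caps c = { .base_sqpn = 0x40, .num_vfs = 2, .reserved_fw_qps = 0x10 };

            printf("0x42: %d, 0x200: %d\n",
                   is_qp_reserved(&c, 0x42), is_qp_reserved(&c, 0x200));
            return 0;
    }
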
/linux/drivers/net/ethernet/mellanox/mlx5/core/fpga/
conn.c
168 ctrl->qpn_ds = cpu_to_be32(size | (conn->qp.qpn << 8)); in mlx5_fpga_conn_post_send()
242 MLX5_SET(mkc, mkc, qpn, 0xffffff); in mlx5_fpga_conn_create_mkey()
589 conn->qp.qpn = MLX5_GET(create_qp_out, out, qpn); in mlx5_fpga_conn_create_qp()
590 mlx5_fpga_dbg(fdev, "Created QP #0x%x\n", conn->qp.qpn); in mlx5_fpga_conn_create_qp()
647 MLX5_SET(destroy_qp_in, in, qpn, conn->qp.qpn); in mlx5_fpga_conn_destroy_qp()
662 mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to RST\n", conn->qp.qpn); in mlx5_fpga_conn_reset_qp()
665 MLX5_SET(qp_2rst_in, in, qpn, conn->qp.qpn); in mlx5_fpga_conn_reset_qp()
677 mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to INIT\n", conn->qp.qpn); in mlx5_fpga_conn_init_qp()
691 MLX5_SET(rst2init_qp_in, in, qpn, conn->qp.qpn); in mlx5_fpga_conn_init_qp()
726 MLX5_SET(init2rtr_qp_in, in, qpn, conn->qp.qpn); in mlx5_fpga_conn_rtr_qp()
[all …]
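
conn.c line 168 above packs the QP number and the WQE segment count into the control segment's qpn_ds dword: the count stays in the low bits, the QPN is shifted up by 8, and the result is stored big-endian. A minimal sketch of that packing; the field widths follow the expression in the snippet rather than the full hardware layout:

    /* Sketch of the qpn_ds packing: ds in the bits below 8, qpn in bits 8..31,
     * stored big-endian.  Values are illustrative. */
    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pack_qpn_ds(uint32_t qpn, uint32_t ds)
    {
            return htonl(ds | (qpn << 8));
    }

    int main(void)
    {
            printf("qpn_ds = 0x%08x\n", ntohl(pack_qpn_ds(0x000abc, 4)));  /* 0x000abc04 */
            return 0;
    }
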
