xref: /freebsd/sys/dev/mlx5/cq.h (revision 32c7dde816fd1d738a48af82bf490307cb7b4739)
1 /*-
2  * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD$
26  */
27 
28 #ifndef MLX5_CORE_CQ_H
29 #define MLX5_CORE_CQ_H
30 
31 #include <rdma/ib_verbs.h>
32 #include <dev/mlx5/driver.h>
33 #include <dev/mlx5/mlx5_ifc.h>
34 
struct mlx5_eqe;

/*
 * Software state for one hardware completion queue (CQ).
 * Doorbell records (set_ci_db/arm_db) live in host memory and are
 * read by the device; see mlx5_cq_set_ci() and mlx5_cq_arm() below.
 */
struct mlx5_core_cq {
	u32			cqn;		/* CQ number used to address this CQ */
	int			cqe_sz;		/* CQE size in bytes */
	__be32		       *set_ci_db;	/* doorbell record: consumer index (low 24 bits) */
	__be32		       *arm_db;		/* doorbell record: arm request word */
	unsigned		vector;		/* completion vector index */
	int			irqn;		/* interrupt number for this vector */
	/* completion handler, called with the triggering EQE */
	void (*comp)		(struct mlx5_core_cq *, struct mlx5_eqe *);
	/* async event handler; second argument is the event type */
	void (*event)		(struct mlx5_core_cq *, int);
	struct mlx5_uars_page  *uar;		/* UAR page used for doorbells */
	u32			cons_index;	/* software consumer index; only low 24 bits reach HW */
	unsigned		arm_sn;		/* arm sequence number; low 2 bits used (see mlx5_cq_arm) */
	struct mlx5_rsc_debug	*dbg;		/* debug resource entry, if registered */
	int			pid;		/* NOTE(review): presumably the creating process id — confirm at create site */
	int			reset_notify_added;	/* nonzero once queued on a reset-notify list */
	struct list_head	reset_notify;	/* linkage for reset notification */
};
53 
54 
/*
 * CQE error syndromes, reported by the device in error CQEs.
 * Values are hardware-defined (see the mlx5 PRM); they mirror the
 * IB completion error semantics suggested by their names.
 */
enum {
	MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR		= 0x01,
	MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR		= 0x02,
	MLX5_CQE_SYNDROME_LOCAL_PROT_ERR		= 0x04,
	MLX5_CQE_SYNDROME_WR_FLUSH_ERR			= 0x05,
	MLX5_CQE_SYNDROME_MW_BIND_ERR			= 0x06,
	MLX5_CQE_SYNDROME_BAD_RESP_ERR			= 0x10,
	MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR		= 0x11,
	MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR		= 0x12,
	MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR		= 0x13,
	MLX5_CQE_SYNDROME_REMOTE_OP_ERR			= 0x14,
	MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR	= 0x15,
	MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR		= 0x16,
	MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR		= 0x22,
};
70 
/*
 * CQE opcodes and the owner-bit mask.  MLX5_CQE_OWNER_MASK selects
 * the ownership bit used to detect valid CQEs; the remaining values
 * identify the kind of completion the CQE describes.
 */
enum {
	MLX5_CQE_OWNER_MASK	= 1,
	MLX5_CQE_REQ		= 0,	/* requester (send) completion */
	MLX5_CQE_RESP_WR_IMM	= 1,	/* responder: RDMA write with immediate */
	MLX5_CQE_RESP_SEND	= 2,	/* responder: send */
	MLX5_CQE_RESP_SEND_IMM	= 3,	/* responder: send with immediate */
	MLX5_CQE_RESP_SEND_INV	= 4,	/* responder: send with invalidate */
	MLX5_CQE_RESIZE_CQ	= 5,	/* CQ resize event */
	MLX5_CQE_SIG_ERR	= 12,	/* signature error */
	MLX5_CQE_REQ_ERR	= 13,	/* requester error */
	MLX5_CQE_RESP_ERR	= 14,	/* responder error */
	MLX5_CQE_INVALID	= 15,	/* not a valid CQE */
};
84 
/*
 * Bitmask of fields to change in a MODIFY_CQ command
 * (see mlx5_core_modify_cq_by_mask()).
 */
enum {
	MLX5_CQ_MODIFY_PERIOD	= 1 << 0,	/* moderation period */
	MLX5_CQ_MODIFY_COUNT	= 1 << 1,	/* moderation max count */
	MLX5_CQ_MODIFY_OVERRUN	= 1 << 2,	/* overrun-ignore setting */
	MLX5_CQ_MODIFY_EQN	= 1 << 3,	/* target EQ number */
	MLX5_CQ_MODIFY_PERIOD_MODE = 1 << 4,	/* moderation period mode */
};
92 
/*
 * MODIFY_CQ opmod value for resizing, and the field-select bits
 * describing which resize parameters are present.
 */
enum {
	MLX5_CQ_OPMOD_RESIZE		= 1,
	MLX5_MODIFY_CQ_MASK_LOG_SIZE	= 1 << 0,
	MLX5_MODIFY_CQ_MASK_PG_OFFSET	= 1 << 1,
	MLX5_MODIFY_CQ_MASK_PG_SIZE	= 1 << 2,
};
99 
/*
 * Parameters for a CQ modify operation, discriminated by 'type'.
 * Only the resize variant carries data; 'moder' and 'mapping' are
 * currently empty placeholders.
 */
struct mlx5_cq_modify_params {
	int	type;			/* selects the union member below */
	union {
		struct {
			u32	page_offset;	/* offset of the CQ buffer within its page */
			u8	log_cq_size;	/* log2 of the new CQ size */
		} resize;

		struct {
		} moder;

		struct {
		} mapping;
	} params;
};
115 
116 static inline int cqe_sz_to_mlx_sz(u8 size)
117 {
118 	return size == 64 ? CQE_SIZE_64 : CQE_SIZE_128;
119 }
120 
121 static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
122 {
123 	*cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff);
124 }
125 
/*
 * Arm-doorbell command values (bit 24 of the arm word):
 * request notification on the next solicited-only CQE, or on any CQE.
 */
enum {
	MLX5_CQ_DB_REQ_NOT_SOL		= 1 << 24,
	MLX5_CQ_DB_REQ_NOT		= 0 << 24
};
130 
131 static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
132 			       void __iomem *uar_page,
133 			       spinlock_t *doorbell_lock,
134 			       u32 cons_index)
135 {
136 	__be32 doorbell[2];
137 	u32 sn;
138 	u32 ci;
139 
140 	sn = cq->arm_sn & 3;
141 	ci = cons_index & 0xffffff;
142 
143 	*cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);
144 
145 	/* Make sure that the doorbell record in host memory is
146 	 * written before ringing the doorbell via PCI MMIO.
147 	 */
148 	wmb();
149 
150 	doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci);
151 	doorbell[1] = cpu_to_be32(cq->cqn);
152 
153 	mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, doorbell_lock);
154 }
155 
156 int mlx5_init_cq_table(struct mlx5_core_dev *dev);
157 void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
158 int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
159 			u32 *in, int inlen, u32 *out, int outlen);
160 int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
161 int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
162 		       u32 *out, int outlen);
163 int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
164 			u32 *in, int inlen);
165 int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
166 				   struct mlx5_core_cq *cq, u16 cq_period,
167 				   u16 cq_max_count);
168 int mlx5_core_modify_cq_moderation_mode(struct mlx5_core_dev *dev,
169 					struct mlx5_core_cq *cq,
170 					u16 cq_period,
171 					u16 cq_max_count,
172 					u8 cq_mode);
173 int mlx5_core_modify_cq_by_mask(struct mlx5_core_dev *,
174 				struct mlx5_core_cq *, u32 mask,
175 				u16 cq_period, u16 cq_max_count,
176 				u8 cq_mode, u8 cq_eqn);
177 int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
178 void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
179 
180 #endif /* MLX5_CORE_CQ_H */
181