/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef MLX5_CORE_CQ_H
#define MLX5_CORE_CQ_H

#include <rdma/ib_verbs.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/mlx5_ifc.h>


struct mlx5_core_cq {
	u32			cqn;
	int			cqe_sz;
	__be32		       *set_ci_db;
	__be32		       *arm_db;
	atomic_t		refcount;
	struct completion	free;
	unsigned		vector;
	int			irqn;
	void (*comp)		(struct mlx5_core_cq *);
	void (*event)		(struct mlx5_core_cq *, int);
	struct mlx5_uar	       *uar;
	u32			cons_index;
	unsigned		arm_sn;
	struct mlx5_rsc_debug	*dbg;
	int			pid;
	int			reset_notify_added;
	struct list_head	reset_notify;
};


enum {
	MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR		= 0x01,
	MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR		= 0x02,
	MLX5_CQE_SYNDROME_LOCAL_PROT_ERR		= 0x04,
	MLX5_CQE_SYNDROME_WR_FLUSH_ERR			= 0x05,
	MLX5_CQE_SYNDROME_MW_BIND_ERR			= 0x06,
	MLX5_CQE_SYNDROME_BAD_RESP_ERR			= 0x10,
	MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR		= 0x11,
	MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR		= 0x12,
	MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR		= 0x13,
	MLX5_CQE_SYNDROME_REMOTE_OP_ERR			= 0x14,
	MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR	= 0x15,
	MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR		= 0x16,
	MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR		= 0x22,
};

enum {
	MLX5_CQE_OWNER_MASK	= 1,
	MLX5_CQE_REQ		= 0,
	MLX5_CQE_RESP_WR_IMM	= 1,
	MLX5_CQE_RESP_SEND	= 2,
	MLX5_CQE_RESP_SEND_IMM	= 3,
	MLX5_CQE_RESP_SEND_INV	= 4,
	MLX5_CQE_RESIZE_CQ	= 5,
	MLX5_CQE_SIG_ERR	= 12,
	MLX5_CQE_REQ_ERR	= 13,
	MLX5_CQE_RESP_ERR	= 14,
	MLX5_CQE_INVALID	= 15,
};

enum {
	MLX5_CQ_MODIFY_PERIOD		= 1 << 0,
	MLX5_CQ_MODIFY_COUNT		= 1 << 1,
	MLX5_CQ_MODIFY_OVERRUN		= 1 << 2,
	MLX5_CQ_MODIFY_PERIOD_MODE	= 1 << 4,
};

enum {
	MLX5_CQ_OPMOD_RESIZE		= 1,
	MLX5_MODIFY_CQ_MASK_LOG_SIZE	= 1 << 0,
	MLX5_MODIFY_CQ_MASK_PG_OFFSET	= 1 << 1,
	MLX5_MODIFY_CQ_MASK_PG_SIZE	= 1 << 2,
};

struct mlx5_cq_modify_params {
	int	type;
	union {
		struct {
			u32	page_offset;
			u8	log_cq_size;
		} resize;

		struct {
		} moder;

		struct {
		} mapping;
	} params;
};

static inline int cqe_sz_to_mlx_sz(u8 size)
{
	return size == 64 ? CQE_SIZE_64 : CQE_SIZE_128;
}

static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
{
	*cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff);
}

enum {
	MLX5_CQ_DB_REQ_NOT_SOL	= 1 << 24,
	MLX5_CQ_DB_REQ_NOT	= 0 << 24
};

static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
			       void __iomem *uar_page,
			       spinlock_t *doorbell_lock,
			       u32 cons_index)
{
	__be32 doorbell[2];
	u32 sn;
	u32 ci;

	sn = cq->arm_sn & 3;
	ci = cons_index & 0xffffff;

	*cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);

	/* Make sure that the doorbell record in host memory is
	 * written before ringing the doorbell via PCI MMIO.
	 */
	wmb();

	doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci);
	doorbell[1] = cpu_to_be32(cq->cqn);

	mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, doorbell_lock);
}

int mlx5_init_cq_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			u32 *in, int inlen);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
		       u32 *out, int outlen);
int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			u32 *in, int inlen);
int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
				   struct mlx5_core_cq *cq, u16 cq_period,
				   u16 cq_max_count);
int mlx5_core_modify_cq_moderation_mode(struct mlx5_core_dev *dev,
					struct mlx5_core_cq *cq,
					u16 cq_period,
					u16 cq_max_count,
					u8 cq_mode);
int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);

#endif /* MLX5_CORE_CQ_H */