/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MTHCA_PROVIDER_H
#define MTHCA_PROVIDER_H

#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>

#include <linux/wait.h>

#define MTHCA_MPT_FLAG_ATOMIC        (1 << 14)
#define MTHCA_MPT_FLAG_REMOTE_WRITE  (1 << 13)
#define MTHCA_MPT_FLAG_REMOTE_READ   (1 << 12)
#define MTHCA_MPT_FLAG_LOCAL_WRITE   (1 << 11)
#define MTHCA_MPT_FLAG_LOCAL_READ    (1 << 10)

struct mthca_buf_list {
        void *buf;
        DEFINE_DMA_UNMAP_ADDR(mapping);
};

union mthca_buf {
        struct mthca_buf_list direct;
        struct mthca_buf_list *page_list;
};

struct mthca_uar {
        unsigned long pfn;
        int           index;
};

struct mthca_user_db_table;

struct mthca_ucontext {
        struct ib_ucontext          ibucontext;
        struct mthca_uar            uar;
        struct mthca_user_db_table *db_tab;
        int                         reg_mr_warned;
};

struct mthca_mtt;

struct mthca_mr {
        struct ib_mr      ibmr;
        struct ib_umem   *umem;
        struct mthca_mtt *mtt;
};

struct mthca_fmr {
        struct ib_fmr      ibmr;
        struct ib_fmr_attr attr;
        struct mthca_mtt  *mtt;
        int                maps;
        union {
                struct {
                        struct mthca_mpt_entry __iomem *mpt;
                        u64 __iomem *mtts;
                } tavor;
                struct {
                        struct mthca_mpt_entry *mpt;
                        __be64 *mtts;
                        dma_addr_t dma_handle;
                } arbel;
        } mem;
};

struct mthca_pd {
        struct ib_pd    ibpd;
        u32             pd_num;
        atomic_t        sqp_count;
        struct mthca_mr ntmr;
        int             privileged;
};

struct mthca_eq {
        struct mthca_dev      *dev;
        int                    eqn;
        u32                    eqn_mask;
        u32                    cons_index;
        u16                    msi_x_vector;
        u16                    msi_x_entry;
        int                    have_irq;
        int                    nent;
        struct mthca_buf_list *page_list;
        struct mthca_mr        mr;
        char                   irq_name[IB_DEVICE_NAME_MAX];
};

struct mthca_av;

enum mthca_ah_type {
        MTHCA_AH_ON_HCA,
        MTHCA_AH_PCI_POOL,
        MTHCA_AH_KMALLOC
};

struct mthca_ah {
        struct ib_ah       ibah;
        enum mthca_ah_type type;
        u32                key;
        struct mthca_av   *av;
        dma_addr_t         avdma;
};

/*
 * Quick description of our CQ/QP locking scheme:
 *
 * We have one global lock that protects dev->cq/qp_table.  Each
 * struct mthca_cq/qp also has its own lock.  An individual qp lock
 * may be taken inside of an individual cq lock.  Both cqs attached to
 * a qp may be locked, with the cq with the lower cqn locked first.
 * No other nesting should be done.
 *
 * Each struct mthca_cq/qp also has a ref count, protected by the
 * corresponding table lock.  The pointer from the cq/qp_table to the
 * struct counts as one reference.  This reference is also good for
 * access through the consumer API, so modifying the CQ/QP etc doesn't
 * need to take another reference.  Access to a QP because of a
 * completion being polled does not need a reference either.
 *
 * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
 * destroy function to sleep on.
 *
 * This means that access from the consumer API requires nothing but
 * taking the struct's lock.
 *
 * Access because of a completion event should go as follows:
 * - lock cq/qp_table and look up struct
 * - increment ref count in struct
 * - drop cq/qp_table lock
 * - lock struct, do your thing, and unlock struct
 * - decrement ref count; if zero, wake up waiters
 *
 * To destroy a CQ/QP, we can do the following:
 * - lock cq/qp_table
 * - remove pointer and decrement ref count
 * - unlock cq/qp_table lock
 * - wait_event until ref count is zero
 *
 * It is the consumer's responsibility to make sure that no QP
 * operations (WQE posting or state modification) are pending when a
 * QP is destroyed.  Also, the consumer must make sure that calls to
 * qp_modify are serialized.  Similarly, the consumer is responsible
 * for ensuring that no CQ resize operations are pending when a CQ
 * is destroyed.
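 *
 * As a rough sketch of the completion-event pattern above (the
 * function name is hypothetical and the exact lookup call is an
 * illustrative assumption; the real handlers live elsewhere in the
 * driver):
 *
 *      void mthca_cq_event_sketch(struct mthca_dev *dev, u32 cqn)
 *      {
 *              struct mthca_cq *cq;
 *
 *              // Step 1-3: look up the struct and take a reference
 *              // while holding the table lock, then drop the lock.
 *              spin_lock(&dev->cq_table.lock);
 *              cq = mthca_array_get(&dev->cq_table.cq, cqn);
 *              if (cq)
 *                      ++cq->refcount;  // protected by table lock
 *              spin_unlock(&dev->cq_table.lock);
 *
 *              if (!cq)
 *                      return;          // CQ was already destroyed
 *
 *              // Step 4: work under the struct's own lock.
 *              spin_lock(&cq->lock);
 *              // ... handle the event ...
 *              spin_unlock(&cq->lock);
 *
 *              // Step 5: drop the reference; the last dropper wakes
 *              // the destroyer sleeping in wait_event() on cq->wait.
 *              spin_lock(&dev->cq_table.lock);
 *              if (!--cq->refcount)
 *                      wake_up(&cq->wait);
 *              spin_unlock(&dev->cq_table.lock);
 *      }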
 *
 * Possible optimizations (wait for profile data to see if/where we
 * have locks bouncing between CPUs):
 * - split cq/qp table lock into n separate (cache-aligned) locks,
 *   indexed (say) by the page in the table
 * - split QP struct lock into three (one for common info, one for the
 *   send queue and one for the receive queue)
 */

struct mthca_cq_buf {
        union mthca_buf queue;
        struct mthca_mr mr;
        int             is_direct;
};

struct mthca_cq_resize {
        struct mthca_cq_buf buf;
        int                 cqe;
        enum {
                CQ_RESIZE_ALLOC,
                CQ_RESIZE_READY,
                CQ_RESIZE_SWAPPED
        } state;
};

struct mthca_cq {
        struct ib_cq            ibcq;
        spinlock_t              lock;
        int                     refcount;
        int                     cqn;
        u32                     cons_index;
        struct mthca_cq_buf     buf;
        struct mthca_cq_resize *resize_buf;
        int                     is_kernel;

        /* Next fields are Arbel only */
        int                     set_ci_db_index;
        __be32                 *set_ci_db;
        int                     arm_db_index;
        __be32                 *arm_db;
        int                     arm_sn;

        wait_queue_head_t       wait;
        struct mutex            mutex;
};

struct mthca_srq {
        struct ib_srq      ibsrq;
        spinlock_t         lock;
        int                refcount;
        int                srqn;
        int                max;
        int                max_gs;
        int                wqe_shift;
        int                first_free;
        int                last_free;
        u16                counter;  /* Arbel only */
        int                db_index; /* Arbel only */
        __be32            *db;       /* Arbel only */
        void              *last;

        int                is_direct;
        u64               *wrid;
        union mthca_buf    queue;
        struct mthca_mr    mr;

        wait_queue_head_t  wait;
        struct mutex       mutex;
};

struct mthca_wq {
        spinlock_t lock;
        int        max;
        unsigned   next_ind;
        unsigned   last_comp;
        unsigned   head;
        unsigned   tail;
        void      *last;
        int        max_gs;
        int        wqe_shift;

        int        db_index; /* Arbel only */
        __be32    *db;
};

struct mthca_qp {
        struct ib_qp      ibqp;
        int               refcount;
        u32               qpn;
        int               is_direct;
        u8                port;     /* for SQP and memfree use only */
        u8                alt_port; /* for memfree use only */
        u8                transport;
        u8                state;
        u8                atomic_rd_en;
        u8                resp_depth;

        struct mthca_mr   mr;

        struct mthca_wq   rq;
        struct mthca_wq   sq;
        enum ib_sig_type  sq_policy;
        int               send_wqe_offset;
        int               max_inline_data;

        u64              *wrid;
        union mthca_buf   queue;

        wait_queue_head_t wait;
        struct mutex      mutex;
};

struct mthca_sqp {
        struct mthca_qp     qp;
        int                 pkey_index;
        u32                 qkey;
        u32                 send_psn;
        struct ib_ud_header ud_header;
        int                 header_buf_size;
        void               *header_buf;
        dma_addr_t          header_dma;
};

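/*
 * The to_mXXX() helpers below convert from the ib_* structure that the
 * core verbs layer hands us back to the driver-private structure that
 * embeds it, via container_of().  A hypothetical caller (not code from
 * this file) might look like:
 *
 *      static u32 example_cqn(struct ib_cq *ibcq)
 *      {
 *              return to_mcq(ibcq)->cqn;
 *      }
 */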
static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
        return container_of(ibucontext, struct mthca_ucontext, ibucontext);
}

static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibmr)
{
        return container_of(ibmr, struct mthca_fmr, ibmr);
}

static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct mthca_mr, ibmr);
}

static inline struct mthca_pd *to_mpd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct mthca_pd, ibpd);
}

static inline struct mthca_ah *to_mah(struct ib_ah *ibah)
{
        return container_of(ibah, struct mthca_ah, ibah);
}

static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct mthca_cq, ibcq);
}

static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq)
{
        return container_of(ibsrq, struct mthca_srq, ibsrq);
}

static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct mthca_qp, ibqp);
}

static inline struct mthca_sqp *to_msqp(struct mthca_qp *qp)
{
        return container_of(qp, struct mthca_sqp, qp);
}

#endif /* MTHCA_PROVIDER_H */