/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_provider.h 1349 2004-12-16 21:09:43Z roland $
 */

#ifndef MTHCA_PROVIDER_H
#define MTHCA_PROVIDER_H

#include <ib_verbs.h>
#include <ib_pack.h>

#define MTHCA_MPT_FLAG_ATOMIC        (1 << 14)
#define MTHCA_MPT_FLAG_REMOTE_WRITE  (1 << 13)
#define MTHCA_MPT_FLAG_REMOTE_READ   (1 << 12)
#define MTHCA_MPT_FLAG_LOCAL_WRITE   (1 << 11)
#define MTHCA_MPT_FLAG_LOCAL_READ    (1 << 10)

struct mthca_buf_list {
	void *buf;
	DECLARE_PCI_UNMAP_ADDR(mapping)
};

struct mthca_uar {
	unsigned long pfn;
	int           index;
};

struct mthca_user_db_table;

struct mthca_ucontext {
	struct ib_ucontext          ibucontext;
	struct mthca_uar            uar;
	struct mthca_user_db_table *db_tab;
};

struct mthca_mtt;

struct mthca_mr {
	struct ib_mr      ibmr;
	struct mthca_mtt *mtt;
};

struct mthca_fmr {
	struct ib_fmr      ibmr;
	struct ib_fmr_attr attr;
	struct mthca_mtt  *mtt;
	int                maps;
	union {
		struct {
			struct mthca_mpt_entry __iomem *mpt;
			u64 __iomem *mtts;
		} tavor;
		struct {
			struct mthca_mpt_entry *mpt;
			__be64 *mtts;
		} arbel;
	} mem;
};

struct mthca_pd {
	struct ib_pd    ibpd;
	u32             pd_num;
	atomic_t        sqp_count;
	struct mthca_mr ntmr;
	int             privileged;
};

struct mthca_eq {
	struct mthca_dev      *dev;
	int                    eqn;
	u32                    eqn_mask;
	u32                    cons_index;
	u16                    msi_x_vector;
	u16                    msi_x_entry;
	int                    have_irq;
	int                    nent;
	struct mthca_buf_list *page_list;
	struct mthca_mr        mr;
};

struct mthca_av;

enum mthca_ah_type {
	MTHCA_AH_ON_HCA,
	MTHCA_AH_PCI_POOL,
	MTHCA_AH_KMALLOC
};

struct mthca_ah {
	struct ib_ah       ibah;
	enum mthca_ah_type type;
	u32                key;
	struct mthca_av   *av;
	dma_addr_t         avdma;
};
/*
 * Quick description of our CQ/QP locking scheme:
 *
 * We have one global lock that protects dev->cq/qp_table.  Each
 * struct mthca_cq/qp also has its own lock.  An individual qp lock
 * may be taken inside of an individual cq lock.  Both cqs attached to
 * a qp may be locked, with the send cq locked first.  No other
 * nesting should be done.
 *
 * Each struct mthca_cq/qp also has an atomic_t ref count.  The
 * pointer from the cq/qp_table to the struct counts as one reference.
 * This reference also is good for access through the consumer API, so
 * modifying the CQ/QP etc doesn't need to take another reference.
 * Access because of a completion being polled does need a reference.
 *
 * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
 * destroy function to sleep on.
 *
 * This means that access from the consumer API requires nothing but
 * taking the struct's lock.
 *
 * Access because of a completion event should go as follows:
 * - lock cq/qp_table and look up struct
 * - increment ref count in struct
 * - drop cq/qp_table lock
 * - lock struct, do your thing, and unlock struct
 * - decrement ref count; if zero, wake up waiters
 *
 * To destroy a CQ/QP, we can do the following:
 * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock
 * - decrement ref count
 * - wait_event until ref count is zero
 *
 * It is the consumer's responsibility to make sure that no QP
 * operations (WQE posting or state modification) are pending when the
 * QP is destroyed.  Also, the consumer must make sure that calls to
 * qp_modify are serialized.
 *
 * Possible optimizations (wait for profile data to see if/where we
 * have locks bouncing between CPUs):
 * - split cq/qp table lock into n separate (cache-aligned) locks,
 *   indexed (say) by the page in the table
 * - split QP struct lock into three (one for common info, one for the
 *   send queue and one for the receive queue)
 */
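/*
 * The two flows above, sketched in code.  This is an illustration
 * only, not the driver's implementation: the table lock and the
 * lookup/remove helpers are hypothetical stand-ins, and only the
 * mthca_cq members declared below (lock, refcount, wait) are real.
 */
#if 0	/* example sketch -- not compiled */
static void example_cq_event(struct mthca_dev *dev, int cqn)
{
	struct mthca_cq *cq;

	/* lock cq_table, look up struct, take a reference */
	spin_lock(&dev->cq_table_lock);		/* hypothetical table lock */
	cq = example_cq_lookup(dev, cqn);	/* hypothetical lookup */
	if (cq)
		atomic_inc(&cq->refcount);
	spin_unlock(&dev->cq_table_lock);

	if (!cq)
		return;				/* CQ already destroyed */

	/* lock struct, do your thing, unlock struct */
	spin_lock(&cq->lock);
	/* ... poll completions ... */
	spin_unlock(&cq->lock);

	/* drop the reference; a pending destroy may now finish */
	if (atomic_dec_and_test(&cq->refcount))
		wake_up(&cq->wait);
}

static void example_cq_destroy(struct mthca_dev *dev, struct mthca_cq *cq)
{
	/* remove the table's pointer so no new lookups succeed */
	spin_lock(&dev->cq_table_lock);
	example_cq_remove(dev, cq->cqn);	/* hypothetical remove */
	spin_unlock(&dev->cq_table_lock);

	/* drop the table's reference and wait out in-flight events */
	atomic_dec(&cq->refcount);
	wait_event(cq->wait, !atomic_read(&cq->refcount));
}
#endif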
struct mthca_cq {
	struct ib_cq       ibcq;
	spinlock_t         lock;
	atomic_t           refcount;
	int                cqn;
	u32                cons_index;
	int                is_direct;
	int                is_kernel;

	/* Next fields are Arbel only */
	int                set_ci_db_index;
	u32               *set_ci_db;
	int                arm_db_index;
	u32               *arm_db;
	int                arm_sn;

	union {
		struct mthca_buf_list direct;
		struct mthca_buf_list *page_list;
	} queue;
	struct mthca_mr    mr;
	wait_queue_head_t  wait;
};

struct mthca_wq {
	spinlock_t lock;
	int        max;
	unsigned   next_ind;
	unsigned   last_comp;
	unsigned   head;
	unsigned   tail;
	void      *last;
	int        max_gs;
	int        wqe_shift;

	int        db_index;	/* Arbel only */
	u32       *db;
};

struct mthca_qp {
	struct ib_qp      ibqp;
	atomic_t          refcount;
	u32               qpn;
	int               is_direct;
	u8                transport;
	u8                state;
	u8                atomic_rd_en;
	u8                resp_depth;

	struct mthca_mr   mr;

	struct mthca_wq   rq;
	struct mthca_wq   sq;
	enum ib_sig_type  sq_policy;
	int               send_wqe_offset;

	u64              *wrid;
	union {
		struct mthca_buf_list direct;
		struct mthca_buf_list *page_list;
	} queue;

	wait_queue_head_t wait;
};

struct mthca_sqp {
	struct mthca_qp     qp;
	int                 port;
	int                 pkey_index;
	u32                 qkey;
	u32                 send_psn;
	struct ib_ud_header ud_header;
	int                 header_buf_size;
	void               *header_buf;
	dma_addr_t          header_dma;
};

static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mthca_ucontext, ibucontext);
}

static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibmr)
{
	return container_of(ibmr, struct mthca_fmr, ibmr);
}

static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mthca_mr, ibmr);
}

static inline struct mthca_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mthca_pd, ibpd);
}

static inline struct mthca_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mthca_ah, ibah);
}

static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mthca_cq, ibcq);
}

static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mthca_qp, ibqp);
}

static inline struct mthca_sqp *to_msqp(struct mthca_qp *qp)
{
	return container_of(qp, struct mthca_sqp, qp);
}

#endif /* MTHCA_PROVIDER_H */
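/*
 * Usage note (illustrative only): the IB midlayer calls the driver
 * with pointers to the embedded ib_* structs, and the to_m*()
 * converters above recover the enclosing mthca_* structs via
 * container_of().  The function below is a hypothetical sketch, not
 * part of this driver.
 */
#if 0	/* example sketch -- not compiled */
static int example_cq_entry_point(struct ib_cq *ibcq)
{
	/* ibcq points at the ib_cq embedded inside a mthca_cq */
	struct mthca_cq *cq = to_mcq(ibcq);

	spin_lock(&cq->lock);
	/* ... operate on driver-private state such as cq->cons_index ... */
	spin_unlock(&cq->lock);
	return 0;
}
#endif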