/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_provider.h 1349 2004-12-16 21:09:43Z roland $
 */

#ifndef MTHCA_PROVIDER_H
#define MTHCA_PROVIDER_H

#include <ib_verbs.h>
#include <ib_pack.h>

/* Access flags in a memory protection table (MPT) entry */
#define MTHCA_MPT_FLAG_ATOMIC        (1 << 14)
#define MTHCA_MPT_FLAG_REMOTE_WRITE  (1 << 13)
#define MTHCA_MPT_FLAG_REMOTE_READ   (1 << 12)
#define MTHCA_MPT_FLAG_LOCAL_WRITE   (1 << 11)
#define MTHCA_MPT_FLAG_LOCAL_READ    (1 << 10)

struct mthca_buf_list {
        void *buf;
        DECLARE_PCI_UNMAP_ADDR(mapping)
};

struct mthca_uar {
        unsigned long pfn;
        int index;
};

struct mthca_mr {
        struct ib_mr ibmr;
        int order;
        u32 first_seg;
};

struct mthca_fmr {
        struct ib_fmr ibmr;
        struct ib_fmr_attr attr;
        int order;
        u32 first_seg;
        int maps;
        union {
                /* Tavor: MPT/MTT entries accessed through the HCA's
                 * ioremapped memory, hence __iomem */
                struct {
                        struct mthca_mpt_entry __iomem *mpt;
                        u64 __iomem *mtts;
                } tavor;
                /* Arbel (mem-free mode): entries live in host memory */
                struct {
                        struct mthca_mpt_entry *mpt;
                        __be64 *mtts;
                } arbel;
        } mem;
};

struct mthca_pd {
        struct ib_pd ibpd;
        u32 pd_num;
        atomic_t sqp_count;
        struct mthca_mr ntmr;
};

struct mthca_eq {
        struct mthca_dev *dev;
        int eqn;
        u32 eqn_mask;
        u32 cons_index;
        u16 msi_x_vector;
        u16 msi_x_entry;
        int have_irq;
        int nent;
        struct mthca_buf_list *page_list;
        struct mthca_mr mr;
};

struct mthca_av;

enum mthca_ah_type {
        MTHCA_AH_ON_HCA,
        MTHCA_AH_PCI_POOL,
        MTHCA_AH_KMALLOC
};

struct mthca_ah {
        struct ib_ah ibah;
        enum mthca_ah_type type;
        u32 key;
        struct mthca_av *av;
        dma_addr_t avdma;
};

/*
 * Quick description of our CQ/QP locking scheme:
 *
 * We have one global lock that protects dev->cq/qp_table.  Each
 * struct mthca_cq/qp also has its own lock.  An individual qp lock
 * may be taken inside of an individual cq lock.  Both cqs attached to
 * a qp may be locked, with the send cq locked first.  No other
 * nesting should be done.
 *
 * Each struct mthca_cq/qp also has an atomic_t ref count.  The
 * pointer from the cq/qp_table to the struct counts as one reference.
 * This reference is also good for access through the consumer API, so
 * modifying the CQ/QP etc. doesn't need to take another reference.
 * Access because of a completion being polled does need a reference.
 *
 * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
 * destroy function to sleep on.
 *
 * This means that access from the consumer API requires nothing but
 * taking the struct's lock.
 *
 * Access because of a completion event should go as follows:
 * - lock cq/qp_table and look up struct
 * - increment ref count in struct
 * - drop cq/qp_table lock
 * - lock struct, do your thing, and unlock struct
 * - decrement ref count; if zero, wake up waiters
 *
 * To destroy a CQ/QP, we can do the following:
 * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock
 * - decrement ref count
 * - wait_event until ref count is zero
 *
 * It is the consumer's responsibility to make sure that no QP
 * operations (WQE posting or state modification) are pending when the
 * QP is destroyed.  Also, the consumer must make sure that calls to
 * qp_modify are serialized.
 *
 * Possible optimizations (wait for profile data to see if/where we
 * have locks bouncing between CPUs):
 * - split cq/qp table lock into n separate (cache-aligned) locks,
 *   indexed (say) by the page in the table
 * - split QP struct lock into three (one for common info, one for the
 *   send queue and one for the receive queue)
 */
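
/*
 * For illustration only: a minimal sketch of the completion-event
 * access pattern described above.  The helper names and the exact
 * shape of the table lookup (mthca_cq_get()/mthca_cq_put(), a
 * cq_table with a 'lock' spinlock and an array lookup) are
 * assumptions for this sketch, not definitions from this header:
 *
 *	static struct mthca_cq *mthca_cq_get(struct mthca_dev *dev, int cqn)
 *	{
 *		struct mthca_cq *cq;
 *
 *		// Take the global table lock only long enough to look up
 *		// the struct and pin it with a reference.
 *		spin_lock(&dev->cq_table.lock);
 *		cq = cq_table_lookup(&dev->cq_table, cqn);	// hypothetical
 *		if (cq)
 *			atomic_inc(&cq->refcount);
 *		spin_unlock(&dev->cq_table.lock);
 *
 *		return cq;
 *	}
 *
 *	static void mthca_cq_put(struct mthca_cq *cq)
 *	{
 *		// Last reference dropped: wake anyone sleeping in destroy.
 *		if (atomic_dec_and_test(&cq->refcount))
 *			wake_up(&cq->wait);
 *	}
 *
 * Destroy then follows the recipe above: remove the pointer under the
 * table lock, drop the table's reference with mthca_cq_put(), and
 * wait_event(cq->wait, !atomic_read(&cq->refcount)) before freeing.
 */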

struct mthca_cq {
        struct ib_cq ibcq;
        spinlock_t lock;
        atomic_t refcount;
        int cqn;
        u32 cons_index;
        int is_direct;

        /* Next fields are Arbel only */
        int set_ci_db_index;
        u32 *set_ci_db;
        int arm_db_index;
        u32 *arm_db;
        int arm_sn;

        union {
                struct mthca_buf_list direct;
                struct mthca_buf_list *page_list;
        } queue;
        struct mthca_mr mr;
        wait_queue_head_t wait;
};

struct mthca_wq {
        spinlock_t lock;
        int max;
        unsigned next_ind;
        unsigned last_comp;
        unsigned head;
        unsigned tail;
        void *last;
        int max_gs;
        int wqe_shift;

        int db_index;	/* Arbel only */
        u32 *db;
};

struct mthca_qp {
        struct ib_qp ibqp;
        atomic_t refcount;
        u32 qpn;
        int is_direct;
        u8 transport;
        u8 state;
        u8 atomic_rd_en;
        u8 resp_depth;

        struct mthca_mr mr;

        struct mthca_wq rq;
        struct mthca_wq sq;
        enum ib_sig_type sq_policy;
        int send_wqe_offset;

        u64 *wrid;
        union {
                struct mthca_buf_list direct;
                struct mthca_buf_list *page_list;
        } queue;

        wait_queue_head_t wait;
};

struct mthca_sqp {
        struct mthca_qp qp;
        int port;
        int pkey_index;
        u32 qkey;
        u32 send_psn;
        struct ib_ud_header ud_header;
        int header_buf_size;
        void *header_buf;
        dma_addr_t header_dma;
};

static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibmr)
{
        return container_of(ibmr, struct mthca_fmr, ibmr);
}

static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct mthca_mr, ibmr);
}

static inline struct mthca_pd *to_mpd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct mthca_pd, ibpd);
}

static inline struct mthca_ah *to_mah(struct ib_ah *ibah)
{
        return container_of(ibah, struct mthca_ah, ibah);
}

static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct mthca_cq, ibcq);
}

static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct mthca_qp, ibqp);
}

static inline struct mthca_sqp *to_msqp(struct mthca_qp *qp)
{
        return container_of(qp, struct mthca_sqp, qp);
}
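
/*
 * The to_m*() helpers above use container_of() to recover the
 * driver-private structure from the embedded ib_* object that the
 * midlayer hands back to us.  A sketch of typical use in a verbs
 * entry point (the function name and signature here are illustrative,
 * not defined by this header):
 *
 *	static int example_poll_cq(struct ib_cq *ibcq, int num_entries,
 *				   struct ib_wc *entry)
 *	{
 *		struct mthca_cq *cq = to_mcq(ibcq);
 *
 *		spin_lock(&cq->lock);
 *		// ... poll CQEs into entry[] ...
 *		spin_unlock(&cq->lock);
 *		return 0;
 *	}
 *
 * Note that to_msqp() is only valid for a struct mthca_qp that was
 * actually allocated as part of a struct mthca_sqp (the special QPs).
 */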

#endif /* MTHCA_PROVIDER_H */