xref: /linux/drivers/infiniband/hw/mthca/mthca_av.c (revision 0526b56cbc3c489642bd6a5fe4b718dea7ef0ee8)
1 /*
2  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 #include <linux/string.h>
35 #include <linux/slab.h>
36 
37 #include <rdma/ib_verbs.h>
38 #include <rdma/ib_cache.h>
39 
40 #include "mthca_dev.h"
41 
/*
 * Static rate encodings used in the AV msg_sr field on Tavor (mem-full)
 * HCAs.  FULL means "no static rate limit" (send at the full port rate);
 * the others map to fixed link speeds (see tavor_rate_to_ib()).
 */
enum {
	MTHCA_RATE_TAVOR_FULL   = 0,
	MTHCA_RATE_TAVOR_1X     = 1,
	MTHCA_RATE_TAVOR_4X     = 2,
	MTHCA_RATE_TAVOR_1X_DDR = 3
};
48 
/*
 * Static rate encodings used on mem-free (Arbel) HCAs.  These express a
 * fraction of the current port rate rather than an absolute speed (see
 * memfree_rate_to_ib()).
 */
enum {
	MTHCA_RATE_MEMFREE_FULL    = 0,
	MTHCA_RATE_MEMFREE_QUARTER = 1,
	MTHCA_RATE_MEMFREE_EIGHTH  = 2,
	MTHCA_RATE_MEMFREE_HALF    = 3
};
55 
/*
 * UD address vector as consumed by the HCA (it is handed to the device
 * via DMA or memcpy_toio in mthca_create_ah()), so fields are big-endian
 * and the layout must not change.
 */
struct mthca_av {
	__be32 port_pd;			/* port number (bits 31:24) | PD number */
	u8     reserved1;
	u8     g_slid;			/* GRH-present flag (bit 7) | source path bits */
	__be16 dlid;			/* destination LID */
	u8     reserved2;
	u8     gid_index;		/* index into the device-wide SGID table */
	u8     msg_sr;			/* max message size (bits 7:4) | static rate */
	u8     hop_limit;		/* GRH hop limit */
	__be32 sl_tclass_flowlabel;	/* SL (31:28) | tclass (27:20) | flow label (19:0) */
	__be32 dgid[4];			/* destination GID, 16 bytes */
};
68 
69 static enum ib_rate memfree_rate_to_ib(u8 mthca_rate, u8 port_rate)
70 {
71 	switch (mthca_rate) {
72 	case MTHCA_RATE_MEMFREE_EIGHTH:
73 		return mult_to_ib_rate(port_rate >> 3);
74 	case MTHCA_RATE_MEMFREE_QUARTER:
75 		return mult_to_ib_rate(port_rate >> 2);
76 	case MTHCA_RATE_MEMFREE_HALF:
77 		return mult_to_ib_rate(port_rate >> 1);
78 	case MTHCA_RATE_MEMFREE_FULL:
79 	default:
80 		return mult_to_ib_rate(port_rate);
81 	}
82 }
83 
84 static enum ib_rate tavor_rate_to_ib(u8 mthca_rate, u8 port_rate)
85 {
86 	switch (mthca_rate) {
87 	case MTHCA_RATE_TAVOR_1X:     return IB_RATE_2_5_GBPS;
88 	case MTHCA_RATE_TAVOR_1X_DDR: return IB_RATE_5_GBPS;
89 	case MTHCA_RATE_TAVOR_4X:     return IB_RATE_10_GBPS;
90 	default:		      return mult_to_ib_rate(port_rate);
91 	}
92 }
93 
94 enum ib_rate mthca_rate_to_ib(struct mthca_dev *dev, u8 mthca_rate, u32 port)
95 {
96 	if (mthca_is_memfree(dev)) {
97 		/* Handle old Arbel FW */
98 		if (dev->limits.stat_rate_support == 0x3 && mthca_rate)
99 			return IB_RATE_2_5_GBPS;
100 
101 		return memfree_rate_to_ib(mthca_rate, dev->rate[port - 1]);
102 	} else
103 		return tavor_rate_to_ib(mthca_rate, dev->rate[port - 1]);
104 }
105 
106 static u8 ib_rate_to_memfree(u8 req_rate, u8 cur_rate)
107 {
108 	if (cur_rate <= req_rate)
109 		return 0;
110 
111 	/*
112 	 * Inter-packet delay (IPD) to get from rate X down to a rate
113 	 * no more than Y is (X - 1) / Y.
114 	 */
115 	switch ((cur_rate - 1) / req_rate) {
116 	case 0:	 return MTHCA_RATE_MEMFREE_FULL;
117 	case 1:	 return MTHCA_RATE_MEMFREE_HALF;
118 	case 2:
119 	case 3:	 return MTHCA_RATE_MEMFREE_QUARTER;
120 	default: return MTHCA_RATE_MEMFREE_EIGHTH;
121 	}
122 }
123 
124 static u8 ib_rate_to_tavor(u8 static_rate)
125 {
126 	switch (static_rate) {
127 	case IB_RATE_2_5_GBPS: return MTHCA_RATE_TAVOR_1X;
128 	case IB_RATE_5_GBPS:   return MTHCA_RATE_TAVOR_1X_DDR;
129 	case IB_RATE_10_GBPS:  return MTHCA_RATE_TAVOR_4X;
130 	default:	       return MTHCA_RATE_TAVOR_FULL;
131 	}
132 }
133 
134 u8 mthca_get_rate(struct mthca_dev *dev, int static_rate, u32 port)
135 {
136 	u8 rate;
137 
138 	if (!static_rate || ib_rate_to_mult(static_rate) >= dev->rate[port - 1])
139 		return 0;
140 
141 	if (mthca_is_memfree(dev))
142 		rate = ib_rate_to_memfree(ib_rate_to_mult(static_rate),
143 					  dev->rate[port - 1]);
144 	else
145 		rate = ib_rate_to_tavor(static_rate);
146 
147 	if (!(dev->limits.stat_rate_support & (1 << rate)))
148 		rate = 1;
149 
150 	return rate;
151 }
152 
/*
 * Allocate and fill in an address vector (AV) for an address handle.
 *
 * Three backing stores are possible, tried in this order:
 *  - mem-free HCAs: plain kmalloc (MTHCA_AH_KMALLOC);
 *  - Tavor, when no special QPs exist on the PD and device DDR is
 *    visible: a slot in on-HCA DDR memory (MTHCA_AH_ON_HCA), falling
 *    back to the next option if allocation fails;
 *  - otherwise: a DMA-coherent pool entry (MTHCA_AH_PCI_POOL).
 *
 * Returns 0 on success or -ENOMEM on allocation failure.  GFP_ATOMIC is
 * used throughout; NOTE(review): presumably because this can be called
 * from non-sleeping context — confirm against callers.
 */
int mthca_create_ah(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct rdma_ah_attr *ah_attr,
		    struct mthca_ah *ah)
{
	u32 index = -1;
	struct mthca_av *av = NULL;

	/* Default; overridden below if a better backing store works out. */
	ah->type = MTHCA_AH_PCI_POOL;

	if (mthca_is_memfree(dev)) {
		ah->av   = kmalloc(sizeof *ah->av, GFP_ATOMIC);
		if (!ah->av)
			return -ENOMEM;

		ah->type = MTHCA_AH_KMALLOC;
		av       = ah->av;
	} else if (!atomic_read(&pd->sqp_count) &&
		 !(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
		index = mthca_alloc(&dev->av_table.alloc);

		/* fall back to allocate in host memory */
		if (index == -1)
			goto on_hca_fail;

		/* Staging buffer; copied to DDR and freed at the end. */
		av = kmalloc(sizeof *av, GFP_ATOMIC);
		if (!av)
			goto on_hca_fail;

		ah->type = MTHCA_AH_ON_HCA;
		ah->avdma  = dev->av_table.ddr_av_base +
			index * MTHCA_AV_SIZE;
	}

on_hca_fail:
	if (ah->type == MTHCA_AH_PCI_POOL) {
		ah->av = dma_pool_zalloc(dev->av_table.pool,
					 GFP_ATOMIC, &ah->avdma);
		if (!ah->av)
			return -ENOMEM;

		av = ah->av;
	}

	ah->key = pd->ntmr.ibmr.lkey;

	/* Pack the AV fields; see struct mthca_av for the bit layout. */
	av->port_pd = cpu_to_be32(pd->pd_num |
				  (rdma_ah_get_port_num(ah_attr) << 24));
	av->g_slid  = rdma_ah_get_path_bits(ah_attr);
	av->dlid    = cpu_to_be16(rdma_ah_get_dlid(ah_attr));
	av->msg_sr  = (3 << 4) | /* 2K message */
		mthca_get_rate(dev, rdma_ah_get_static_rate(ah_attr),
			       rdma_ah_get_port_num(ah_attr));
	av->sl_tclass_flowlabel = cpu_to_be32(rdma_ah_get_sl(ah_attr) << 28);
	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);

		/* Bit 7 of g_slid flags that a GRH is present. */
		av->g_slid |= 0x80;
		/* The device GID table is flat; offset by port. */
		av->gid_index = (rdma_ah_get_port_num(ah_attr) - 1) *
				  dev->limits.gid_table_len +
				  grh->sgid_index;
		av->hop_limit = grh->hop_limit;
		av->sl_tclass_flowlabel |=
			cpu_to_be32((grh->traffic_class << 20) |
				    grh->flow_label);
		memcpy(av->dgid, grh->dgid.raw, 16);
	} else {
		/* Arbel workaround -- low byte of GID must be 2 */
		av->dgid[3] = cpu_to_be32(2);
	}

	/* Debug dump of the packed AV; compiled out (if (0)). */
	if (0) {
		int j;

		mthca_dbg(dev, "Created UDAV at %p/%08lx:\n",
			  av, (unsigned long) ah->avdma);
		for (j = 0; j < 8; ++j)
			printk(KERN_DEBUG "  [%2x] %08x\n",
			       j * 4, be32_to_cpu(((__be32 *) av)[j]));
	}

	if (ah->type == MTHCA_AH_ON_HCA) {
		/* Push the staged AV into device DDR and drop the staging copy. */
		memcpy_toio(dev->av_table.av_map + index * MTHCA_AV_SIZE,
			    av, MTHCA_AV_SIZE);
		kfree(av);
	}

	return 0;
}
242 
243 int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah)
244 {
245 	switch (ah->type) {
246 	case MTHCA_AH_ON_HCA:
247 		mthca_free(&dev->av_table.alloc,
248 			   (ah->avdma - dev->av_table.ddr_av_base) /
249 			   MTHCA_AV_SIZE);
250 		break;
251 
252 	case MTHCA_AH_PCI_POOL:
253 		dma_pool_free(dev->av_table.pool, ah->av, ah->avdma);
254 		break;
255 
256 	case MTHCA_AH_KMALLOC:
257 		kfree(ah->av);
258 		break;
259 	}
260 
261 	return 0;
262 }
263 
264 int mthca_ah_grh_present(struct mthca_ah *ah)
265 {
266 	return !!(ah->av->g_slid & 0x80);
267 }
268 
/*
 * Unpack an AV into a UD header (LRH and, if present, GRH fields).
 *
 * Returns -EINVAL for on-HCA AVs: their host staging copy is freed
 * after being written to device memory (see mthca_create_ah()), so
 * there is nothing to read here.
 */
int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
		  struct ib_ud_header *header)
{
	if (ah->type == MTHCA_AH_ON_HCA)
		return -EINVAL;

	header->lrh.service_level   = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28;
	/* Both sides are big-endian __be16, so no byte swap here. */
	header->lrh.destination_lid = ah->av->dlid;
	/* Only the source path bits are stored in the AV (low 7 bits). */
	header->lrh.source_lid      = cpu_to_be16(ah->av->g_slid & 0x7f);
	if (mthca_ah_grh_present(ah)) {
		header->grh.traffic_class =
			(be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 20) & 0xff;
		header->grh.flow_label    =
			ah->av->sl_tclass_flowlabel & cpu_to_be32(0xfffff);
		header->grh.hop_limit     = ah->av->hop_limit;
		header->grh.source_gid = ah->ibah.sgid_attr->gid;
		memcpy(header->grh.destination_gid.raw,
		       ah->av->dgid, 16);
	}

	return 0;
}
291 
/*
 * Reconstruct rdma_ah_attr from the packed AV (the reverse of
 * mthca_create_ah()'s packing; see struct mthca_av for the bit layout).
 * On-HCA AVs are not readable from the host, hence -ENOSYS.
 */
int mthca_ah_query(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct mthca_ah *ah   = to_mah(ibah);
	struct mthca_dev *dev = to_mdev(ibah->device);
	/* Port number lives in the top byte of port_pd. */
	u32 port_num = be32_to_cpu(ah->av->port_pd) >> 24;

	/* Only implement for MAD and memfree ah for now. */
	if (ah->type == MTHCA_AH_ON_HCA)
		return -ENOSYS;

	memset(attr, 0, sizeof *attr);
	attr->type = ibah->type;
	rdma_ah_set_dlid(attr, be16_to_cpu(ah->av->dlid));
	rdma_ah_set_sl(attr, be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28);
	rdma_ah_set_port_num(attr, port_num);
	/* Low 3 bits of msg_sr hold the static rate encoding. */
	rdma_ah_set_static_rate(attr,
				mthca_rate_to_ib(dev, ah->av->msg_sr & 0x7,
						 port_num));
	rdma_ah_set_path_bits(attr, ah->av->g_slid & 0x7F);
	if (mthca_ah_grh_present(ah)) {
		u32 tc_fl = be32_to_cpu(ah->av->sl_tclass_flowlabel);

		/* Mask off the per-port base to recover the sgid_index. */
		rdma_ah_set_grh(attr, NULL,
				tc_fl & 0xfffff,
				ah->av->gid_index &
				(dev->limits.gid_table_len - 1),
				ah->av->hop_limit,
				(tc_fl >> 20) & 0xff);
		rdma_ah_set_dgid_raw(attr, ah->av->dgid);
	}

	return 0;
}
325 
/*
 * Set up the AV table used by Tavor (mem-full) HCAs: an allocator for
 * on-DDR AV slots, a DMA pool for host-memory AVs, and (when device DDR
 * is visible) an ioremap of the DDR AV region.  Mem-free HCAs kmalloc
 * their AVs instead (see mthca_create_ah()), so nothing is set up.
 *
 * Returns 0 on success, or a negative errno; resources acquired before
 * a failure are released via the goto-cleanup tail.
 */
int mthca_init_av_table(struct mthca_dev *dev)
{
	int err;

	if (mthca_is_memfree(dev))
		return 0;

	err = mthca_alloc_init(&dev->av_table.alloc,
			       dev->av_table.num_ddr_avs,
			       dev->av_table.num_ddr_avs - 1,
			       0);
	if (err)
		return err;

	dev->av_table.pool = dma_pool_create("mthca_av", &dev->pdev->dev,
					     MTHCA_AV_SIZE,
					     MTHCA_AV_SIZE, 0);
	if (!dev->av_table.pool)
		goto out_free_alloc;

	if (!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) {
		/* Map the AV region of device DDR (BAR 4) for memcpy_toio. */
		dev->av_table.av_map = ioremap(pci_resource_start(dev->pdev, 4) +
					       dev->av_table.ddr_av_base -
					       dev->ddr_start,
					       dev->av_table.num_ddr_avs *
					       MTHCA_AV_SIZE);
		if (!dev->av_table.av_map)
			goto out_free_pool;
	} else
		dev->av_table.av_map = NULL;

	return 0;

 out_free_pool:
	dma_pool_destroy(dev->av_table.pool);

 out_free_alloc:
	mthca_alloc_cleanup(&dev->av_table.alloc);
	return -ENOMEM;
}
366 
367 void mthca_cleanup_av_table(struct mthca_dev *dev)
368 {
369 	if (mthca_is_memfree(dev))
370 		return;
371 
372 	if (dev->av_table.av_map)
373 		iounmap(dev->av_table.av_map);
374 	dma_pool_destroy(dev->av_table.pool);
375 	mthca_alloc_cleanup(&dev->av_table.alloc);
376 }
377