xref: /linux/drivers/infiniband/hw/mlx4/ah.c (revision e5c86679d5e864947a52fb31e45a425dea3e7fa9)
/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <linux/slab.h>
#include <linux/inet.h>
#include <linux/string.h>
#include <linux/mlx4/driver.h>

#include "mlx4_ib.h"

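/*
 * create_ib_ah - build the address vector for a native InfiniBand port.
 * @pd:      protection domain the AH belongs to
 * @ah_attr: address handle attributes supplied by the caller
 * @ah:      preallocated mlx4_ib_ah whose av.ib union member is filled in
 *
 * Packs the DLID, source path bits, SL and (if IB_AH_GRH is set) the GRH
 * fields into the hardware address vector.  A requested static rate is
 * converted to the device encoding and stepped down until the device
 * advertises support for it.  Cannot fail.
 */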
static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
				  struct mlx4_ib_ah *ah)
{
	struct mlx4_dev *dev = to_mdev(pd->device)->dev;

	ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
	ah->av.ib.g_slid  = ah_attr->src_path_bits;
	ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
	if (ah_attr->ah_flags & IB_AH_GRH) {
		/* Bit 7 of g_slid flags that a GRH is present */
		ah->av.ib.g_slid   |= 0x80;
		ah->av.ib.gid_index = ah_attr->grh.sgid_index;
		ah->av.ib.hop_limit = ah_attr->grh.hop_limit;
		ah->av.ib.sl_tclass_flowlabel |=
			cpu_to_be32((ah_attr->grh.traffic_class << 20) |
				    ah_attr->grh.flow_label);
		memcpy(ah->av.ib.dgid, ah_attr->grh.dgid.raw, 16);
	}

	ah->av.ib.dlid    = cpu_to_be16(ah_attr->dlid);
	if (ah_attr->static_rate) {
		ah->av.ib.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
		/* Step down to a rate the device supports, but not below 2.5 Gbps */
		while (ah->av.ib.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
		       !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
			--ah->av.ib.stat_rate;
	}

	return &ah->ibah;
}

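/*
 * create_iboe_ah - build the address vector for an Ethernet (RoCE) port.
 * @pd:      protection domain the AH belongs to
 * @ah_attr: address handle attributes; the GRH must be valid since RoCE
 *           addressing is GID based
 * @ah:      preallocated mlx4_ib_ah whose av.eth union member is filled in
 *
 * Resolves the destination MAC (derived from the GID for multicast,
 * taken from ah_attr->dmac otherwise), looks up the source GID entry to
 * obtain the netdev, source MAC and VLAN, and maps the sgid_index to the
 * real GID table index used by the hardware.  Returns the new ib_ah on
 * success or an ERR_PTR() on failure.
 */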
static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
				    struct mlx4_ib_ah *ah)
{
	struct mlx4_ib_dev *ibdev = to_mdev(pd->device);
	struct mlx4_dev *dev = ibdev->dev;
	int is_mcast = 0;
	struct in6_addr in6;
	u16 vlan_tag = 0xffff;
	union ib_gid sgid;
	struct ib_gid_attr gid_attr;
	int ret;

	memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
	if (rdma_is_multicast_addr(&in6)) {
		is_mcast = 1;
		rdma_get_mcast_mac(&in6, ah->av.eth.mac);
	} else {
		memcpy(ah->av.eth.mac, ah_attr->dmac, ETH_ALEN);
	}
	ret = ib_get_cached_gid(pd->device, ah_attr->port_num,
				ah_attr->grh.sgid_index, &sgid, &gid_attr);
	if (ret)
		return ERR_PTR(ret);
	eth_zero_addr(ah->av.eth.s_mac);
	if (gid_attr.ndev) {
		if (is_vlan_dev(gid_attr.ndev))
			vlan_tag = vlan_dev_vlan_id(gid_attr.ndev);
		memcpy(ah->av.eth.s_mac, gid_attr.ndev->dev_addr, ETH_ALEN);
		dev_put(gid_attr.ndev);
	}
	/* A tag below 0x1000 means a real VLAN; fold the SL into the 802.1p priority bits */
	if (vlan_tag < 0x1000)
		vlan_tag |= (ah_attr->sl & 7) << 13;
	ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
	ret = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
	if (ret < 0)
		return ERR_PTR(ret);
	ah->av.eth.gid_index = ret;
	ah->av.eth.vlan = cpu_to_be16(vlan_tag);
	ah->av.eth.hop_limit = ah_attr->grh.hop_limit;
	if (ah_attr->static_rate) {
		ah->av.eth.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
		while (ah->av.eth.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
		       !(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support))
			--ah->av.eth.stat_rate;
	}
	ah->av.eth.sl_tclass_flowlabel |=
			cpu_to_be32((ah_attr->grh.traffic_class << 20) |
				    ah_attr->grh.flow_label);
	/*
	 * HW requires a multicast LID, so we just choose one.
	 */
	if (is_mcast)
		ah->av.ib.dlid = cpu_to_be16(0xc000);

	memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
	ah->av.eth.sl_tclass_flowlabel |= cpu_to_be32(ah_attr->sl << 29);

	return &ah->ibah;
}

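/**
 * mlx4_ib_create_ah - create_ah verb for mlx4 devices.
 * @pd:      protection domain to create the address handle on
 * @ah_attr: address handle attributes from the caller
 * @udata:   unused here, present to match the verb signature
 *
 * The AH is allocated with GFP_ATOMIC since this verb may be called
 * from atomic context (see the comment in the Ethernet branch below).
 * Ethernet (RoCE) ports require a GRH and are handled by
 * create_iboe_ah(); native IB ports are handled by create_ib_ah().
 *
 * Callers normally reach this through the core ib_create_ah() verb
 * rather than calling it directly.  A minimal caller-side sketch,
 * assuming a valid @pd and an already-resolved remote LID:
 *
 *	struct ib_ah_attr attr = {
 *		.dlid     = remote_lid,
 *		.sl       = 0,
 *		.port_num = 1,
 *	};
 *	struct ib_ah *ah = ib_create_ah(pd, &attr);
 *
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 */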
struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
				struct ib_udata *udata)
{
	struct mlx4_ib_ah *ah;
	struct ib_ah *ret;

	ah = kzalloc(sizeof *ah, GFP_ATOMIC);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	if (rdma_port_get_link_layer(pd->device, ah_attr->port_num) == IB_LINK_LAYER_ETHERNET) {
		if (!(ah_attr->ah_flags & IB_AH_GRH)) {
			/* RoCE addressing is GID based, so a GRH is mandatory */
			ret = ERR_PTR(-EINVAL);
		} else {
			/*
			 * TBD: need to handle the case when we get
			 * called in an atomic context and there we
			 * might sleep.  We don't expect this
			 * currently since we're working with link
			 * local addresses which we can translate
			 * without going to sleep.
			 */
			ret = create_iboe_ah(pd, ah_attr, ah);
		}

		if (IS_ERR(ret))
			kfree(ah);

		return ret;
	} else
		return create_ib_ah(pd, ah_attr, ah); /* never fails */
}

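/**
 * mlx4_ib_query_ah - query_ah verb: translate the stored address vector
 *		      back into ib_ah_attr form.
 * @ibah:    address handle to query
 * @ah_attr: filled in from the cached mlx4 address vector
 *
 * The SL is recovered with a link-layer dependent shift (bits 31:29 of
 * sl_tclass_flowlabel for Ethernet, bits 31:28 for IB), and the DLID is
 * only reported for IB ports.  Always returns 0.
 */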
int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
	struct mlx4_ib_ah *ah = to_mah(ibah);
	enum rdma_link_layer ll;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24;
	ll = rdma_port_get_link_layer(ibah->device, ah_attr->port_num);
	if (ll == IB_LINK_LAYER_ETHERNET)
		ah_attr->sl = be32_to_cpu(ah->av.eth.sl_tclass_flowlabel) >> 29;
	else
		ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;

	ah_attr->dlid = ll == IB_LINK_LAYER_INFINIBAND ? be16_to_cpu(ah->av.ib.dlid) : 0;
	if (ah->av.ib.stat_rate)
		ah_attr->static_rate = ah->av.ib.stat_rate - MLX4_STAT_RATE_OFFSET;
	ah_attr->src_path_bits = ah->av.ib.g_slid & 0x7F;

	if (mlx4_ib_ah_grh_present(ah)) {
		ah_attr->ah_flags = IB_AH_GRH;

		ah_attr->grh.traffic_class =
			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20;
		ah_attr->grh.flow_label =
			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) & 0xfffff;
		ah_attr->grh.hop_limit  = ah->av.ib.hop_limit;
		ah_attr->grh.sgid_index = ah->av.ib.gid_index;
		memcpy(ah_attr->grh.dgid.raw, ah->av.ib.dgid, 16);
	}

	return 0;
}

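/**
 * mlx4_ib_destroy_ah - destroy_ah verb: free the mlx4_ib_ah.
 * @ah: address handle to destroy
 *
 * The address vector lives entirely in the driver structure, so
 * destroying the handle only requires freeing it.
 */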
int mlx4_ib_destroy_ah(struct ib_ah *ah)
{
	kfree(to_mah(ah));
	return 0;
}