/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/etherdevice.h>

#include "mlx4_ib.h"

static int create_ib_ah(struct ib_ah *ib_ah, struct ib_ah_attr *ah_attr)
{
	struct ib_pd *pd = ib_ah->pd;
	struct mlx4_ib_ah *ah = to_mah(ib_ah);
	struct mlx4_dev *dev = to_mdev(ib_ah->device)->dev;

	ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
	ah->av.ib.g_slid = ah_attr->src_path_bits;
	ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
	if (ah_attr->ah_flags & IB_AH_GRH) {
		ah->av.ib.g_slid |= 0x80;
		ah->av.ib.gid_index = ah_attr->grh.sgid_index;
		ah->av.ib.hop_limit = ah_attr->grh.hop_limit;
		ah->av.ib.sl_tclass_flowlabel |=
			cpu_to_be32((ah_attr->grh.traffic_class << 20) |
				    ah_attr->grh.flow_label);
		memcpy(ah->av.ib.dgid, ah_attr->grh.dgid.raw, 16);
	}

	ah->av.ib.dlid = cpu_to_be16(ah_attr->dlid);
	if (ah_attr->static_rate) {
		ah->av.ib.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
		while (ah->av.ib.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
		       !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
			--ah->av.ib.stat_rate;
	}
	return 0;
}

static int create_iboe_ah(struct ib_ah *ib_ah, struct ib_ah_attr *ah_attr)
{
	struct ib_pd *pd = ib_ah->pd;
	struct mlx4_ib_dev *ibdev = to_mdev(ib_ah->device);
	struct mlx4_ib_ah *ah = to_mah(ib_ah);
	struct mlx4_dev *dev = ibdev->dev;
	int is_mcast = 0;
	struct in6_addr in6;
	u16 vlan_tag = 0xffff;
	union ib_gid sgid;
	struct ib_gid_attr gid_attr;
	int ret;

	memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
	if (rdma_is_multicast_addr(&in6)) {
		is_mcast = 1;
		rdma_get_mcast_mac(&in6, ah->av.eth.mac);
	} else {
		memcpy(ah->av.eth.mac, ah_attr->dmac, ETH_ALEN);
	}
	ret = ib_get_cached_gid(pd->device, ah_attr->port_num,
				ah_attr->grh.sgid_index, &sgid, &gid_attr);
	if (ret)
		return ret;
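	/*
	 * Derive the source MAC and VLAN from the net device backing the
	 * source GID entry; if no net device is associated with the GID,
	 * the source MAC stays all-zero and the VLAN tag stays invalid
	 * (0xffff), so no VLAN header is requested.
	 */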
	eth_zero_addr(ah->av.eth.s_mac);
	if (gid_attr.ndev) {
		vlan_tag = rdma_vlan_dev_vlan_id(gid_attr.ndev);
		memcpy(ah->av.eth.s_mac, IF_LLADDR(gid_attr.ndev), ETH_ALEN);
		if_rele(gid_attr.ndev);
	}
	if (vlan_tag < 0x1000)
		vlan_tag |= (ah_attr->sl & 7) << 13;
	ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
	ret = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index);
	if (ret < 0)
		return ret;
	ah->av.eth.gid_index = ret;
	ah->av.eth.vlan = cpu_to_be16(vlan_tag);
	ah->av.eth.hop_limit = ah_attr->grh.hop_limit;
	if (ah_attr->static_rate) {
		ah->av.eth.stat_rate = ah_attr->static_rate + MLX4_STAT_RATE_OFFSET;
		while (ah->av.eth.stat_rate > IB_RATE_2_5_GBPS + MLX4_STAT_RATE_OFFSET &&
		       !(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support))
			--ah->av.eth.stat_rate;
	}

	/*
	 * HW requires multicast LID so we just choose one.
	 */
	if (is_mcast)
		ah->av.ib.dlid = cpu_to_be16(0xc000);

	memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
	ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 29);

	return 0;
}

int mlx4_ib_create_ah(struct ib_ah *ib_ah, struct ib_ah_attr *ah_attr,
		      u32 flags, struct ib_udata *udata)
{
	if (rdma_port_get_link_layer(ib_ah->pd->device, ah_attr->port_num) == IB_LINK_LAYER_ETHERNET) {
		if (!(ah_attr->ah_flags & IB_AH_GRH)) {
			return -EINVAL;
		} else {
			/*
			 * TBD: need to handle the case when we get
			 * called in an atomic context and there we
			 * might sleep.  We don't expect this
			 * currently since we're working with link
			 * local addresses which we can translate
			 * without going to sleep.
			 */
			return create_iboe_ah(ib_ah, ah_attr);
		}
	}
	return create_ib_ah(ib_ah, ah_attr);
}

int mlx4_ib_create_ah_slave(struct ib_ah *ah, struct ib_ah_attr *ah_attr,
			    int slave_sgid_index, u8 *s_mac, u16 vlan_tag)
{
	struct ib_ah_attr slave_attr = *ah_attr;
	struct mlx4_ib_ah *mah = to_mah(ah);
	int ret;

	slave_attr.grh.sgid_index = slave_sgid_index;
	ret = mlx4_ib_create_ah(ah, &slave_attr, 0, NULL);
	if (ret)
		return ret;

	/* get rid of force-loopback bit */
	mah->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);

	if (rdma_port_get_link_layer(ah->pd->device, ah_attr->port_num) == IB_LINK_LAYER_ETHERNET)
		memcpy(mah->av.eth.s_mac, s_mac, 6);

	if (vlan_tag < 0x1000)
		vlan_tag |= (ah_attr->sl & 7) << 13;
	mah->av.eth.vlan = cpu_to_be16(vlan_tag);

	return 0;
}

int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
	struct mlx4_ib_ah *ah = to_mah(ibah);
	enum rdma_link_layer ll;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24;
	ll = rdma_port_get_link_layer(ibah->device, ah_attr->port_num);
	if (ll == IB_LINK_LAYER_ETHERNET)
		ah_attr->sl = be32_to_cpu(ah->av.eth.sl_tclass_flowlabel) >> 29;
	else
		ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;

	ah_attr->dlid = ll == IB_LINK_LAYER_INFINIBAND ?
		be16_to_cpu(ah->av.ib.dlid) : 0;
	if (ah->av.ib.stat_rate)
		ah_attr->static_rate = ah->av.ib.stat_rate - MLX4_STAT_RATE_OFFSET;
	ah_attr->src_path_bits = ah->av.ib.g_slid & 0x7F;

	if (mlx4_ib_ah_grh_present(ah)) {
		ah_attr->ah_flags = IB_AH_GRH;

		ah_attr->grh.traffic_class =
			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 20;
		ah_attr->grh.flow_label =
			be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) & 0xfffff;
		ah_attr->grh.hop_limit = ah->av.ib.hop_limit;
		ah_attr->grh.sgid_index = ah->av.ib.gid_index;
		memcpy(ah_attr->grh.dgid.raw, ah->av.ib.dgid, 16);
	}

	return 0;
}

void mlx4_ib_destroy_ah(struct ib_ah *ah, u32 flags)
{
	return;
}