/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies.  All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_mad.c 1349 2004-12-16 21:09:43Z roland $
 */

#include <linux/string.h>
#include <linux/slab.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"

/* Mellanox vendor-specific MAD management classes this driver accepts. */
enum {
	MTHCA_VENDOR_CLASS1 = 0x9,
	MTHCA_VENDOR_CLASS2 = 0xa
};

/*
 * update_sm_ah - refresh the cached address handle for the subnet manager.
 *
 * Creates a new AH addressed at @lid/@sl on @port_num and installs it as
 * dev->sm_ah[port_num - 1], destroying the old one (if any) under sm_lock.
 * Called when a PortInfo SET is snooped so later forwarded traps reach the
 * current SM.  Quietly does nothing if no send agent is registered for the
 * port or if AH creation fails.
 */
static void update_sm_ah(struct mthca_dev *dev,
			 u8 port_num, u16 lid, u8 sl)
{
	struct ib_ah *new_ah;
	struct ib_ah_attr ah_attr;
	unsigned long flags;

	/* No MAD agent on this port: nothing will ever use the AH. */
	if (!dev->send_agent[port_num - 1][0])
		return;

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid = lid;
	ah_attr.sl = sl;
	ah_attr.port_num = port_num;

	new_ah = ib_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
			      &ah_attr);
	if (IS_ERR(new_ah))
		return;

	/* sm_lock serializes against readers of sm_ah in forward_trap(). */
	spin_lock_irqsave(&dev->sm_lock, flags);
	if (dev->sm_ah[port_num - 1])
		ib_destroy_ah(dev->sm_ah[port_num - 1]);
	dev->sm_ah[port_num - 1] = new_ah;
	spin_unlock_irqrestore(&dev->sm_lock, flags);
}

/*
 * Snoop SM MADs for port info and P_Key table sets, so we can
 * synthesize LID change and P_Key change events.
82 */ 83 static void smp_snoop(struct ib_device *ibdev, 84 u8 port_num, 85 struct ib_mad *mad) 86 { 87 struct ib_event event; 88 89 if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || 90 mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && 91 mad->mad_hdr.method == IB_MGMT_METHOD_SET) { 92 if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) { 93 update_sm_ah(to_mdev(ibdev), port_num, 94 be16_to_cpup((__be16 *) (mad->data + 58)), 95 (*(u8 *) (mad->data + 76)) & 0xf); 96 97 event.device = ibdev; 98 event.event = IB_EVENT_LID_CHANGE; 99 event.element.port_num = port_num; 100 ib_dispatch_event(&event); 101 } 102 103 if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) { 104 event.device = ibdev; 105 event.event = IB_EVENT_PKEY_CHANGE; 106 event.element.port_num = port_num; 107 ib_dispatch_event(&event); 108 } 109 } 110 } 111 112 static void node_desc_override(struct ib_device *dev, 113 struct ib_mad *mad) 114 { 115 if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || 116 mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && 117 mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP && 118 mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) { 119 mutex_lock(&to_mdev(dev)->cap_mask_mutex); 120 memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64); 121 mutex_unlock(&to_mdev(dev)->cap_mask_mutex); 122 } 123 } 124 125 static void forward_trap(struct mthca_dev *dev, 126 u8 port_num, 127 struct ib_mad *mad) 128 { 129 int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED; 130 struct ib_mad_send_buf *send_buf; 131 struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn]; 132 int ret; 133 unsigned long flags; 134 135 if (agent) { 136 send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, 137 IB_MGMT_MAD_DATA, GFP_ATOMIC); 138 /* 139 * We rely here on the fact that MLX QPs don't use the 140 * address handle after the send is posted (this is 141 * wrong following the IB spec strictly, but we know 142 * it's OK for 
our devices). 143 */ 144 spin_lock_irqsave(&dev->sm_lock, flags); 145 memcpy(send_buf->mad, mad, sizeof *mad); 146 if ((send_buf->ah = dev->sm_ah[port_num - 1])) 147 ret = ib_post_send_mad(send_buf, NULL); 148 else 149 ret = -EINVAL; 150 spin_unlock_irqrestore(&dev->sm_lock, flags); 151 152 if (ret) 153 ib_free_send_mad(send_buf); 154 } 155 } 156 157 int mthca_process_mad(struct ib_device *ibdev, 158 int mad_flags, 159 u8 port_num, 160 struct ib_wc *in_wc, 161 struct ib_grh *in_grh, 162 struct ib_mad *in_mad, 163 struct ib_mad *out_mad) 164 { 165 int err; 166 u8 status; 167 u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); 168 169 /* Forward locally generated traps to the SM */ 170 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && 171 slid == 0) { 172 forward_trap(to_mdev(ibdev), port_num, in_mad); 173 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; 174 } 175 176 /* 177 * Only handle SM gets, sets and trap represses for SM class 178 * 179 * Only handle PMA and Mellanox vendor-specific class gets and 180 * sets for other classes. 181 */ 182 if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || 183 in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { 184 if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET && 185 in_mad->mad_hdr.method != IB_MGMT_METHOD_SET && 186 in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS) 187 return IB_MAD_RESULT_SUCCESS; 188 189 /* 190 * Don't process SMInfo queries or vendor-specific 191 * MADs -- the SMA can't handle them. 
192 */ 193 if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO || 194 ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) == 195 IB_SMP_ATTR_VENDOR_MASK)) 196 return IB_MAD_RESULT_SUCCESS; 197 } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT || 198 in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1 || 199 in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) { 200 if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET && 201 in_mad->mad_hdr.method != IB_MGMT_METHOD_SET) 202 return IB_MAD_RESULT_SUCCESS; 203 } else 204 return IB_MAD_RESULT_SUCCESS; 205 206 err = mthca_MAD_IFC(to_mdev(ibdev), 207 mad_flags & IB_MAD_IGNORE_MKEY, 208 mad_flags & IB_MAD_IGNORE_BKEY, 209 port_num, in_wc, in_grh, in_mad, out_mad, 210 &status); 211 if (err) { 212 mthca_err(to_mdev(ibdev), "MAD_IFC failed\n"); 213 return IB_MAD_RESULT_FAILURE; 214 } 215 if (status == MTHCA_CMD_STAT_BAD_PKT) 216 return IB_MAD_RESULT_SUCCESS; 217 if (status) { 218 mthca_err(to_mdev(ibdev), "MAD_IFC returned status %02x\n", 219 status); 220 return IB_MAD_RESULT_FAILURE; 221 } 222 223 if (!out_mad->mad_hdr.status) { 224 smp_snoop(ibdev, port_num, in_mad); 225 node_desc_override(ibdev, out_mad); 226 } 227 228 /* set return bit in status of directed route responses */ 229 if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) 230 out_mad->mad_hdr.status |= cpu_to_be16(1 << 15); 231 232 if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) 233 /* no response for trap repress */ 234 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; 235 236 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; 237 } 238 239 static void send_handler(struct ib_mad_agent *agent, 240 struct ib_mad_send_wc *mad_send_wc) 241 { 242 ib_free_send_mad(mad_send_wc->send_buf); 243 } 244 245 int mthca_create_agents(struct mthca_dev *dev) 246 { 247 struct ib_mad_agent *agent; 248 int p, q; 249 250 spin_lock_init(&dev->sm_lock); 251 252 for (p = 0; p < dev->limits.num_ports; ++p) 253 for (q = 0; q <= 1; ++q) { 254 agent 
= ib_register_mad_agent(&dev->ib_dev, p + 1, 255 q ? IB_QPT_GSI : IB_QPT_SMI, 256 NULL, 0, send_handler, 257 NULL, NULL); 258 if (IS_ERR(agent)) 259 goto err; 260 dev->send_agent[p][q] = agent; 261 } 262 263 return 0; 264 265 err: 266 for (p = 0; p < dev->limits.num_ports; ++p) 267 for (q = 0; q <= 1; ++q) 268 if (dev->send_agent[p][q]) 269 ib_unregister_mad_agent(dev->send_agent[p][q]); 270 271 return PTR_ERR(agent); 272 } 273 274 void __devexit mthca_free_agents(struct mthca_dev *dev) 275 { 276 struct ib_mad_agent *agent; 277 int p, q; 278 279 for (p = 0; p < dev->limits.num_ports; ++p) { 280 for (q = 0; q <= 1; ++q) { 281 agent = dev->send_agent[p][q]; 282 dev->send_agent[p][q] = NULL; 283 ib_unregister_mad_agent(agent); 284 } 285 286 if (dev->sm_ah[p]) 287 ib_destroy_ah(dev->sm_ah[p]); 288 } 289 } 290