/*
 * Copyright (c) 2004, 2005 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: agent.c 1389 2004-12-27 22:56:47Z roland $
 */

#include <linux/dma-mapping.h>

#include <asm/bug.h>

#include <rdma/ib_smi.h>

#include "smi.h"
#include "agent_priv.h"
#include "mad_priv.h"
#include "agent.h"

spinlock_t ib_agent_port_list_lock;
static LIST_HEAD(ib_agent_port_list);

/*
 * Caller must hold ib_agent_port_list_lock
 */
static inline struct ib_agent_port_private *
__ib_get_agent_port(struct ib_device *device, int port_num,
		    struct ib_mad_agent *mad_agent)
{
	struct ib_agent_port_private *entry;

	BUG_ON(!(!!device ^ !!mad_agent));	/* Exactly one MUST be (!NULL) */

	if (device) {
		list_for_each_entry(entry, &ib_agent_port_list, port_list) {
			if (entry->smp_agent->device == device &&
			    entry->port_num == port_num)
				return entry;
		}
	} else {
		list_for_each_entry(entry, &ib_agent_port_list, port_list) {
			if ((entry->smp_agent == mad_agent) ||
			    (entry->perf_mgmt_agent == mad_agent))
				return entry;
		}
	}
	return NULL;
}

static inline struct ib_agent_port_private *
ib_get_agent_port(struct ib_device *device, int port_num,
		  struct ib_mad_agent *mad_agent)
{
	struct ib_agent_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_agent_port_list_lock, flags);
	entry = __ib_get_agent_port(device, port_num, mad_agent);
	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);

	return entry;
}

int smi_check_local_dr_smp(struct ib_smp *smp,
			   struct ib_device *device,
			   int port_num)
{
	struct ib_agent_port_private *port_priv;

	if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		return 1;
	port_priv = ib_get_agent_port(device, port_num, NULL);
	if (!port_priv) {
		printk(KERN_DEBUG SPFX "smi_check_local_dr_smp %s port %d "
		       "not open\n", device->name, port_num);
		return 1;
	}

	return smi_check_local_smp(port_priv->smp_agent, smp);
}
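
/*
 * Build and post a send for the given MAD: the buffer is mapped for
 * DMA, an address handle is created that reverses the path described
 * by the received work completion (and, for PerfMgmt MADs, by the
 * received GRH), and the work request is posted on the agent's QP.
 * Returns 0 on success, non-zero otherwise.
 */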
static int agent_mad_send(struct ib_mad_agent *mad_agent,
			  struct ib_agent_port_private *port_priv,
			  struct ib_mad_private *mad_priv,
			  struct ib_grh *grh,
			  struct ib_wc *wc)
{
	struct ib_agent_send_wr *agent_send_wr;
	struct ib_sge gather_list;
	struct ib_send_wr send_wr;
	struct ib_send_wr *bad_send_wr;
	struct ib_ah_attr ah_attr;
	unsigned long flags;
	int ret = 1;

	agent_send_wr = kmalloc(sizeof(*agent_send_wr), GFP_KERNEL);
	if (!agent_send_wr)
		goto out;
	agent_send_wr->mad = mad_priv;

	gather_list.addr = dma_map_single(mad_agent->device->dma_device,
					  &mad_priv->mad,
					  sizeof(mad_priv->mad),
					  DMA_TO_DEVICE);
	gather_list.length = sizeof(mad_priv->mad);
	gather_list.lkey = mad_agent->mr->lkey;

	send_wr.next = NULL;
	send_wr.opcode = IB_WR_SEND;
	send_wr.sg_list = &gather_list;
	send_wr.num_sge = 1;
	send_wr.wr.ud.remote_qpn = wc->src_qp;	/* DQPN */
	send_wr.wr.ud.timeout_ms = 0;
	send_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;

	/* Reverse the path described by the received work completion */
	ah_attr.dlid = wc->slid;
	ah_attr.port_num = mad_agent->port_num;
	ah_attr.src_path_bits = wc->dlid_path_bits;
	ah_attr.sl = wc->sl;
	ah_attr.static_rate = 0;
	ah_attr.ah_flags = 0;	/* No GRH */
	if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
		if (wc->wc_flags & IB_WC_GRH) {
			ah_attr.ah_flags = IB_AH_GRH;
			/*
			 * Mirror the received GRH: the sender's SGID
			 * becomes our DGID, and the flow label (bits
			 * 0-19) and traffic class (bits 20-27) are
			 * extracted from the combined
			 * version_tclass_flow word.
			 * Should sgid be looked up?  Index 0 is used
			 * for now.
			 */
			ah_attr.grh.sgid_index = 0;
			ah_attr.grh.hop_limit = grh->hop_limit;
			ah_attr.grh.flow_label = be32_to_cpu(
				grh->version_tclass_flow) & 0xfffff;
			ah_attr.grh.traffic_class = (be32_to_cpu(
				grh->version_tclass_flow) >> 20) & 0xff;
			memcpy(ah_attr.grh.dgid.raw,
			       grh->sgid.raw,
			       sizeof(ah_attr.grh.dgid));
		}
	}

	agent_send_wr->ah = ib_create_ah(mad_agent->qp->pd, &ah_attr);
	if (IS_ERR(agent_send_wr->ah)) {
		printk(KERN_ERR SPFX "Failed to create address handle\n");
		/* Don't leak the DMA mapping taken above */
		dma_unmap_single(mad_agent->device->dma_device,
				 gather_list.addr,
				 sizeof(mad_priv->mad),
				 DMA_TO_DEVICE);
		kfree(agent_send_wr);
		goto out;
	}

	send_wr.wr.ud.ah = agent_send_wr->ah;
	if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) {
		send_wr.wr.ud.pkey_index = wc->pkey_index;
		send_wr.wr.ud.remote_qkey = IB_QP1_QKEY;
	} else {	/* for SMPs */
		send_wr.wr.ud.pkey_index = 0;
		send_wr.wr.ud.remote_qkey = 0;
	}
	send_wr.wr.ud.mad_hdr = &mad_priv->mad.mad.mad_hdr;
	send_wr.wr_id = (unsigned long)agent_send_wr;

	pci_unmap_addr_set(agent_send_wr, mapping, gather_list.addr);

	/* Send */
	spin_lock_irqsave(&port_priv->send_list_lock, flags);
	if (ib_post_send_mad(mad_agent, &send_wr, &bad_send_wr)) {
		spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
		dma_unmap_single(mad_agent->device->dma_device,
				 pci_unmap_addr(agent_send_wr, mapping),
				 sizeof(mad_priv->mad),
				 DMA_TO_DEVICE);
		ib_destroy_ah(agent_send_wr->ah);
		kfree(agent_send_wr);
	} else {
		list_add_tail(&agent_send_wr->send_list,
			      &port_priv->send_posted_list);
		spin_unlock_irqrestore(&port_priv->send_list_lock, flags);
		ret = 0;
	}

out:
	return ret;
}

int agent_send(struct ib_mad_private *mad,
	       struct ib_grh *grh,
	       struct ib_wc *wc,
	       struct ib_device *device,
	       int port_num)
{
	struct ib_agent_port_private *port_priv;
	struct ib_mad_agent *mad_agent;

	port_priv = ib_get_agent_port(device, port_num, NULL);
	if (!port_priv) {
		printk(KERN_DEBUG SPFX "agent_send %s port %d not open\n",
		       device->name, port_num);
		return 1;
	}

	/* Get mad agent based on mgmt_class in MAD */
	switch (mad->mad.mad.mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
		mad_agent = port_priv->smp_agent;
		break;
	case IB_MGMT_CLASS_PERF_MGMT:
		mad_agent = port_priv->perf_mgmt_agent;
		break;
	default:
		return 1;
	}

	return agent_mad_send(mad_agent, port_priv, mad, grh, wc);
}

static void agent_send_handler(struct ib_mad_agent *mad_agent,
			       struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_agent_port_private *port_priv;
	struct ib_agent_send_wr *agent_send_wr;
	unsigned long flags;

	/* Find matching MAD agent */
	port_priv = ib_get_agent_port(NULL, 0, mad_agent);
	if (!port_priv) {
		printk(KERN_ERR SPFX "agent_send_handler: no matching MAD "
		       "agent %p\n", mad_agent);
		return;
	}

	agent_send_wr = (struct ib_agent_send_wr *)(unsigned long)
		mad_send_wc->wr_id;
	spin_lock_irqsave(&port_priv->send_list_lock, flags);
	/* Remove completed send from posted send MAD list */
	list_del(&agent_send_wr->send_list);
	spin_unlock_irqrestore(&port_priv->send_list_lock, flags);

	dma_unmap_single(mad_agent->device->dma_device,
			 pci_unmap_addr(agent_send_wr, mapping),
			 sizeof(agent_send_wr->mad->mad),
			 DMA_TO_DEVICE);

	ib_destroy_ah(agent_send_wr->ah);

	/*
	 * Release allocated memory: the MAD came from the ib_mad_cache
	 * slab, the tracking structure from kmalloc() in agent_mad_send(),
	 * so each goes back to its own allocator.
	 */
	kmem_cache_free(ib_mad_cache, agent_send_wr->mad);
	kfree(agent_send_wr);
}
int ib_agent_port_open(struct ib_device *device, int port_num)
{
	int ret;
	struct ib_agent_port_private *port_priv;
	unsigned long flags;

	/* First, check if port already open for SMI */
	port_priv = ib_get_agent_port(device, port_num, NULL);
	if (port_priv) {
		printk(KERN_DEBUG SPFX "%s port %d already open\n",
		       device->name, port_num);
		return 0;
	}

	/* Create new device info */
	port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv) {
		printk(KERN_ERR SPFX "No memory for ib_agent_port_private\n");
		ret = -ENOMEM;
		goto error1;
	}

	memset(port_priv, 0, sizeof *port_priv);
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->send_list_lock);
	INIT_LIST_HEAD(&port_priv->send_posted_list);

	/* Obtain send only MAD agent for SM class (SMI QP) */
	port_priv->smp_agent = ib_register_mad_agent(device, port_num,
						     IB_QPT_SMI,
						     NULL, 0,
						     &agent_send_handler,
						     NULL, NULL);
	if (IS_ERR(port_priv->smp_agent)) {
		ret = PTR_ERR(port_priv->smp_agent);
		goto error2;
	}

	/* Obtain send only MAD agent for PerfMgmt class (GSI QP) */
	port_priv->perf_mgmt_agent = ib_register_mad_agent(device, port_num,
							   IB_QPT_GSI,
							   NULL, 0,
							   &agent_send_handler,
							   NULL, NULL);
	if (IS_ERR(port_priv->perf_mgmt_agent)) {
		ret = PTR_ERR(port_priv->perf_mgmt_agent);
		goto error3;
	}

	spin_lock_irqsave(&ib_agent_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_agent_port_list);
	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);

	return 0;

error3:
	ib_unregister_mad_agent(port_priv->smp_agent);
error2:
	kfree(port_priv);
error1:
	return ret;
}

int ib_agent_port_close(struct ib_device *device, int port_num)
{
	struct ib_agent_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_agent_port_list_lock, flags);
	port_priv = __ib_get_agent_port(device, port_num, NULL);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
		printk(KERN_ERR SPFX "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);

	ib_unregister_mad_agent(port_priv->perf_mgmt_agent);
	ib_unregister_mad_agent(port_priv->smp_agent);
	kfree(port_priv);

	return 0;
}
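
/*
 * Usage sketch (an assumption about the caller, mirroring how the MAD
 * layer is expected to drive these entry points): ib_agent_port_open()
 * is called once per port when a device comes up, agent_send() sends a
 * response MAD back along the path described by a received work
 * completion, and ib_agent_port_close() undoes the open:
 *
 *	if (ib_agent_port_open(device, port_num))
 *		printk(KERN_ERR SPFX "Couldn't open %s port %d\n",
 *		       device->name, port_num);
 *	...
 *	ib_agent_port_close(device, port_num);
 */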