/*
 * Copyright (c) 2005 Intel Inc. All rights reserved.
 * Copyright (c) 2005-2006 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mad_rmpp.c 1921 2005-03-02 22:58:44Z sean.hefty $
 */

#include <linux/dma-mapping.h>

#include "mad_priv.h"
#include "mad_rmpp.h"

enum rmpp_state {
	RMPP_STATE_ACTIVE,
	RMPP_STATE_TIMEOUT,
	RMPP_STATE_COMPLETE
};

struct mad_rmpp_recv {
	struct ib_mad_agent_private *agent;
	struct list_head list;
	struct work_struct timeout_work;
	struct work_struct cleanup_work;
	struct completion comp;
	enum rmpp_state state;
	spinlock_t lock;
	atomic_t refcount;

	struct ib_ah *ah;
	struct ib_mad_recv_wc *rmpp_wc;
	struct ib_mad_recv_buf *cur_seg_buf;
	int last_ack;
	int seg_num;
	int newwin;

	__be64 tid;
	u32 src_qp;
	u16 slid;
	u8 mgmt_class;
	u8 class_version;
	u8 method;
};

static inline void deref_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	if (atomic_dec_and_test(&rmpp_recv->refcount))
		complete(&rmpp_recv->comp);
}

static void destroy_rmpp_recv(struct mad_rmpp_recv *rmpp_recv)
{
	deref_rmpp_recv(rmpp_recv);
	wait_for_completion(&rmpp_recv->comp);
	ib_destroy_ah(rmpp_recv->ah);
	kfree(rmpp_recv);
}

void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
{
	struct mad_rmpp_recv *rmpp_recv, *temp_rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		cancel_delayed_work(&rmpp_recv->timeout_work);
		cancel_delayed_work(&rmpp_recv->cleanup_work);
	}
	spin_unlock_irqrestore(&agent->lock, flags);

	flush_workqueue(agent->qp_info->port_priv->wq);

	list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv,
				 &agent->rmpp_list, list) {
		list_del(&rmpp_recv->list);
		if (rmpp_recv->state != RMPP_STATE_COMPLETE)
			ib_free_recv_mad(rmpp_recv->rmpp_wc);
		destroy_rmpp_recv(rmpp_recv);
	}
}

static void format_ack(struct ib_mad_send_buf *msg,
		       struct ib_rmpp_mad *data,
		       struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_rmpp_mad *ack = msg->mad;
	unsigned long flags;

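	/* Build the ACK on top of the request's MAD header */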
	memcpy(ack, &data->mad_hdr, msg->hdr_len);

	ack->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	ack->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ACK;
	ib_set_rmpp_flags(&ack->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	rmpp_recv->last_ack = rmpp_recv->seg_num;
	ack->rmpp_hdr.seg_num = cpu_to_be32(rmpp_recv->seg_num);
	ack->rmpp_hdr.paylen_newwin = cpu_to_be32(rmpp_recv->newwin);
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
}

static void ack_recv(struct mad_rmpp_recv *rmpp_recv,
		     struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	int ret, hdr_len;

	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
	msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp,
				 recv_wc->wc->pkey_index, 1, hdr_len,
				 0, GFP_KERNEL);
	if (IS_ERR(msg))
		return;

	format_ack(msg, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv);
	msg->ah = rmpp_recv->ah;
	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		ib_free_send_mad(msg);
}

static struct ib_mad_send_buf *alloc_response_msg(struct ib_mad_agent *agent,
						  struct ib_mad_recv_wc *recv_wc)
{
	struct ib_mad_send_buf *msg;
	struct ib_ah *ah;
	int hdr_len;

	ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc,
				  recv_wc->recv_buf.grh, agent->port_num);
	if (IS_ERR(ah))
		return (void *) ah;

	hdr_len = ib_get_mad_data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class);
	msg = ib_create_send_mad(agent, recv_wc->wc->src_qp,
				 recv_wc->wc->pkey_index, 1,
				 hdr_len, 0, GFP_KERNEL);
	if (IS_ERR(msg))
		ib_destroy_ah(ah);
	else
		msg->ah = ah;

	return msg;
}

void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_rmpp_mad *rmpp_mad = mad_send_wc->send_buf->mad;

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_ACK)
		ib_destroy_ah(mad_send_wc->send_buf->ah);
	ib_free_send_mad(mad_send_wc->send_buf);
}

static void nack_recv(struct ib_mad_agent_private *agent,
		      struct ib_mad_recv_wc *recv_wc, u8 rmpp_status)
{
	struct ib_mad_send_buf *msg;
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	msg = alloc_response_msg(&agent->agent, recv_wc);
	if (IS_ERR(msg))
		return;

	rmpp_mad = msg->mad;
	memcpy(rmpp_mad, recv_wc->recv_buf.mad, msg->hdr_len);

	rmpp_mad->mad_hdr.method ^= IB_MGMT_METHOD_RESP;
	rmpp_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_ABORT;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.rmpp_status = rmpp_status;
	rmpp_mad->rmpp_hdr.seg_num = 0;
	rmpp_mad->rmpp_hdr.paylen_newwin = 0;

	ret = ib_post_send_mad(msg, NULL);
	if (ret) {
		ib_destroy_ah(msg->ah);
		ib_free_send_mad(msg);
	}
}

static void recv_timeout_handler(void *data)
{
	struct mad_rmpp_recv *rmpp_recv = data;
	struct ib_mad_recv_wc *rmpp_wc;
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	if (rmpp_recv->state != RMPP_STATE_ACTIVE) {
		spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
		return;
	}
	rmpp_recv->state = RMPP_STATE_TIMEOUT;
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);

	rmpp_wc = rmpp_recv->rmpp_wc;
	nack_recv(rmpp_recv->agent, rmpp_wc, IB_MGMT_RMPP_STATUS_T2L);
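	/* Release the tracking state and the partially reassembled MAD */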
	destroy_rmpp_recv(rmpp_recv);
	ib_free_recv_mad(rmpp_wc);
}

static void recv_cleanup_handler(void *data)
{
	struct mad_rmpp_recv *rmpp_recv = data;
	unsigned long flags;

	spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
	list_del(&rmpp_recv->list);
	spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
	destroy_rmpp_recv(rmpp_recv);
}

static struct mad_rmpp_recv *
create_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_hdr *mad_hdr;

	rmpp_recv = kmalloc(sizeof *rmpp_recv, GFP_KERNEL);
	if (!rmpp_recv)
		return NULL;

	rmpp_recv->ah = ib_create_ah_from_wc(agent->agent.qp->pd,
					     mad_recv_wc->wc,
					     mad_recv_wc->recv_buf.grh,
					     agent->agent.port_num);
	if (IS_ERR(rmpp_recv->ah))
		goto error;

	rmpp_recv->agent = agent;
	init_completion(&rmpp_recv->comp);
	INIT_WORK(&rmpp_recv->timeout_work, recv_timeout_handler, rmpp_recv);
	INIT_WORK(&rmpp_recv->cleanup_work, recv_cleanup_handler, rmpp_recv);
	spin_lock_init(&rmpp_recv->lock);
	rmpp_recv->state = RMPP_STATE_ACTIVE;
	atomic_set(&rmpp_recv->refcount, 1);

	rmpp_recv->rmpp_wc = mad_recv_wc;
	rmpp_recv->cur_seg_buf = &mad_recv_wc->recv_buf;
	rmpp_recv->newwin = 1;
	rmpp_recv->seg_num = 1;
	rmpp_recv->last_ack = 0;

	mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;
	rmpp_recv->tid = mad_hdr->tid;
	rmpp_recv->src_qp = mad_recv_wc->wc->src_qp;
	rmpp_recv->slid = mad_recv_wc->wc->slid;
	rmpp_recv->mgmt_class = mad_hdr->mgmt_class;
	rmpp_recv->class_version = mad_hdr->class_version;
	rmpp_recv->method = mad_hdr->method;
	return rmpp_recv;

error:	kfree(rmpp_recv);
	return NULL;
}

static struct mad_rmpp_recv *
find_rmpp_recv(struct ib_mad_agent_private *agent,
	       struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_hdr *mad_hdr = &mad_recv_wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
		if (rmpp_recv->tid == mad_hdr->tid &&
		    rmpp_recv->src_qp == mad_recv_wc->wc->src_qp &&
		    rmpp_recv->slid == mad_recv_wc->wc->slid &&
		    rmpp_recv->mgmt_class == mad_hdr->mgmt_class &&
		    rmpp_recv->class_version == mad_hdr->class_version &&
		    rmpp_recv->method == mad_hdr->method)
			return rmpp_recv;
	}
	return NULL;
}

static struct mad_rmpp_recv *
acquire_rmpp_recv(struct ib_mad_agent_private *agent,
		  struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	rmpp_recv = find_rmpp_recv(agent, mad_recv_wc);
	if (rmpp_recv)
		atomic_inc(&rmpp_recv->refcount);
	spin_unlock_irqrestore(&agent->lock, flags);
	return rmpp_recv;
}

static struct mad_rmpp_recv *
insert_rmpp_recv(struct ib_mad_agent_private *agent,
		 struct mad_rmpp_recv *rmpp_recv)
{
	struct mad_rmpp_recv *cur_rmpp_recv;

	cur_rmpp_recv = find_rmpp_recv(agent, rmpp_recv->rmpp_wc);
	if (!cur_rmpp_recv)
		list_add_tail(&rmpp_recv->list, &agent->rmpp_list);

	return cur_rmpp_recv;
}

static inline int get_last_flag(struct ib_mad_recv_buf *seg)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
	return ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_LAST;
}

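/* Return the segment number of a received segment in host byte order */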
static inline int get_seg_num(struct ib_mad_recv_buf *seg)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *) seg->mad;
	return be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
}

static inline struct ib_mad_recv_buf * get_next_seg(struct list_head *rmpp_list,
						    struct ib_mad_recv_buf *seg)
{
	if (seg->list.next == rmpp_list)
		return NULL;

	return container_of(seg->list.next, struct ib_mad_recv_buf, list);
}

static inline int window_size(struct ib_mad_agent_private *agent)
{
	return max(agent->qp_info->recv_queue.max_active >> 3, 1);
}

static struct ib_mad_recv_buf * find_seg_location(struct list_head *rmpp_list,
						  int seg_num)
{
	struct ib_mad_recv_buf *seg_buf;
	int cur_seg_num;

	list_for_each_entry_reverse(seg_buf, rmpp_list, list) {
		cur_seg_num = get_seg_num(seg_buf);
		if (seg_num > cur_seg_num)
			return seg_buf;
		if (seg_num == cur_seg_num)
			break;
	}
	return NULL;
}

static void update_seg_num(struct mad_rmpp_recv *rmpp_recv,
			   struct ib_mad_recv_buf *new_buf)
{
	struct list_head *rmpp_list = &rmpp_recv->rmpp_wc->rmpp_list;

	while (new_buf && (get_seg_num(new_buf) == rmpp_recv->seg_num + 1)) {
		rmpp_recv->cur_seg_buf = new_buf;
		rmpp_recv->seg_num++;
		new_buf = get_next_seg(rmpp_list, new_buf);
	}
}

static inline int get_mad_len(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_rmpp_mad *rmpp_mad;
	int hdr_size, data_size, pad;

	rmpp_mad = (struct ib_rmpp_mad *)rmpp_recv->cur_seg_buf->mad;

	hdr_size = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
	data_size = sizeof(struct ib_rmpp_mad) - hdr_size;
	pad = IB_MGMT_RMPP_DATA - be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
	if (pad > IB_MGMT_RMPP_DATA || pad < 0)
		pad = 0;

	return hdr_size + rmpp_recv->seg_num * data_size - pad;
}

static struct ib_mad_recv_wc * complete_rmpp(struct mad_rmpp_recv *rmpp_recv)
{
	struct ib_mad_recv_wc *rmpp_wc;

	ack_recv(rmpp_recv, rmpp_recv->rmpp_wc);
	if (rmpp_recv->seg_num > 1)
		cancel_delayed_work(&rmpp_recv->timeout_work);

	rmpp_wc = rmpp_recv->rmpp_wc;
	rmpp_wc->mad_len = get_mad_len(rmpp_recv);
	/* 10 seconds until we can find the packet lifetime */
	queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq,
			   &rmpp_recv->cleanup_work, msecs_to_jiffies(10000));
	return rmpp_wc;
}

static struct ib_mad_recv_wc *
continue_rmpp(struct ib_mad_agent_private *agent,
	      struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	struct ib_mad_recv_buf *prev_buf;
	struct ib_mad_recv_wc *done_wc;
	int seg_num;
	unsigned long flags;

	rmpp_recv = acquire_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv)
		goto drop1;

	seg_num = get_seg_num(&mad_recv_wc->recv_buf);

	spin_lock_irqsave(&rmpp_recv->lock, flags);
	if ((rmpp_recv->state == RMPP_STATE_TIMEOUT) ||
	    (seg_num > rmpp_recv->newwin))
		goto drop3;

	if ((seg_num <= rmpp_recv->last_ack) ||
	    (rmpp_recv->state == RMPP_STATE_COMPLETE)) {
		spin_unlock_irqrestore(&rmpp_recv->lock, flags);
		ack_recv(rmpp_recv, mad_recv_wc);
		goto drop2;
	}

	prev_buf = find_seg_location(&rmpp_recv->rmpp_wc->rmpp_list, seg_num);
	if (!prev_buf)
		goto drop3;

	done_wc = NULL;
	list_add(&mad_recv_wc->recv_buf.list, &prev_buf->list);
	if (rmpp_recv->cur_seg_buf == prev_buf) {
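		/* The new segment extends the in-order run */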
		update_seg_num(rmpp_recv, &mad_recv_wc->recv_buf);
		if (get_last_flag(rmpp_recv->cur_seg_buf)) {
			rmpp_recv->state = RMPP_STATE_COMPLETE;
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			done_wc = complete_rmpp(rmpp_recv);
			goto out;
		} else if (rmpp_recv->seg_num == rmpp_recv->newwin) {
			rmpp_recv->newwin += window_size(agent);
			spin_unlock_irqrestore(&rmpp_recv->lock, flags);
			ack_recv(rmpp_recv, mad_recv_wc);
			goto out;
		}
	}
	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
out:
	deref_rmpp_recv(rmpp_recv);
	return done_wc;

drop3:	spin_unlock_irqrestore(&rmpp_recv->lock, flags);
drop2:	deref_rmpp_recv(rmpp_recv);
drop1:	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

static struct ib_mad_recv_wc *
start_rmpp(struct ib_mad_agent_private *agent,
	   struct ib_mad_recv_wc *mad_recv_wc)
{
	struct mad_rmpp_recv *rmpp_recv;
	unsigned long flags;

	rmpp_recv = create_rmpp_recv(agent, mad_recv_wc);
	if (!rmpp_recv) {
		ib_free_recv_mad(mad_recv_wc);
		return NULL;
	}

	spin_lock_irqsave(&agent->lock, flags);
	if (insert_rmpp_recv(agent, rmpp_recv)) {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* duplicate first MAD */
		destroy_rmpp_recv(rmpp_recv);
		return continue_rmpp(agent, mad_recv_wc);
	}
	atomic_inc(&rmpp_recv->refcount);

	if (get_last_flag(&mad_recv_wc->recv_buf)) {
		rmpp_recv->state = RMPP_STATE_COMPLETE;
		spin_unlock_irqrestore(&agent->lock, flags);
		complete_rmpp(rmpp_recv);
	} else {
		spin_unlock_irqrestore(&agent->lock, flags);
		/* 40 seconds until we can find the packet lifetimes */
		queue_delayed_work(agent->qp_info->port_priv->wq,
				   &rmpp_recv->timeout_work,
				   msecs_to_jiffies(40000));
		rmpp_recv->newwin += window_size(agent);
		ack_recv(rmpp_recv, mad_recv_wc);
		mad_recv_wc = NULL;
	}
	deref_rmpp_recv(rmpp_recv);
	return mad_recv_wc;
}

static int send_next_seg(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int timeout;
	u32 paylen = 0;

	rmpp_mad = mad_send_wr->send_buf.mad;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);
	rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(++mad_send_wr->seg_num);

	if (mad_send_wr->seg_num == 1) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_FIRST;
		paylen = mad_send_wr->send_buf.seg_count * IB_MGMT_RMPP_DATA -
			 mad_send_wr->pad;
	}

	if (mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count) {
		rmpp_mad->rmpp_hdr.rmpp_rtime_flags |= IB_MGMT_RMPP_FLAG_LAST;
		paylen = IB_MGMT_RMPP_DATA - mad_send_wr->pad;
	}
	rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(paylen);

	/* 2 seconds for an ACK until we can find the packet lifetime */
	timeout = mad_send_wr->send_buf.timeout_ms;
	if (!timeout || timeout > 2000)
		mad_send_wr->timeout = msecs_to_jiffies(2000);

	return ib_send_mad(mad_send_wr);
}

static void abort_send(struct ib_mad_agent_private *agent,
		       struct ib_mad_recv_wc *mad_recv_wc, u8 rmpp_status)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc wc;
	unsigned long flags;

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
	if (!mad_send_wr)
		goto out;	/* Unmatched send */

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

	ib_mark_mad_done(mad_send_wr);
	spin_unlock_irqrestore(&agent->lock, flags);

	wc.status = IB_WC_REM_ABORT_ERR;
	wc.vendor_err = rmpp_status;
	wc.send_buf = &mad_send_wr->send_buf;
	ib_mad_complete_send_wr(mad_send_wr, &wc);
	return;
out:
	spin_unlock_irqrestore(&agent->lock, flags);
}

static inline void adjust_last_ack(struct ib_mad_send_wr_private *wr,
				   int seg_num)
{
	struct list_head *list;

	wr->last_ack = seg_num;
	list = &wr->last_ack_seg->list;
	list_for_each_entry(wr->last_ack_seg, list, list)
		if (wr->last_ack_seg->num == seg_num)
			break;
}

static void process_rmpp_ack(struct ib_mad_agent_private *agent,
			     struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_rmpp_mad *rmpp_mad;
	unsigned long flags;
	int seg_num, newwin, ret;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (rmpp_mad->rmpp_hdr.rmpp_status) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		return;
	}

	seg_num = be32_to_cpu(rmpp_mad->rmpp_hdr.seg_num);
	newwin = be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin);
	if (newwin < seg_num) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_W2S);
		return;
	}

	spin_lock_irqsave(&agent->lock, flags);
	mad_send_wr = ib_find_send_mad(agent, mad_recv_wc);
	if (!mad_send_wr)
		goto out;	/* Unmatched ACK */

	if ((mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) ||
	    (!mad_send_wr->timeout) || (mad_send_wr->status != IB_WC_SUCCESS))
		goto out;	/* Send is already done */

	if (seg_num > mad_send_wr->send_buf.seg_count ||
	    seg_num > mad_send_wr->newwin) {
		spin_unlock_irqrestore(&agent->lock, flags);
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_S2B);
		return;
	}

	if (newwin < mad_send_wr->newwin || seg_num < mad_send_wr->last_ack)
		goto out;	/* Old ACK */

	if (seg_num > mad_send_wr->last_ack) {
		adjust_last_ack(mad_send_wr, seg_num);
		mad_send_wr->retries = mad_send_wr->send_buf.retries;
	}
	mad_send_wr->newwin = newwin;
	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
		/* If no response is expected, the ACK completes the send */
		if (!mad_send_wr->send_buf.timeout_ms) {
			struct ib_mad_send_wc wc;

			ib_mark_mad_done(mad_send_wr);
			spin_unlock_irqrestore(&agent->lock, flags);

			wc.status = IB_WC_SUCCESS;
			wc.vendor_err = 0;
			wc.send_buf = &mad_send_wr->send_buf;
			ib_mad_complete_send_wr(mad_send_wr, &wc);
			return;
		}
		if (mad_send_wr->refcount == 1)
			ib_reset_mad_timeout(mad_send_wr,
					     mad_send_wr->send_buf.timeout_ms);
	} else if (mad_send_wr->refcount == 1 &&
		   mad_send_wr->seg_num < mad_send_wr->newwin &&
		   mad_send_wr->seg_num < mad_send_wr->send_buf.seg_count) {
		/* Send failure will just result in a timeout/retry */
		ret = send_next_seg(mad_send_wr);
		if (ret)
			goto out;

		mad_send_wr->refcount++;
		list_move_tail(&mad_send_wr->agent_list,
			       &mad_send_wr->mad_agent_priv->send_list);
	}
out:
	spin_unlock_irqrestore(&agent->lock, flags);
}

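/*
 * Process an incoming DATA segment.  The first segment starts a new
 * reassembly; later segments continue an existing one.
 */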
static struct ib_mad_recv_wc *
process_rmpp_data(struct ib_mad_agent_private *agent,
		  struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_hdr *rmpp_hdr;
	u8 rmpp_status;

	rmpp_hdr = &((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr;

	if (rmpp_hdr->rmpp_status) {
		rmpp_status = IB_MGMT_RMPP_STATUS_BAD_STATUS;
		goto bad;
	}

	if (rmpp_hdr->seg_num == __constant_htonl(1)) {
		if (!(ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST)) {
			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
			goto bad;
		}
		return start_rmpp(agent, mad_recv_wc);
	} else {
		if (ib_get_rmpp_flags(rmpp_hdr) & IB_MGMT_RMPP_FLAG_FIRST) {
			rmpp_status = IB_MGMT_RMPP_STATUS_BAD_SEG;
			goto bad;
		}
		return continue_rmpp(agent, mad_recv_wc);
	}
bad:
	nack_recv(agent, mad_recv_wc, rmpp_status);
	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

static void process_rmpp_stop(struct ib_mad_agent_private *agent,
			      struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

	if (rmpp_mad->rmpp_hdr.rmpp_status != IB_MGMT_RMPP_STATUS_RESX) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
	} else
		abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}

static void process_rmpp_abort(struct ib_mad_agent_private *agent,
			       struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;

	if (rmpp_mad->rmpp_hdr.rmpp_status < IB_MGMT_RMPP_STATUS_ABORT_MIN ||
	    rmpp_mad->rmpp_hdr.rmpp_status > IB_MGMT_RMPP_STATUS_ABORT_MAX) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BAD_STATUS);
	} else
		abort_send(agent, mad_recv_wc, rmpp_mad->rmpp_hdr.rmpp_status);
}

struct ib_mad_recv_wc *
ib_process_rmpp_recv_wc(struct ib_mad_agent_private *agent,
			struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad;
	if (!(rmpp_mad->rmpp_hdr.rmpp_rtime_flags & IB_MGMT_RMPP_FLAG_ACTIVE))
		return mad_recv_wc;

	if (rmpp_mad->rmpp_hdr.rmpp_version != IB_MGMT_RMPP_VERSION) {
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_UNV);
		goto out;
	}

	switch (rmpp_mad->rmpp_hdr.rmpp_type) {
	case IB_MGMT_RMPP_TYPE_DATA:
		return process_rmpp_data(agent, mad_recv_wc);
	case IB_MGMT_RMPP_TYPE_ACK:
		process_rmpp_ack(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_STOP:
		process_rmpp_stop(agent, mad_recv_wc);
		break;
	case IB_MGMT_RMPP_TYPE_ABORT:
		process_rmpp_abort(agent, mad_recv_wc);
		break;
	default:
		abort_send(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
		nack_recv(agent, mad_recv_wc, IB_MGMT_RMPP_STATUS_BADT);
		break;
	}
out:
	ib_free_recv_mad(mad_recv_wc);
	return NULL;
}

int ib_send_rmpp_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED;

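	/* ACK, STOP, or ABORT MADs are sent as a single segment */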
	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) {
		mad_send_wr->seg_num = 1;
		return IB_RMPP_RESULT_INTERNAL;
	}

	mad_send_wr->newwin = 1;

	/* We need to wait for the final ACK even if there isn't a response */
	mad_send_wr->refcount += (mad_send_wr->timeout == 0);
	ret = send_next_seg(mad_send_wr);
	if (!ret)
		return IB_RMPP_RESULT_CONSUMED;
	return ret;
}

int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr,
			    struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA)
		return IB_RMPP_RESULT_INTERNAL;	 /* ACK, STOP, or ABORT */

	if (mad_send_wc->status != IB_WC_SUCCESS ||
	    mad_send_wr->status != IB_WC_SUCCESS)
		return IB_RMPP_RESULT_PROCESSED; /* Canceled or send error */

	if (!mad_send_wr->timeout)
		return IB_RMPP_RESULT_PROCESSED; /* Response received */

	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count) {
		mad_send_wr->timeout =
			msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);
		return IB_RMPP_RESULT_PROCESSED; /* Send done */
	}

	if (mad_send_wr->seg_num == mad_send_wr->newwin ||
	    mad_send_wr->seg_num == mad_send_wr->send_buf.seg_count)
		return IB_RMPP_RESULT_PROCESSED; /* Wait for ACK */

	ret = send_next_seg(mad_send_wr);
	if (ret) {
		mad_send_wc->status = IB_WC_GENERAL_ERR;
		return IB_RMPP_RESULT_PROCESSED;
	}
	return IB_RMPP_RESULT_CONSUMED;
}

int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_mad *rmpp_mad;
	int ret;

	rmpp_mad = mad_send_wr->send_buf.mad;
	if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
	      IB_MGMT_RMPP_FLAG_ACTIVE))
		return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */

	if (mad_send_wr->last_ack == mad_send_wr->send_buf.seg_count)
		return IB_RMPP_RESULT_PROCESSED;

	mad_send_wr->seg_num = mad_send_wr->last_ack;
	mad_send_wr->cur_seg = mad_send_wr->last_ack_seg;

	ret = send_next_seg(mad_send_wr);
	if (ret)
		return IB_RMPP_RESULT_PROCESSED;

	return IB_RMPP_RESULT_CONSUMED;
}