--- verbs.c (d6989d4bbe6c4d1c2a76696833a07f044e85694d)
+++ verbs.c (ed082d36a7b2c27d1cda55fdfb28af18040c4a89)
 /*
  * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
  * General Public License (GPL) Version 2, available from the file
  * COPYING in the main directory of this source tree, or the BSD-type
  * license below:

--- 37 unchanged lines hidden ---

  * o connections
  * o buffer memory
  */

 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/prefetch.h>
 #include <linux/sunrpc/addr.h>
-#include <linux/sunrpc/svc_rdma.h>
 #include <asm/bitops.h>
 #include <linux/module.h> /* try_module_get()/module_put() */

 #include "xprt_rdma.h"

 /*
  * Globals/Macros
  */

--- 319 unchanged lines hidden ---

 	ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
 	if (IS_ERR(ia->ri_id)) {
 		rc = PTR_ERR(ia->ri_id);
 		goto out1;
 	}
 	ia->ri_device = ia->ri_id->device;

-	ia->ri_pd = ib_alloc_pd(ia->ri_device);
+	ia->ri_pd = ib_alloc_pd(ia->ri_device, 0);
 	if (IS_ERR(ia->ri_pd)) {
 		rc = PTR_ERR(ia->ri_pd);
 		pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);
 		goto out2;
 	}

 	switch (memreg) {
 	case RPCRDMA_FRMR:

--- 520 unchanged lines hidden ---

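Note on the ib_alloc_pd() hunk above: the only change is the second argument. The ed082d36 side of this comparison uses the flags-taking variant of the verbs API and passes 0, i.e. no special protection-domain behavior. Below is a minimal sketch of a caller written against that two-argument form; my_setup_pd() is a hypothetical helper for illustration, not part of verbs.c.

#include <linux/err.h>
#include <rdma/ib_verbs.h>

/* Hypothetical caller of the two-argument ib_alloc_pd(). A flags
 * value of 0 requests an ordinary protection domain, matching the
 * call in the hunk above.
 */
static struct ib_pd *my_setup_pd(struct ib_device *device)
{
        struct ib_pd *pd;

        pd = ib_alloc_pd(device, 0);    /* was: ib_alloc_pd(device) */
        if (IS_ERR(pd))
                return NULL;            /* caller can log PTR_ERR(pd) */
        return pd;
}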
 			rc = PTR_ERR(req);
 			goto out;
 		}
 		req->rl_backchannel = false;
 		list_add(&req->rl_free, &buf->rb_send_bufs);
 	}

 	INIT_LIST_HEAD(&buf->rb_recv_bufs);
-	for (i = 0; i < buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS; i++) {
+	for (i = 0; i < buf->rb_max_requests; i++) {
 		struct rpcrdma_rep *rep;

 		rep = rpcrdma_create_rep(r_xprt);
 		if (IS_ERR(rep)) {
 			dprintk("RPC: %s: reply buffer %d alloc failed\n",
 				__func__, i);
 			rc = PTR_ERR(rep);
 			goto out;

--- 78 unchanged lines hidden ---

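The hunk above sizes the receive-buffer pool differently on the two sides: the d6989d4 side creates RPCRDMA_MAX_BC_REQUESTS extra reps, presumably reserving Receives for backward-direction calls, while the ed082d36 side creates exactly one rep per outstanding request. Both use the same allocate-everything-up-front shape, sketched below with hypothetical my_* names standing in for the rpcrdma types; as in the code above, the first failure aborts the create and the caller unwinds the partial pool.

#include <linux/list.h>
#include <linux/slab.h>

struct my_buf {
        struct list_head mb_list;
};

/* Sketch of the pre-allocation loop: build the whole pool at create
 * time so the hot path never allocates; return an error on the first
 * failed allocation and let the caller tear down what already exists.
 */
static int my_pool_create(struct list_head *pool, unsigned int count)
{
        unsigned int i;

        INIT_LIST_HEAD(pool);
        for (i = 0; i < count; i++) {
                struct my_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);

                if (!buf)
                        return -ENOMEM; /* caller unwinds the partial pool */
                list_add(&buf->mb_list, pool);
        }
        return 0;
}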
 	cancel_delayed_work_sync(&buf->rb_recovery_worker);

 	while (!list_empty(&buf->rb_recv_bufs)) {
 		struct rpcrdma_rep *rep;

 		rep = rpcrdma_buffer_get_rep_locked(buf);
 		rpcrdma_destroy_rep(ia, rep);
 	}
-	buf->rb_send_count = 0;

 	spin_lock(&buf->rb_reqslock);
 	while (!list_empty(&buf->rb_allreqs)) {
 		struct rpcrdma_req *req;

 		req = list_first_entry(&buf->rb_allreqs,
 				       struct rpcrdma_req, rl_all);
 		list_del(&req->rl_all);

 		spin_unlock(&buf->rb_reqslock);
 		rpcrdma_destroy_req(ia, req);
 		spin_lock(&buf->rb_reqslock);
 	}
 	spin_unlock(&buf->rb_reqslock);
-	buf->rb_recv_count = 0;

 	rpcrdma_destroy_mrs(buf);
 }

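Both sides of the teardown above walk rb_allreqs with the same lock-juggling idiom: detach one request while holding rb_reqslock, drop the lock so rpcrdma_destroy_req() can do work that is not allowed under a spinlock, then retake it before testing the list again. A generic sketch of the idiom follows, with hypothetical my_* names and kfree() standing in for the real destructor.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_buf {
        struct list_head mb_list;
};

/* Empty a spinlock-protected list when each entry's destructor must
 * run without the lock held. The list may change while the lock is
 * dropped, so list_empty() is re-tested after every relock.
 */
static void my_pool_teardown(struct list_head *pool, spinlock_t *lock)
{
        spin_lock(lock);
        while (!list_empty(pool)) {
                struct my_buf *buf;

                buf = list_first_entry(pool, struct my_buf, mb_list);
                list_del(&buf->mb_list);

                spin_unlock(lock);
                kfree(buf);     /* stand-in for a destructor that cannot
                                 * run under the spinlock */
                spin_lock(lock);
        }
        spin_unlock(lock);
}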
 struct rpcrdma_mw *
 rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
 {
 	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

--- 26 unchanged lines hidden ---

 {
 	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

 	spin_lock(&buf->rb_mwlock);
 	list_add_tail(&mw->mw_list, &buf->rb_mws);
 	spin_unlock(&buf->rb_mwlock);
 }

-static struct rpcrdma_rep *
-rpcrdma_buffer_get_rep(struct rpcrdma_buffer *buffers)
-{
-	/* If an RPC previously completed without a reply (say, a
-	 * credential problem or a soft timeout occurs) then hold off
-	 * on supplying more Receive buffers until the number of new
-	 * pending RPCs catches up to the number of posted Receives.
-	 */
-	if (unlikely(buffers->rb_send_count < buffers->rb_recv_count))
-		return NULL;
-
-	if (unlikely(list_empty(&buffers->rb_recv_bufs)))
-		return NULL;
-	buffers->rb_recv_count++;
-	return rpcrdma_buffer_get_rep_locked(buffers);
-}
-
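The block comment in the removed helper above is the heart of the d6989d4 accounting scheme: rb_send_count tracks requests handed out, rb_recv_count tracks Receives supplied, and a reply buffer is only handed back while the senders have caught up. A minimal sketch of just that hold-off test, assuming the same pair of counters; my_can_supply_receive() is an illustrative name, not from this file.

#include <linux/types.h>

/* Sketch of the hold-off test from rpcrdma_buffer_get_rep(): an RPC
 * that completes without consuming its reply leaves rb_recv_count
 * ahead of rb_send_count, and no new Receive is supplied until fresh
 * RPCs close the gap.
 */
static bool my_can_supply_receive(u32 send_count, u32 recv_count)
{
        return send_count >= recv_count;
}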
 /*
  * Get a set of request/reply buffers.
- *
- * Reply buffer (if available) is attached to send buffer upon return.
  */
 struct rpcrdma_req *
 rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
 {
 	struct rpcrdma_req *req;

 	spin_lock(&buffers->rb_lock);
 	if (list_empty(&buffers->rb_send_bufs))
 		goto out_reqbuf;
-	buffers->rb_send_count++;
 	req = rpcrdma_buffer_get_req_locked(buffers);
-	req->rl_reply = rpcrdma_buffer_get_rep(buffers);
+	if (list_empty(&buffers->rb_recv_bufs))
+		goto out_repbuf;
+	req->rl_reply = rpcrdma_buffer_get_rep_locked(buffers);
 	spin_unlock(&buffers->rb_lock);
 	return req;

 out_reqbuf:
 	spin_unlock(&buffers->rb_lock);
-	pr_warn("RPC: %s: out of request buffers\n", __func__);
+	pr_warn("rpcrdma: out of request buffers (%p)\n", buffers);
 	return NULL;
+out_repbuf:
+	list_add(&req->rl_free, &buffers->rb_send_bufs);
+	spin_unlock(&buffers->rb_lock);
+	pr_warn("rpcrdma: out of reply buffers (%p)\n", buffers);
+	return NULL;
 }

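On the ed082d36 side of the hunk above, out_repbuf must undo work already done: the request was taken off rb_send_bufs, so it is put back before the function reports failure, all inside one hold of rb_lock. The same take-then-roll-back shape in isolation, with hypothetical my_* names; unlike the real code, this sketch only checks for a reply buffer rather than also detaching and attaching it.

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_buf {
        struct list_head mb_list;
};

struct my_pool {
        spinlock_t mp_lock;
        struct list_head mp_reqs;       /* free request buffers */
        struct list_head mp_reps;       /* free reply buffers */
};

/* Take a request only when a reply buffer is also available; if it is
 * not, roll the request back so the pool stays consistent. A single
 * critical section covers both checks, as in rpcrdma_buffer_get().
 */
static struct my_buf *my_pool_get(struct my_pool *pool)
{
        struct my_buf *req = NULL;

        spin_lock(&pool->mp_lock);
        if (list_empty(&pool->mp_reqs))
                goto out;
        req = list_first_entry(&pool->mp_reqs, struct my_buf, mb_list);
        list_del(&req->mb_list);
        if (list_empty(&pool->mp_reps)) {
                list_add(&req->mb_list, &pool->mp_reqs);  /* roll back */
                req = NULL;
        }
out:
        spin_unlock(&pool->mp_lock);
        return req;
}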
 /*
  * Put request/reply buffers back into pool.
  * Pre-decrement counter/array index.
  */
 void
 rpcrdma_buffer_put(struct rpcrdma_req *req)
 {
 	struct rpcrdma_buffer *buffers = req->rl_buffer;
 	struct rpcrdma_rep *rep = req->rl_reply;

 	req->rl_niovs = 0;
 	req->rl_reply = NULL;

 	spin_lock(&buffers->rb_lock);
-	buffers->rb_send_count--;
 	list_add_tail(&req->rl_free, &buffers->rb_send_bufs);
-	if (rep) {
-		buffers->rb_recv_count--;
+	if (rep)
 		list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
-	}
 	spin_unlock(&buffers->rb_lock);
 }
1144
 /*
  * Recover reply buffers from pool.
  * This happens when recovering from disconnect.
  */
 void
 rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
 {
 	struct rpcrdma_buffer *buffers = req->rl_buffer;

 	spin_lock(&buffers->rb_lock);
-	req->rl_reply = rpcrdma_buffer_get_rep(buffers);
+	if (!list_empty(&buffers->rb_recv_bufs))
+		req->rl_reply = rpcrdma_buffer_get_rep_locked(buffers);
 	spin_unlock(&buffers->rb_lock);
 }

 /*
  * Put reply buffers back into pool when not attached to
  * request. This happens in error conditions.
  */
 void
 rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
 {
 	struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;

 	spin_lock(&buffers->rb_lock);
-	buffers->rb_recv_count--;
 	list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
 	spin_unlock(&buffers->rb_lock);
 }

 /*
  * Wrappers for internal-use kmalloc memory registration, used by buffer code.
  */

--- 185 unchanged lines hidden ---