Lines Matching full:sc

18 struct smbdirect_socket *sc = &conn->socket; in smbd_get_parameters() local
20 return &sc->parameters; in smbd_get_parameters()
24 struct smbdirect_socket *sc);
26 struct smbdirect_socket *sc,
28 static int allocate_receive_buffers(struct smbdirect_socket *sc, int num_buf);
29 static void destroy_receive_buffers(struct smbdirect_socket *sc);
32 struct smbdirect_socket *sc,
35 struct smbdirect_socket *sc);
38 struct smbdirect_socket *sc,
41 static int smbd_post_send_empty(struct smbdirect_socket *sc);
43 static void destroy_mr_list(struct smbdirect_socket *sc);
44 static int allocate_mr_list(struct smbdirect_socket *sc);
168 static void smbd_disconnect_wake_up_all(struct smbdirect_socket *sc) in smbd_disconnect_wake_up_all() argument
174 wake_up_all(&sc->status_wait); in smbd_disconnect_wake_up_all()
175 wake_up_all(&sc->send_io.lcredits.wait_queue); in smbd_disconnect_wake_up_all()
176 wake_up_all(&sc->send_io.credits.wait_queue); in smbd_disconnect_wake_up_all()
177 wake_up_all(&sc->send_io.pending.dec_wait_queue); in smbd_disconnect_wake_up_all()
178 wake_up_all(&sc->send_io.pending.zero_wait_queue); in smbd_disconnect_wake_up_all()
179 wake_up_all(&sc->recv_io.reassembly.wait_queue); in smbd_disconnect_wake_up_all()
180 wake_up_all(&sc->mr_io.ready.wait_queue); in smbd_disconnect_wake_up_all()
181 wake_up_all(&sc->mr_io.cleanup.wait_queue); in smbd_disconnect_wake_up_all()
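
Lines 168-181 above are smbd_disconnect_wake_up_all(), which broadcasts on every wait queue hanging off the socket so that threads blocked on status changes, send credits, pending sends, reassembly data or MR availability re-check sc->status and can bail out. A minimal user-space sketch of the same wake-everyone-on-teardown pattern, using pthreads rather than the kernel's wait_queue_head_t (the demo_* names are illustrative, not the driver's API):

    #include <pthread.h>
    #include <stdbool.h>

    /* Illustrative stand-in for the socket's status/wait state. */
    struct demo_sock {
        pthread_mutex_t lock;
        pthread_cond_t  status_wait;    /* one of several wait queues */
        pthread_cond_t  credits_wait;
        bool            disconnected;
    };

    /* Broadcast on every condition so all sleepers re-check the status. */
    static void demo_disconnect_wake_up_all(struct demo_sock *s)
    {
        pthread_mutex_lock(&s->lock);
        s->disconnected = true;
        pthread_cond_broadcast(&s->status_wait);
        pthread_cond_broadcast(&s->credits_wait);
        pthread_mutex_unlock(&s->lock);
    }

    /* A waiter sleeps until it gets what it needs or sees the disconnect. */
    static bool demo_wait_for_credit(struct demo_sock *s, int *credits)
    {
        pthread_mutex_lock(&s->lock);
        while (*credits == 0 && !s->disconnected)
            pthread_cond_wait(&s->credits_wait, &s->lock);
        bool ok = !s->disconnected;
        if (ok)
            (*credits)--;
        pthread_mutex_unlock(&s->lock);
        return ok;
    }
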
186 struct smbdirect_socket *sc = in smbd_disconnect_rdma_work() local
194 disable_work(&sc->disconnect_work); in smbd_disconnect_rdma_work()
195 disable_work(&sc->recv_io.posted.refill_work); in smbd_disconnect_rdma_work()
196 disable_work(&sc->mr_io.recovery_work); in smbd_disconnect_rdma_work()
197 disable_work(&sc->idle.immediate_work); in smbd_disconnect_rdma_work()
198 disable_delayed_work(&sc->idle.timer_work); in smbd_disconnect_rdma_work()
200 if (sc->first_error == 0) in smbd_disconnect_rdma_work()
201 sc->first_error = -ECONNABORTED; in smbd_disconnect_rdma_work()
203 switch (sc->status) { in smbd_disconnect_rdma_work()
209 sc->status = SMBDIRECT_SOCKET_DISCONNECTING; in smbd_disconnect_rdma_work()
210 rdma_disconnect(sc->rdma.cm_id); in smbd_disconnect_rdma_work()
227 sc->status = SMBDIRECT_SOCKET_DISCONNECTED; in smbd_disconnect_rdma_work()
240 smbd_disconnect_wake_up_all(sc); in smbd_disconnect_rdma_work()
243 static void smbd_disconnect_rdma_connection(struct smbdirect_socket *sc) in smbd_disconnect_rdma_connection() argument
250 disable_work(&sc->recv_io.posted.refill_work); in smbd_disconnect_rdma_connection()
251 disable_work(&sc->mr_io.recovery_work); in smbd_disconnect_rdma_connection()
252 disable_work(&sc->idle.immediate_work); in smbd_disconnect_rdma_connection()
253 disable_delayed_work(&sc->idle.timer_work); in smbd_disconnect_rdma_connection()
255 if (sc->first_error == 0) in smbd_disconnect_rdma_connection()
256 sc->first_error = -ECONNABORTED; in smbd_disconnect_rdma_connection()
258 switch (sc->status) { in smbd_disconnect_rdma_connection()
274 sc->status = SMBDIRECT_SOCKET_RESOLVE_ADDR_FAILED; in smbd_disconnect_rdma_connection()
279 sc->status = SMBDIRECT_SOCKET_RESOLVE_ROUTE_FAILED; in smbd_disconnect_rdma_connection()
284 sc->status = SMBDIRECT_SOCKET_RDMA_CONNECT_FAILED; in smbd_disconnect_rdma_connection()
289 sc->status = SMBDIRECT_SOCKET_NEGOTIATE_FAILED; in smbd_disconnect_rdma_connection()
293 sc->status = SMBDIRECT_SOCKET_DISCONNECTED; in smbd_disconnect_rdma_connection()
297 sc->status = SMBDIRECT_SOCKET_ERROR; in smbd_disconnect_rdma_connection()
305 smbd_disconnect_wake_up_all(sc); in smbd_disconnect_rdma_connection()
307 queue_work(sc->workqueue, &sc->disconnect_work); in smbd_disconnect_rdma_connection()
314 struct smbdirect_socket *sc = id->context; in smbd_conn_upcall() local
315 struct smbdirect_socket_parameters *sp = &sc->parameters; in smbd_conn_upcall()
325 WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_RESOLVE_ADDR_RUNNING); in smbd_conn_upcall()
326 sc->status = SMBDIRECT_SOCKET_RESOLVE_ROUTE_NEEDED; in smbd_conn_upcall()
327 wake_up(&sc->status_wait); in smbd_conn_upcall()
331 WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_RESOLVE_ROUTE_RUNNING); in smbd_conn_upcall()
332 sc->status = SMBDIRECT_SOCKET_RDMA_CONNECT_NEEDED; in smbd_conn_upcall()
333 wake_up(&sc->status_wait); in smbd_conn_upcall()
338 WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_RESOLVE_ADDR_RUNNING); in smbd_conn_upcall()
339 sc->status = SMBDIRECT_SOCKET_RESOLVE_ADDR_FAILED; in smbd_conn_upcall()
340 smbd_disconnect_rdma_work(&sc->disconnect_work); in smbd_conn_upcall()
345 WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_RESOLVE_ROUTE_RUNNING); in smbd_conn_upcall()
346 sc->status = SMBDIRECT_SOCKET_RESOLVE_ROUTE_FAILED; in smbd_conn_upcall()
347 smbd_disconnect_rdma_work(&sc->disconnect_work); in smbd_conn_upcall()
411 sc->rdma.legacy_iwarp = true; in smbd_conn_upcall()
431 WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING); in smbd_conn_upcall()
432 sc->status = SMBDIRECT_SOCKET_NEGOTIATE_NEEDED; in smbd_conn_upcall()
433 wake_up(&sc->status_wait); in smbd_conn_upcall()
440 WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING); in smbd_conn_upcall()
441 sc->status = SMBDIRECT_SOCKET_RDMA_CONNECT_FAILED; in smbd_conn_upcall()
442 smbd_disconnect_rdma_work(&sc->disconnect_work); in smbd_conn_upcall()
448 if (sc->status == SMBDIRECT_SOCKET_NEGOTIATE_FAILED) { in smbd_conn_upcall()
452 sc->status = SMBDIRECT_SOCKET_DISCONNECTED; in smbd_conn_upcall()
453 smbd_disconnect_rdma_work(&sc->disconnect_work); in smbd_conn_upcall()
469 struct smbdirect_socket *sc = context; in smbd_qp_async_error_upcall() local
472 ib_event_msg(event->event), event->device->name, sc); in smbd_qp_async_error_upcall()
477 smbd_disconnect_rdma_connection(sc); in smbd_qp_async_error_upcall()
501 struct smbdirect_socket *sc = request->socket; in send_done() local
508 ib_dma_unmap_single(sc->ib.dev, in send_done()
512 mempool_free(request, sc->send_io.mem.pool); in send_done()
519 smbd_disconnect_rdma_connection(sc); in send_done()
523 atomic_add(lcredits, &sc->send_io.lcredits.count); in send_done()
524 wake_up(&sc->send_io.lcredits.wait_queue); in send_done()
526 if (atomic_dec_and_test(&sc->send_io.pending.count)) in send_done()
527 wake_up(&sc->send_io.pending.zero_wait_queue); in send_done()
529 wake_up(&sc->send_io.pending.dec_wait_queue); in send_done()
550 struct smbdirect_socket *sc = response->socket; in process_negotiation_response() local
551 struct smbdirect_socket_parameters *sp = &sc->parameters; in process_negotiation_response()
570 sc->recv_io.credits.target = le16_to_cpu(packet->credits_requested); in process_negotiation_response()
571 sc->recv_io.credits.target = min_t(u16, sc->recv_io.credits.target, sp->recv_credit_max); in process_negotiation_response()
577 atomic_set(&sc->send_io.lcredits.count, sp->send_credit_target); in process_negotiation_response()
578 atomic_set(&sc->send_io.credits.count, le16_to_cpu(packet->credits_granted)); in process_negotiation_response()
610 sc->recv_io.expected = SMBDIRECT_EXPECT_DATA_TRANSFER; in process_negotiation_response()
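
Lines 570-578 are process_negotiation_response() deriving the local receive-credit target from the peer's credits_requested, capped at the configured recv_credit_max, and seeding the local and remote send-credit counters (lines 577-578); the per-packet path in recv_done() (lines 745-751) re-derives the target and additionally enforces a floor of 1. The clamp itself, as a stand-alone sketch (helper name assumed, floor folded in for completeness):

    #include <stdint.h>

    /* Cap the peer's requested credits at our configured maximum,
     * but never drop below 1 so the connection can make progress. */
    static uint16_t clamp_credit_target(uint16_t requested, uint16_t max_configured)
    {
        uint16_t target = requested < max_configured ? requested : max_configured;
        return target ? target : 1;
    }
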
618 struct smbdirect_socket *sc = in smbd_post_send_credits() local
621 if (sc->status != SMBDIRECT_SOCKET_CONNECTED) { in smbd_post_send_credits()
625 if (sc->recv_io.credits.target > in smbd_post_send_credits()
626 atomic_read(&sc->recv_io.credits.count)) { in smbd_post_send_credits()
628 response = get_receive_buffer(sc); in smbd_post_send_credits()
633 rc = smbd_post_recv(sc, response); in smbd_post_send_credits()
637 put_receive_buffer(sc, response); in smbd_post_send_credits()
641 atomic_inc(&sc->recv_io.posted.count); in smbd_post_send_credits()
646 if (atomic_read(&sc->recv_io.credits.count) < in smbd_post_send_credits()
647 sc->recv_io.credits.target - 1) { in smbd_post_send_credits()
649 queue_work(sc->workqueue, &sc->idle.immediate_work); in smbd_post_send_credits()
659 struct smbdirect_socket *sc = response->socket; in recv_done() local
660 struct smbdirect_socket_parameters *sp = &sc->parameters; in recv_done()
669 response, sc->recv_io.expected, in recv_done()
690 sc->idle.keepalive = SMBDIRECT_KEEPALIVE_NONE; in recv_done()
691 mod_delayed_work(sc->workqueue, &sc->idle.timer_work, in recv_done()
694 switch (sc->recv_io.expected) { in recv_done()
698 sc->recv_io.reassembly.full_packet_received = true; in recv_done()
701 put_receive_buffer(sc, response); in recv_done()
702 WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_NEGOTIATE_RUNNING); in recv_done()
704 sc->status = SMBDIRECT_SOCKET_NEGOTIATE_FAILED; in recv_done()
705 smbd_disconnect_rdma_connection(sc); in recv_done()
707 sc->status = SMBDIRECT_SOCKET_CONNECTED; in recv_done()
708 wake_up(&sc->status_wait); in recv_done()
734 if (sc->recv_io.reassembly.full_packet_received) in recv_done()
738 sc->recv_io.reassembly.full_packet_received = false; in recv_done()
740 sc->recv_io.reassembly.full_packet_received = true; in recv_done()
743 atomic_dec(&sc->recv_io.posted.count); in recv_done()
744 atomic_dec(&sc->recv_io.credits.count); in recv_done()
745 old_recv_credit_target = sc->recv_io.credits.target; in recv_done()
746 sc->recv_io.credits.target = in recv_done()
748 sc->recv_io.credits.target = in recv_done()
749 min_t(u16, sc->recv_io.credits.target, sp->recv_credit_max); in recv_done()
750 sc->recv_io.credits.target = in recv_done()
751 max_t(u16, sc->recv_io.credits.target, 1); in recv_done()
754 &sc->send_io.credits.count); in recv_done()
759 wake_up(&sc->send_io.credits.wait_queue); in recv_done()
772 queue_work(sc->workqueue, &sc->idle.immediate_work); in recv_done()
780 if (sc->recv_io.credits.target > old_recv_credit_target) in recv_done()
781 queue_work(sc->workqueue, &sc->recv_io.posted.refill_work); in recv_done()
783 enqueue_reassembly(sc, response, data_length); in recv_done()
784 wake_up(&sc->recv_io.reassembly.wait_queue); in recv_done()
786 put_receive_buffer(sc, response); in recv_done()
798 log_rdma_recv(ERR, "unexpected response type=%d\n", sc->recv_io.expected); in recv_done()
799 WARN_ON_ONCE(sc->recv_io.expected != SMBDIRECT_EXPECT_DATA_TRANSFER); in recv_done()
801 put_receive_buffer(sc, response); in recv_done()
802 smbd_disconnect_rdma_connection(sc); in recv_done()
806 struct smbdirect_socket *sc, in smbd_create_id() argument
809 struct smbdirect_socket_parameters *sp = &sc->parameters; in smbd_create_id()
814 id = rdma_create_id(&init_net, smbd_conn_upcall, sc, in smbd_create_id()
829 WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_RESOLVE_ADDR_NEEDED); in smbd_create_id()
830 sc->status = SMBDIRECT_SOCKET_RESOLVE_ADDR_RUNNING; in smbd_create_id()
838 sc->status_wait, in smbd_create_id()
839 sc->status != SMBDIRECT_SOCKET_RESOLVE_ADDR_RUNNING, in smbd_create_id()
846 if (sc->status == SMBDIRECT_SOCKET_RESOLVE_ADDR_RUNNING) { in smbd_create_id()
851 if (sc->status != SMBDIRECT_SOCKET_RESOLVE_ROUTE_NEEDED) { in smbd_create_id()
857 WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_RESOLVE_ROUTE_NEEDED); in smbd_create_id()
858 sc->status = SMBDIRECT_SOCKET_RESOLVE_ROUTE_RUNNING; in smbd_create_id()
865 sc->status_wait, in smbd_create_id()
866 sc->status != SMBDIRECT_SOCKET_RESOLVE_ROUTE_RUNNING, in smbd_create_id()
873 if (sc->status == SMBDIRECT_SOCKET_RESOLVE_ROUTE_RUNNING) { in smbd_create_id()
878 if (sc->status != SMBDIRECT_SOCKET_RDMA_CONNECT_NEEDED) { in smbd_create_id()
906 struct smbdirect_socket *sc, in smbd_ia_open() argument
909 struct smbdirect_socket_parameters *sp = &sc->parameters; in smbd_ia_open()
912 WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_CREATED); in smbd_ia_open()
913 sc->status = SMBDIRECT_SOCKET_RESOLVE_ADDR_NEEDED; in smbd_ia_open()
915 sc->rdma.cm_id = smbd_create_id(sc, dstaddr, port); in smbd_ia_open()
916 if (IS_ERR(sc->rdma.cm_id)) { in smbd_ia_open()
917 rc = PTR_ERR(sc->rdma.cm_id); in smbd_ia_open()
920 sc->ib.dev = sc->rdma.cm_id->device; in smbd_ia_open()
922 if (!frwr_is_supported(&sc->ib.dev->attrs)) { in smbd_ia_open()
925 sc->ib.dev->attrs.device_cap_flags, in smbd_ia_open()
926 sc->ib.dev->attrs.max_fast_reg_page_list_len); in smbd_ia_open()
932 sc->ib.dev->attrs.max_fast_reg_page_list_len); in smbd_ia_open()
933 sc->mr_io.type = IB_MR_TYPE_MEM_REG; in smbd_ia_open()
934 if (sc->ib.dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG) in smbd_ia_open()
935 sc->mr_io.type = IB_MR_TYPE_SG_GAPS; in smbd_ia_open()
940 rdma_destroy_id(sc->rdma.cm_id); in smbd_ia_open()
941 sc->rdma.cm_id = NULL; in smbd_ia_open()
953 static int smbd_post_send_negotiate_req(struct smbdirect_socket *sc) in smbd_post_send_negotiate_req() argument
955 struct smbdirect_socket_parameters *sp = &sc->parameters; in smbd_post_send_negotiate_req()
961 request = mempool_alloc(sc->send_io.mem.pool, GFP_KERNEL); in smbd_post_send_negotiate_req()
965 request->socket = sc; in smbd_post_send_negotiate_req()
979 sc->ib.dev, (void *)packet, in smbd_post_send_negotiate_req()
981 if (ib_dma_mapping_error(sc->ib.dev, request->sge[0].addr)) { in smbd_post_send_negotiate_req()
987 request->sge[0].lkey = sc->ib.pd->local_dma_lkey; in smbd_post_send_negotiate_req()
990 sc->ib.dev, request->sge[0].addr, in smbd_post_send_negotiate_req()
1006 atomic_inc(&sc->send_io.pending.count); in smbd_post_send_negotiate_req()
1007 rc = ib_post_send(sc->ib.qp, &send_wr, NULL); in smbd_post_send_negotiate_req()
1013 atomic_dec(&sc->send_io.pending.count); in smbd_post_send_negotiate_req()
1014 ib_dma_unmap_single(sc->ib.dev, request->sge[0].addr, in smbd_post_send_negotiate_req()
1017 smbd_disconnect_rdma_connection(sc); in smbd_post_send_negotiate_req()
1020 mempool_free(request, sc->send_io.mem.pool); in smbd_post_send_negotiate_req()
1032 static int manage_credits_prior_sending(struct smbdirect_socket *sc) in manage_credits_prior_sending() argument
1036 if (atomic_read(&sc->recv_io.credits.count) >= sc->recv_io.credits.target) in manage_credits_prior_sending()
1039 new_credits = atomic_read(&sc->recv_io.posted.count); in manage_credits_prior_sending()
1043 new_credits -= atomic_read(&sc->recv_io.credits.count); in manage_credits_prior_sending()
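
Lines 1036-1043 are manage_credits_prior_sending(): no new receive credits are granted while the outstanding grant already meets the target, otherwise the grant is the difference between receives actually posted and credits already handed out. A simplified reading of that arithmetic with C11 atomics (names illustrative):

    #include <stdatomic.h>

    /* How many fresh credits can be advertised in the next send:
     * only grant when below target, and never promise more receives
     * than are actually posted. */
    static int demo_credits_to_grant(atomic_int *granted, atomic_int *posted, int target)
    {
        int cur = atomic_load(granted);

        if (cur >= target)
            return 0;

        int avail = atomic_load(posted) - cur;
        return avail > 0 ? avail : 0;
    }
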
1059 static int manage_keep_alive_before_sending(struct smbdirect_socket *sc) in manage_keep_alive_before_sending() argument
1061 struct smbdirect_socket_parameters *sp = &sc->parameters; in manage_keep_alive_before_sending()
1063 if (sc->idle.keepalive == SMBDIRECT_KEEPALIVE_PENDING) { in manage_keep_alive_before_sending()
1064 sc->idle.keepalive = SMBDIRECT_KEEPALIVE_SENT; in manage_keep_alive_before_sending()
1069 mod_delayed_work(sc->workqueue, &sc->idle.timer_work, in manage_keep_alive_before_sending()
1077 static int smbd_post_send(struct smbdirect_socket *sc, in smbd_post_send() argument
1088 sc->ib.dev, in smbd_post_send()
1103 rc = ib_post_send(sc->ib.qp, &send_wr, NULL); in smbd_post_send()
1106 smbd_disconnect_rdma_connection(sc); in smbd_post_send()
1113 static int smbd_post_send_iter(struct smbdirect_socket *sc, in smbd_post_send_iter() argument
1117 struct smbdirect_socket_parameters *sp = &sc->parameters; in smbd_post_send_iter()
1127 rc = wait_event_interruptible(sc->send_io.lcredits.wait_queue, in smbd_post_send_iter()
1128 atomic_read(&sc->send_io.lcredits.count) > 0 || in smbd_post_send_iter()
1129 sc->status != SMBDIRECT_SOCKET_CONNECTED); in smbd_post_send_iter()
1133 if (sc->status != SMBDIRECT_SOCKET_CONNECTED) { in smbd_post_send_iter()
1138 if (unlikely(atomic_dec_return(&sc->send_io.lcredits.count) < 0)) { in smbd_post_send_iter()
1139 atomic_inc(&sc->send_io.lcredits.count); in smbd_post_send_iter()
1145 rc = wait_event_interruptible(sc->send_io.credits.wait_queue, in smbd_post_send_iter()
1146 atomic_read(&sc->send_io.credits.count) > 0 || in smbd_post_send_iter()
1147 sc->status != SMBDIRECT_SOCKET_CONNECTED); in smbd_post_send_iter()
1151 if (sc->status != SMBDIRECT_SOCKET_CONNECTED) { in smbd_post_send_iter()
1156 if (unlikely(atomic_dec_return(&sc->send_io.credits.count) < 0)) { in smbd_post_send_iter()
1157 atomic_inc(&sc->send_io.credits.count); in smbd_post_send_iter()
1161 request = mempool_alloc(sc->send_io.mem.pool, GFP_KERNEL); in smbd_post_send_iter()
1167 request->socket = sc; in smbd_post_send_iter()
1177 request->sge[0].addr = ib_dma_map_single(sc->ib.dev, in smbd_post_send_iter()
1181 if (ib_dma_mapping_error(sc->ib.dev, request->sge[0].addr)) { in smbd_post_send_iter()
1187 request->sge[0].lkey = sc->ib.pd->local_dma_lkey; in smbd_post_send_iter()
1196 .device = sc->ib.dev, in smbd_post_send_iter()
1197 .local_dma_lkey = sc->ib.pd->local_dma_lkey, in smbd_post_send_iter()
1217 new_credits = manage_credits_prior_sending(sc); in smbd_post_send_iter()
1218 atomic_add(new_credits, &sc->recv_io.credits.count); in smbd_post_send_iter()
1222 if (manage_keep_alive_before_sending(sc)) in smbd_post_send_iter()
1245 atomic_inc(&sc->send_io.pending.count); in smbd_post_send_iter()
1247 rc = smbd_post_send(sc, request); in smbd_post_send_iter()
1251 if (atomic_dec_and_test(&sc->send_io.pending.count)) in smbd_post_send_iter()
1252 wake_up(&sc->send_io.pending.zero_wait_queue); in smbd_post_send_iter()
1254 wake_up(&sc->send_io.pending.dec_wait_queue); in smbd_post_send_iter()
1259 ib_dma_unmap_single(sc->ib.dev, in smbd_post_send_iter()
1263 mempool_free(request, sc->send_io.mem.pool); in smbd_post_send_iter()
1266 atomic_sub(new_credits, &sc->recv_io.credits.count); in smbd_post_send_iter()
1269 atomic_inc(&sc->send_io.credits.count); in smbd_post_send_iter()
1270 wake_up(&sc->send_io.credits.wait_queue); in smbd_post_send_iter()
1273 atomic_inc(&sc->send_io.lcredits.count); in smbd_post_send_iter()
1274 wake_up(&sc->send_io.lcredits.wait_queue); in smbd_post_send_iter()
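
Lines 1127-1160 show smbd_post_send_iter() taking one local and one remote send credit before building a request: it sleeps on the respective wait queue, optimistically decrements the counter, and backs the decrement out if it raced below zero; the unwind at lines 1266-1274 returns the credits and re-wakes waiters when the post fails. A user-space sketch of that take/rollback idiom with C11 atomics (demo_* names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Optimistically take one credit; if the counter went negative another
     * thread won the race, so give it back and report failure (the caller
     * then goes back to sleep on the wait queue, as at lines 1127-1160). */
    static bool demo_try_take_credit(atomic_int *credits)
    {
        if (atomic_fetch_sub(credits, 1) - 1 < 0) {
            atomic_fetch_add(credits, 1);
            return false;
        }
        return true;
    }

    /* On a failed post the credit is returned and waiters are re-woken
     * (compare the unwind at lines 1266-1274). */
    static void demo_put_credit(atomic_int *credits)
    {
        atomic_fetch_add(credits, 1);
        /* wake_up(&wait_queue) would follow here in the kernel code */
    }
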
1285 static int smbd_post_send_empty(struct smbdirect_socket *sc) in smbd_post_send_empty() argument
1289 sc->statistics.send_empty++; in smbd_post_send_empty()
1290 return smbd_post_send_iter(sc, NULL, &remaining_data_length); in smbd_post_send_empty()
1293 static int smbd_post_send_full_iter(struct smbdirect_socket *sc, in smbd_post_send_full_iter() argument
1306 rc = smbd_post_send_iter(sc, iter, _remaining_data_length); in smbd_post_send_full_iter()
1320 struct smbdirect_socket *sc, struct smbdirect_recv_io *response) in smbd_post_recv() argument
1322 struct smbdirect_socket_parameters *sp = &sc->parameters; in smbd_post_recv()
1327 sc->ib.dev, response->packet, in smbd_post_recv()
1329 if (ib_dma_mapping_error(sc->ib.dev, response->sge.addr)) in smbd_post_recv()
1333 response->sge.lkey = sc->ib.pd->local_dma_lkey; in smbd_post_recv()
1342 rc = ib_post_recv(sc->ib.qp, &recv_wr, NULL); in smbd_post_recv()
1344 ib_dma_unmap_single(sc->ib.dev, response->sge.addr, in smbd_post_recv()
1347 smbd_disconnect_rdma_connection(sc); in smbd_post_recv()
1355 static int smbd_negotiate(struct smbdirect_socket *sc) in smbd_negotiate() argument
1357 struct smbdirect_socket_parameters *sp = &sc->parameters; in smbd_negotiate()
1359 struct smbdirect_recv_io *response = get_receive_buffer(sc); in smbd_negotiate()
1361 WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_NEGOTIATE_NEEDED); in smbd_negotiate()
1362 sc->status = SMBDIRECT_SOCKET_NEGOTIATE_RUNNING; in smbd_negotiate()
1364 sc->recv_io.expected = SMBDIRECT_EXPECT_NEGOTIATE_REP; in smbd_negotiate()
1365 rc = smbd_post_recv(sc, response); in smbd_negotiate()
1370 put_receive_buffer(sc, response); in smbd_negotiate()
1374 rc = smbd_post_send_negotiate_req(sc); in smbd_negotiate()
1379 sc->status_wait, in smbd_negotiate()
1380 sc->status != SMBDIRECT_SOCKET_NEGOTIATE_RUNNING, in smbd_negotiate()
1384 if (sc->status == SMBDIRECT_SOCKET_CONNECTED) in smbd_negotiate()
1408 struct smbdirect_socket *sc, in enqueue_reassembly() argument
1414 spin_lock_irqsave(&sc->recv_io.reassembly.lock, flags); in enqueue_reassembly()
1415 list_add_tail(&response->list, &sc->recv_io.reassembly.list); in enqueue_reassembly()
1416 sc->recv_io.reassembly.queue_length++; in enqueue_reassembly()
1424 sc->recv_io.reassembly.data_length += data_length; in enqueue_reassembly()
1425 spin_unlock_irqrestore(&sc->recv_io.reassembly.lock, flags); in enqueue_reassembly()
1426 sc->statistics.enqueue_reassembly_queue++; in enqueue_reassembly()
1434 static struct smbdirect_recv_io *_get_first_reassembly(struct smbdirect_socket *sc) in _get_first_reassembly() argument
1438 if (!list_empty(&sc->recv_io.reassembly.list)) { in _get_first_reassembly()
1440 &sc->recv_io.reassembly.list, in _get_first_reassembly()
1452 static struct smbdirect_recv_io *get_receive_buffer(struct smbdirect_socket *sc) in get_receive_buffer() argument
1457 spin_lock_irqsave(&sc->recv_io.free.lock, flags); in get_receive_buffer()
1458 if (!list_empty(&sc->recv_io.free.list)) { in get_receive_buffer()
1460 &sc->recv_io.free.list, in get_receive_buffer()
1463 sc->statistics.get_receive_buffer++; in get_receive_buffer()
1465 spin_unlock_irqrestore(&sc->recv_io.free.lock, flags); in get_receive_buffer()
1477 struct smbdirect_socket *sc, struct smbdirect_recv_io *response) in put_receive_buffer() argument
1482 ib_dma_unmap_single(sc->ib.dev, in put_receive_buffer()
1489 spin_lock_irqsave(&sc->recv_io.free.lock, flags); in put_receive_buffer()
1490 list_add_tail(&response->list, &sc->recv_io.free.list); in put_receive_buffer()
1491 sc->statistics.put_receive_buffer++; in put_receive_buffer()
1492 spin_unlock_irqrestore(&sc->recv_io.free.lock, flags); in put_receive_buffer()
1494 queue_work(sc->workqueue, &sc->recv_io.posted.refill_work); in put_receive_buffer()
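
Lines 1452-1494 are the receive-buffer pool: get_receive_buffer() pops the first entry off recv_io.free.list under the lock, and put_receive_buffer() unmaps the SGE, pushes the buffer back and kicks refill_work so it gets re-posted. A mutex-protected free list showing the same get/put shape (user-space sketch, demo_* names illustrative):

    #include <pthread.h>

    struct demo_buf {
        struct demo_buf *next;
        /* payload omitted */
    };

    struct demo_pool {
        pthread_mutex_t  lock;
        struct demo_buf *free_list;
    };

    /* get: pop the first free buffer, or NULL if the pool is empty. */
    static struct demo_buf *demo_get_buffer(struct demo_pool *p)
    {
        pthread_mutex_lock(&p->lock);
        struct demo_buf *b = p->free_list;
        if (b)
            p->free_list = b->next;
        pthread_mutex_unlock(&p->lock);
        return b;
    }

    /* put: push the buffer back; the kernel code additionally queues
     * refill_work so freed buffers get re-posted as receives. */
    static void demo_put_buffer(struct demo_pool *p, struct demo_buf *b)
    {
        pthread_mutex_lock(&p->lock);
        b->next = p->free_list;
        p->free_list = b;
        pthread_mutex_unlock(&p->lock);
    }
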
1498 static int allocate_receive_buffers(struct smbdirect_socket *sc, int num_buf) in allocate_receive_buffers() argument
1504 response = mempool_alloc(sc->recv_io.mem.pool, GFP_KERNEL); in allocate_receive_buffers()
1508 response->socket = sc; in allocate_receive_buffers()
1510 list_add_tail(&response->list, &sc->recv_io.free.list); in allocate_receive_buffers()
1516 while (!list_empty(&sc->recv_io.free.list)) { in allocate_receive_buffers()
1518 &sc->recv_io.free.list, in allocate_receive_buffers()
1522 mempool_free(response, sc->recv_io.mem.pool); in allocate_receive_buffers()
1527 static void destroy_receive_buffers(struct smbdirect_socket *sc) in destroy_receive_buffers() argument
1531 while ((response = get_receive_buffer(sc))) in destroy_receive_buffers()
1532 mempool_free(response, sc->recv_io.mem.pool); in destroy_receive_buffers()
1537 struct smbdirect_socket *sc = in send_immediate_empty_message() local
1540 if (sc->status != SMBDIRECT_SOCKET_CONNECTED) in send_immediate_empty_message()
1544 smbd_post_send_empty(sc); in send_immediate_empty_message()
1550 struct smbdirect_socket *sc = in idle_connection_timer() local
1552 struct smbdirect_socket_parameters *sp = &sc->parameters; in idle_connection_timer()
1554 if (sc->idle.keepalive != SMBDIRECT_KEEPALIVE_NONE) { in idle_connection_timer()
1556 "error status sc->idle.keepalive=%d\n", in idle_connection_timer()
1557 sc->idle.keepalive); in idle_connection_timer()
1558 smbd_disconnect_rdma_connection(sc); in idle_connection_timer()
1562 if (sc->status != SMBDIRECT_SOCKET_CONNECTED) in idle_connection_timer()
1569 sc->idle.keepalive = SMBDIRECT_KEEPALIVE_PENDING; in idle_connection_timer()
1570 mod_delayed_work(sc->workqueue, &sc->idle.timer_work, in idle_connection_timer()
1573 queue_work(sc->workqueue, &sc->idle.immediate_work); in idle_connection_timer()
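
Lines 1537-1573, together with manage_keep_alive_before_sending() at lines 1063-1069 and the reset at line 690, form a small keepalive state machine: the idle timer marks a probe as PENDING, the next send flips it to SENT, any received packet returns it to NONE, and a timer tick that finds the state not NONE tears the connection down. Sketched as an enum plus two transitions (illustrative names, not the driver's):

    #include <stdbool.h>

    enum demo_keepalive {
        DEMO_KEEPALIVE_NONE,     /* traffic seen recently, nothing to do */
        DEMO_KEEPALIVE_PENDING,  /* idle timer fired, need to send a probe */
        DEMO_KEEPALIVE_SENT,     /* probe sent, waiting for any response  */
    };

    /* Idle timer tick: if the previous probe was never answered the peer is
     * considered dead (compare lines 1554-1558); otherwise request a probe. */
    static bool demo_idle_timer(enum demo_keepalive *state)
    {
        if (*state != DEMO_KEEPALIVE_NONE)
            return false;              /* caller tears the connection down  */
        *state = DEMO_KEEPALIVE_PENDING;
        return true;                   /* caller queues an empty message    */
    }

    /* Any received message resets the state (compare line 690). */
    static void demo_got_traffic(enum demo_keepalive *state)
    {
        *state = DEMO_KEEPALIVE_NONE;
    }
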
1584 struct smbdirect_socket *sc; in smbd_destroy() local
1592 sc = &info->socket; in smbd_destroy()
1595 disable_work_sync(&sc->disconnect_work); in smbd_destroy()
1598 if (sc->status < SMBDIRECT_SOCKET_DISCONNECTING) in smbd_destroy()
1599 smbd_disconnect_rdma_work(&sc->disconnect_work); in smbd_destroy()
1600 if (sc->status < SMBDIRECT_SOCKET_DISCONNECTED) { in smbd_destroy()
1602 wait_event(sc->status_wait, sc->status == SMBDIRECT_SOCKET_DISCONNECTED); in smbd_destroy()
1613 smbd_disconnect_wake_up_all(sc); in smbd_destroy()
1616 disable_work_sync(&sc->recv_io.posted.refill_work); in smbd_destroy()
1619 ib_drain_qp(sc->ib.qp); in smbd_destroy()
1620 rdma_destroy_qp(sc->rdma.cm_id); in smbd_destroy()
1621 sc->ib.qp = NULL; in smbd_destroy()
1624 disable_delayed_work_sync(&sc->idle.timer_work); in smbd_destroy()
1626 disable_work_sync(&sc->idle.immediate_work); in smbd_destroy()
1631 spin_lock_irqsave(&sc->recv_io.reassembly.lock, flags); in smbd_destroy()
1632 response = _get_first_reassembly(sc); in smbd_destroy()
1636 &sc->recv_io.reassembly.lock, flags); in smbd_destroy()
1637 put_receive_buffer(sc, response); in smbd_destroy()
1640 &sc->recv_io.reassembly.lock, flags); in smbd_destroy()
1642 sc->recv_io.reassembly.data_length = 0; in smbd_destroy()
1645 destroy_receive_buffers(sc); in smbd_destroy()
1648 destroy_mr_list(sc); in smbd_destroy()
1650 ib_free_cq(sc->ib.send_cq); in smbd_destroy()
1651 ib_free_cq(sc->ib.recv_cq); in smbd_destroy()
1652 ib_dealloc_pd(sc->ib.pd); in smbd_destroy()
1653 rdma_destroy_id(sc->rdma.cm_id); in smbd_destroy()
1656 mempool_destroy(sc->send_io.mem.pool); in smbd_destroy()
1657 kmem_cache_destroy(sc->send_io.mem.cache); in smbd_destroy()
1659 mempool_destroy(sc->recv_io.mem.pool); in smbd_destroy()
1660 kmem_cache_destroy(sc->recv_io.mem.cache); in smbd_destroy()
1662 sc->status = SMBDIRECT_SOCKET_DESTROYED; in smbd_destroy()
1664 destroy_workqueue(sc->workqueue); in smbd_destroy()
1706 static void destroy_caches(struct smbdirect_socket *sc) in destroy_caches() argument
1708 destroy_receive_buffers(sc); in destroy_caches()
1709 mempool_destroy(sc->recv_io.mem.pool); in destroy_caches()
1710 kmem_cache_destroy(sc->recv_io.mem.cache); in destroy_caches()
1711 mempool_destroy(sc->send_io.mem.pool); in destroy_caches()
1712 kmem_cache_destroy(sc->send_io.mem.cache); in destroy_caches()
1716 static int allocate_caches(struct smbdirect_socket *sc) in allocate_caches() argument
1718 struct smbdirect_socket_parameters *sp = &sc->parameters; in allocate_caches()
1725 scnprintf(name, MAX_NAME_LEN, "smbdirect_send_io_%p", sc); in allocate_caches()
1726 sc->send_io.mem.cache = in allocate_caches()
1732 if (!sc->send_io.mem.cache) in allocate_caches()
1735 sc->send_io.mem.pool = in allocate_caches()
1737 mempool_free_slab, sc->send_io.mem.cache); in allocate_caches()
1738 if (!sc->send_io.mem.pool) in allocate_caches()
1741 scnprintf(name, MAX_NAME_LEN, "smbdirect_recv_io_%p", sc); in allocate_caches()
1749 sc->recv_io.mem.cache = in allocate_caches()
1753 if (!sc->recv_io.mem.cache) in allocate_caches()
1756 sc->recv_io.mem.pool = in allocate_caches()
1758 mempool_free_slab, sc->recv_io.mem.cache); in allocate_caches()
1759 if (!sc->recv_io.mem.pool) in allocate_caches()
1762 rc = allocate_receive_buffers(sc, sp->recv_credit_max); in allocate_caches()
1771 mempool_destroy(sc->recv_io.mem.pool); in allocate_caches()
1773 kmem_cache_destroy(sc->recv_io.mem.cache); in allocate_caches()
1775 mempool_destroy(sc->send_io.mem.pool); in allocate_caches()
1777 kmem_cache_destroy(sc->send_io.mem.cache); in allocate_caches()
1787 struct smbdirect_socket *sc; in _smbd_get_connection() local
1801 sc = &info->socket; in _smbd_get_connection()
1802 scnprintf(wq_name, ARRAY_SIZE(wq_name), "smbd_%p", sc); in _smbd_get_connection()
1806 smbdirect_socket_init(sc); in _smbd_get_connection()
1807 sc->workqueue = workqueue; in _smbd_get_connection()
1808 sp = &sc->parameters; in _smbd_get_connection()
1810 INIT_WORK(&sc->disconnect_work, smbd_disconnect_rdma_work); in _smbd_get_connection()
1827 rc = smbd_ia_open(sc, dstaddr, port); in _smbd_get_connection()
1833 if (sp->send_credit_target > sc->ib.dev->attrs.max_cqe || in _smbd_get_connection()
1834 sp->send_credit_target > sc->ib.dev->attrs.max_qp_wr) { in _smbd_get_connection()
1837 sc->ib.dev->attrs.max_cqe, in _smbd_get_connection()
1838 sc->ib.dev->attrs.max_qp_wr); in _smbd_get_connection()
1842 if (sp->recv_credit_max > sc->ib.dev->attrs.max_cqe || in _smbd_get_connection()
1843 sp->recv_credit_max > sc->ib.dev->attrs.max_qp_wr) { in _smbd_get_connection()
1846 sc->ib.dev->attrs.max_cqe, in _smbd_get_connection()
1847 sc->ib.dev->attrs.max_qp_wr); in _smbd_get_connection()
1851 if (sc->ib.dev->attrs.max_send_sge < SMBDIRECT_SEND_IO_MAX_SGE || in _smbd_get_connection()
1852 sc->ib.dev->attrs.max_recv_sge < SMBDIRECT_RECV_IO_MAX_SGE) { in _smbd_get_connection()
1856 sc->ib.dev->name, in _smbd_get_connection()
1857 sc->ib.dev->attrs.max_send_sge, in _smbd_get_connection()
1858 sc->ib.dev->attrs.max_recv_sge); in _smbd_get_connection()
1864 sc->ib.dev->attrs.max_qp_rd_atom); in _smbd_get_connection()
1881 sc->ib.pd = ib_alloc_pd(sc->ib.dev, 0); in _smbd_get_connection()
1882 if (IS_ERR(sc->ib.pd)) { in _smbd_get_connection()
1883 rc = PTR_ERR(sc->ib.pd); in _smbd_get_connection()
1884 sc->ib.pd = NULL; in _smbd_get_connection()
1889 sc->ib.send_cq = in _smbd_get_connection()
1890 ib_alloc_cq_any(sc->ib.dev, sc, in _smbd_get_connection()
1892 if (IS_ERR(sc->ib.send_cq)) { in _smbd_get_connection()
1893 sc->ib.send_cq = NULL; in _smbd_get_connection()
1897 sc->ib.recv_cq = in _smbd_get_connection()
1898 ib_alloc_cq_any(sc->ib.dev, sc, in _smbd_get_connection()
1900 if (IS_ERR(sc->ib.recv_cq)) { in _smbd_get_connection()
1901 sc->ib.recv_cq = NULL; in _smbd_get_connection()
1907 qp_attr.qp_context = sc; in _smbd_get_connection()
1911 qp_attr.send_cq = sc->ib.send_cq; in _smbd_get_connection()
1912 qp_attr.recv_cq = sc->ib.recv_cq; in _smbd_get_connection()
1915 rc = rdma_create_qp(sc->rdma.cm_id, sc->ib.pd, &qp_attr); in _smbd_get_connection()
1920 sc->ib.qp = sc->rdma.cm_id->qp; in _smbd_get_connection()
1927 sc->ib.dev->ops.get_port_immutable( in _smbd_get_connection()
1928 sc->ib.dev, sc->rdma.cm_id->port_num, &port_immutable); in _smbd_get_connection()
1946 WARN_ON_ONCE(sc->status != SMBDIRECT_SOCKET_RDMA_CONNECT_NEEDED); in _smbd_get_connection()
1947 sc->status = SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING; in _smbd_get_connection()
1948 rc = rdma_connect(sc->rdma.cm_id, &conn_param); in _smbd_get_connection()
1955 sc->status_wait, in _smbd_get_connection()
1956 sc->status != SMBDIRECT_SOCKET_RDMA_CONNECT_RUNNING, in _smbd_get_connection()
1959 if (sc->status != SMBDIRECT_SOCKET_NEGOTIATE_NEEDED) { in _smbd_get_connection()
1966 rc = allocate_caches(sc); in _smbd_get_connection()
1972 INIT_WORK(&sc->idle.immediate_work, send_immediate_empty_message); in _smbd_get_connection()
1973 INIT_DELAYED_WORK(&sc->idle.timer_work, idle_connection_timer); in _smbd_get_connection()
1978 sc->idle.keepalive = SMBDIRECT_KEEPALIVE_PENDING; in _smbd_get_connection()
1979 mod_delayed_work(sc->workqueue, &sc->idle.timer_work, in _smbd_get_connection()
1982 INIT_WORK(&sc->recv_io.posted.refill_work, smbd_post_send_credits); in _smbd_get_connection()
1984 rc = smbd_negotiate(sc); in _smbd_get_connection()
1990 rc = allocate_mr_list(sc); in _smbd_get_connection()
2005 disable_delayed_work_sync(&sc->idle.timer_work); in _smbd_get_connection()
2006 destroy_caches(sc); in _smbd_get_connection()
2007 sc->status = SMBDIRECT_SOCKET_NEGOTIATE_FAILED; in _smbd_get_connection()
2008 rdma_disconnect(sc->rdma.cm_id); in _smbd_get_connection()
2009 wait_event(sc->status_wait, in _smbd_get_connection()
2010 sc->status == SMBDIRECT_SOCKET_DISCONNECTED); in _smbd_get_connection()
2014 rdma_destroy_qp(sc->rdma.cm_id); in _smbd_get_connection()
2018 if (sc->ib.send_cq) in _smbd_get_connection()
2019 ib_free_cq(sc->ib.send_cq); in _smbd_get_connection()
2020 if (sc->ib.recv_cq) in _smbd_get_connection()
2021 ib_free_cq(sc->ib.recv_cq); in _smbd_get_connection()
2023 ib_dealloc_pd(sc->ib.pd); in _smbd_get_connection()
2027 rdma_destroy_id(sc->rdma.cm_id); in _smbd_get_connection()
2030 destroy_workqueue(sc->workqueue); in _smbd_get_connection()
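
Lines 1787-2030 are _smbd_get_connection(): resolve the address and route, validate the configured credit limits against the device attributes, allocate the PD, CQs and QP, rdma_connect(), then negotiate. The checks at lines 1833-1847 simply require that the requested credit counts fit the device's completion-queue and work-request limits, since one send/receive is posted per credit; roughly (assumed helper name):

    #include <stdbool.h>
    #include <stdint.h>

    /* The requested credit counts must fit the device's CQ and QP
     * work-request limits. */
    static bool demo_credits_fit_device(uint32_t send_credit_target,
                                        uint32_t recv_credit_max,
                                        uint32_t dev_max_cqe,
                                        uint32_t dev_max_qp_wr)
    {
        if (send_credit_target > dev_max_cqe || send_credit_target > dev_max_qp_wr)
            return false;
        if (recv_credit_max > dev_max_cqe || recv_credit_max > dev_max_qp_wr)
            return false;
        return true;
    }
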
2080 struct smbdirect_socket *sc = &info->socket; in smbd_recv() local
2097 log_read(INFO, "size=%zd sc->recv_io.reassembly.data_length=%d\n", size, in smbd_recv()
2098 sc->recv_io.reassembly.data_length); in smbd_recv()
2099 if (sc->recv_io.reassembly.data_length >= size) { in smbd_recv()
2112 queue_length = sc->recv_io.reassembly.queue_length; in smbd_recv()
2115 offset = sc->recv_io.reassembly.first_entry_offset; in smbd_recv()
2117 response = _get_first_reassembly(sc); in smbd_recv()
2163 &sc->recv_io.reassembly.lock, flags); in smbd_recv()
2166 &sc->recv_io.reassembly.lock, flags); in smbd_recv()
2169 sc->statistics.dequeue_reassembly_queue++; in smbd_recv()
2170 put_receive_buffer(sc, response); in smbd_recv()
2184 spin_lock_irqsave(&sc->recv_io.reassembly.lock, flags); in smbd_recv()
2185 sc->recv_io.reassembly.data_length -= data_read; in smbd_recv()
2186 sc->recv_io.reassembly.queue_length -= queue_removed; in smbd_recv()
2187 spin_unlock_irqrestore(&sc->recv_io.reassembly.lock, flags); in smbd_recv()
2189 sc->recv_io.reassembly.first_entry_offset = offset; in smbd_recv()
2191 data_read, sc->recv_io.reassembly.data_length, in smbd_recv()
2192 sc->recv_io.reassembly.first_entry_offset); in smbd_recv()
2199 sc->recv_io.reassembly.wait_queue, in smbd_recv()
2200 sc->recv_io.reassembly.data_length >= size || in smbd_recv()
2201 sc->status != SMBDIRECT_SOCKET_CONNECTED); in smbd_recv()
2206 if (sc->status != SMBDIRECT_SOCKET_CONNECTED) { in smbd_recv()
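
Lines 2080-2206 are smbd_recv() consuming from the reassembly queue: if recv_io.reassembly.data_length already covers the request it copies out of the queued buffers and updates first_entry_offset, otherwise it sleeps on the reassembly wait queue until enough data arrives or the socket leaves CONNECTED; enqueue_reassembly() (lines 1414-1426) is the producer side. A byte-count-only sketch of that producer/consumer handshake (pthread-based, demo_* names illustrative):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct demo_reassembly {
        pthread_mutex_t lock;
        pthread_cond_t  wait;
        size_t          data_length;   /* bytes queued but not yet consumed */
        bool            connected;
    };

    /* Producer: account the new payload and wake the reader
     * (compare enqueue_reassembly() and the wake_up at line 784). */
    static void demo_enqueue(struct demo_reassembly *r, size_t len)
    {
        pthread_mutex_lock(&r->lock);
        r->data_length += len;
        pthread_cond_signal(&r->wait);
        pthread_mutex_unlock(&r->lock);
    }

    /* Consumer: sleep until 'size' bytes are available or the connection
     * drops (compare lines 2199-2206). Returns false on disconnect. */
    static bool demo_wait_for(struct demo_reassembly *r, size_t size)
    {
        pthread_mutex_lock(&r->lock);
        while (r->data_length < size && r->connected)
            pthread_cond_wait(&r->wait, &r->lock);
        bool ok = r->connected;
        if (ok)
            r->data_length -= size;
        pthread_mutex_unlock(&r->lock);
        return ok;
    }
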
2224 struct smbdirect_socket *sc = &info->socket; in smbd_send() local
2225 struct smbdirect_socket_parameters *sp = &sc->parameters; in smbd_send()
2231 if (sc->status != SMBDIRECT_SOCKET_CONNECTED) in smbd_send()
2272 rc = smbd_post_send_full_iter(sc, &iter, &remaining_data_length); in smbd_send()
2278 rc = smbd_post_send_full_iter(sc, &rqst->rq_iter, in smbd_send()
2293 wait_event(sc->send_io.pending.zero_wait_queue, in smbd_send()
2294 atomic_read(&sc->send_io.pending.count) == 0 || in smbd_send()
2295 sc->status != SMBDIRECT_SOCKET_CONNECTED); in smbd_send()
2297 if (sc->status != SMBDIRECT_SOCKET_CONNECTED && rc == 0) in smbd_send()
2307 struct smbdirect_socket *sc = mr->socket; in register_mr_done() local
2311 smbd_disconnect_rdma_connection(sc); in register_mr_done()
2326 struct smbdirect_socket *sc = in smbd_mr_recovery_work() local
2328 struct smbdirect_socket_parameters *sp = &sc->parameters; in smbd_mr_recovery_work()
2332 list_for_each_entry(smbdirect_mr, &sc->mr_io.all.list, list) { in smbd_mr_recovery_work()
2341 smbd_disconnect_rdma_connection(sc); in smbd_mr_recovery_work()
2346 sc->ib.pd, sc->mr_io.type, in smbd_mr_recovery_work()
2350 sc->mr_io.type, in smbd_mr_recovery_work()
2352 smbd_disconnect_rdma_connection(sc); in smbd_mr_recovery_work()
2368 if (atomic_inc_return(&sc->mr_io.ready.count) == 1) in smbd_mr_recovery_work()
2369 wake_up(&sc->mr_io.ready.wait_queue); in smbd_mr_recovery_work()
2375 struct smbdirect_socket *sc = mr->socket; in smbd_mr_disable_locked() local
2385 ib_dma_unmap_sg(sc->ib.dev, mr->sgt.sgl, mr->sgt.nents, mr->dir); in smbd_mr_disable_locked()
2413 static void destroy_mr_list(struct smbdirect_socket *sc) in destroy_mr_list() argument
2419 disable_work_sync(&sc->mr_io.recovery_work); in destroy_mr_list()
2421 spin_lock_irqsave(&sc->mr_io.all.lock, flags); in destroy_mr_list()
2422 list_splice_tail_init(&sc->mr_io.all.list, &all_list); in destroy_mr_list()
2423 spin_unlock_irqrestore(&sc->mr_io.all.lock, flags); in destroy_mr_list()
2461 static int allocate_mr_list(struct smbdirect_socket *sc) in allocate_mr_list() argument
2463 struct smbdirect_socket_parameters *sp = &sc->parameters; in allocate_mr_list()
2484 mr->mr = ib_alloc_mr(sc->ib.pd, in allocate_mr_list()
2485 sc->mr_io.type, in allocate_mr_list()
2490 sc->mr_io.type, sp->max_frmr_depth); in allocate_mr_list()
2503 mr->socket = sc; in allocate_mr_list()
2505 list_add_tail(&mr->list, &sc->mr_io.all.list); in allocate_mr_list()
2506 atomic_inc(&sc->mr_io.ready.count); in allocate_mr_list()
2509 INIT_WORK(&sc->mr_io.recovery_work, smbd_mr_recovery_work); in allocate_mr_list()
2519 destroy_mr_list(sc); in allocate_mr_list()
2531 static struct smbdirect_mr_io *get_mr(struct smbdirect_socket *sc) in get_mr() argument
2537 rc = wait_event_interruptible(sc->mr_io.ready.wait_queue, in get_mr()
2538 atomic_read(&sc->mr_io.ready.count) || in get_mr()
2539 sc->status != SMBDIRECT_SOCKET_CONNECTED); in get_mr()
2545 if (sc->status != SMBDIRECT_SOCKET_CONNECTED) { in get_mr()
2546 log_rdma_mr(ERR, "sc->status=%x\n", sc->status); in get_mr()
2550 spin_lock_irqsave(&sc->mr_io.all.lock, flags); in get_mr()
2551 list_for_each_entry(ret, &sc->mr_io.all.list, list) { in get_mr()
2555 spin_unlock_irqrestore(&sc->mr_io.all.lock, flags); in get_mr()
2556 atomic_dec(&sc->mr_io.ready.count); in get_mr()
2557 atomic_inc(&sc->mr_io.used.count); in get_mr()
2562 spin_unlock_irqrestore(&sc->mr_io.all.lock, flags); in get_mr()
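
Lines 2531-2562 are get_mr(): wait until mr_io.ready.count is non-zero or the socket disconnects, then walk mr_io.all.list under the lock, claim the first ready MR, and move one count from ready to used. The same acquire pattern over a flat array (user-space sketch; demo_* names and states are illustrative, not the driver's):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    enum demo_mr_state { DEMO_MR_READY, DEMO_MR_REGISTERED };

    struct demo_mr {
        enum demo_mr_state state;
    };

    struct demo_mr_pool {
        pthread_mutex_t lock;
        pthread_cond_t  ready_wait;
        int             ready_count;
        int             used_count;
        struct demo_mr *mrs;
        size_t          nr_mrs;
        bool            connected;
    };

    /* Sleep until at least one MR is ready (or the socket dies), then claim
     * the first ready entry, mirroring get_mr() at lines 2537-2557. */
    static struct demo_mr *demo_get_mr(struct demo_mr_pool *p)
    {
        pthread_mutex_lock(&p->lock);
        while (p->ready_count == 0 && p->connected)
            pthread_cond_wait(&p->ready_wait, &p->lock);

        struct demo_mr *ret = NULL;
        if (p->connected) {
            for (size_t i = 0; i < p->nr_mrs; i++) {
                if (p->mrs[i].state == DEMO_MR_READY) {
                    p->mrs[i].state = DEMO_MR_REGISTERED;
                    p->ready_count--;
                    p->used_count++;
                    ret = &p->mrs[i];
                    break;
                }
            }
        }
        pthread_mutex_unlock(&p->lock);
        return ret;
    }
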
2599 struct smbdirect_socket *sc = &info->socket; in smbd_register_mr() local
2600 struct smbdirect_socket_parameters *sp = &sc->parameters; in smbd_register_mr()
2613 mr = get_mr(sc); in smbd_register_mr()
2630 rc = ib_dma_map_sg(sc->ib.dev, mr->sgt.sgl, mr->sgt.nents, mr->dir); in smbd_register_mr()
2663 rc = ib_post_send(sc->ib.qp, &reg_wr->wr, NULL); in smbd_register_mr()
2680 ib_dma_unmap_sg(sc->ib.dev, mr->sgt.sgl, mr->sgt.nents, mr->dir); in smbd_register_mr()
2685 if (atomic_dec_and_test(&sc->mr_io.used.count)) in smbd_register_mr()
2686 wake_up(&sc->mr_io.cleanup.wait_queue); in smbd_register_mr()
2688 smbd_disconnect_rdma_connection(sc); in smbd_register_mr()
2735 struct smbdirect_socket *sc = mr->socket; in smbd_deregister_mr() local
2741 if (sc->status != SMBDIRECT_SOCKET_CONNECTED) { in smbd_deregister_mr()
2759 rc = ib_post_send(sc->ib.qp, wr, NULL); in smbd_deregister_mr()
2763 smbd_disconnect_rdma_connection(sc); in smbd_deregister_mr()
2776 ib_dma_unmap_sg(sc->ib.dev, mr->sgt.sgl, mr->sgt.nents, mr->dir); in smbd_deregister_mr()
2782 if (atomic_inc_return(&sc->mr_io.ready.count) == 1) in smbd_deregister_mr()
2783 wake_up(&sc->mr_io.ready.wait_queue); in smbd_deregister_mr()
2789 queue_work(sc->workqueue, &sc->mr_io.recovery_work); in smbd_deregister_mr()
2792 if (atomic_dec_and_test(&sc->mr_io.used.count)) in smbd_deregister_mr()
2793 wake_up(&sc->mr_io.cleanup.wait_queue); in smbd_deregister_mr()