Lines Matching (full-text): no, memory, wc

1 // SPDX-License-Identifier: GPL-2.0-or-later
66 /* SMBD minimum receive size and fragmented size as defined in [MS-SMBD] */
78 /* No need to retry on Receiver Not Ready since SMBD manages credits */
83 * as defined in [MS-SMBD] 3.1.1.1
95 /* The maximum fragmented upper-layer payload receive size supported */
98 /* The maximum single-message size which can be received */
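
The comments above annotate the connection limits this transport negotiates per [MS-SMBD]. The variable names below also appear later in this listing (where they seed the smbd_connection fields); the default values shown are illustrative assumptions only, a sketch for orientation rather than quoted source:

    static int smbd_receive_credit_max = 255;               /* receive credits granted to the peer (assumed default) */
    static int smbd_send_credit_target = 255;               /* send credits requested from the peer (assumed default) */
    static int smbd_max_send_size = 1364;                   /* largest single SMBD send, bytes (assumed default) */
    static int smbd_max_fragmented_recv_size = 1024 * 1024; /* reassembled upper-layer receive limit (assumed default) */
    static int smbd_max_receive_size = 1364;                /* largest single SMBD receive, bytes (assumed default) */
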
169 if (info->transport_status == SMBD_CONNECTED) { in smbd_disconnect_rdma_work()
170 info->transport_status = SMBD_DISCONNECTING; in smbd_disconnect_rdma_work()
171 rdma_disconnect(info->id); in smbd_disconnect_rdma_work()
177 queue_work(info->workqueue, &info->disconnect_work); in smbd_disconnect_rdma_connection()
184 struct smbd_connection *info = id->context; in smbd_conn_upcall()
187 event->event, event->status); in smbd_conn_upcall()
189 switch (event->event) { in smbd_conn_upcall()
192 info->ri_rc = 0; in smbd_conn_upcall()
193 complete(&info->ri_done); in smbd_conn_upcall()
197 info->ri_rc = -EHOSTUNREACH; in smbd_conn_upcall()
198 complete(&info->ri_done); in smbd_conn_upcall()
202 info->ri_rc = -ENETUNREACH; in smbd_conn_upcall()
203 complete(&info->ri_done); in smbd_conn_upcall()
207 log_rdma_event(INFO, "connected event=%d\n", event->event); in smbd_conn_upcall()
208 info->transport_status = SMBD_CONNECTED; in smbd_conn_upcall()
209 wake_up_interruptible(&info->conn_wait); in smbd_conn_upcall()
215 log_rdma_event(INFO, "connecting failed event=%d\n", event->event); in smbd_conn_upcall()
216 info->transport_status = SMBD_DISCONNECTED; in smbd_conn_upcall()
217 wake_up_interruptible(&info->conn_wait); in smbd_conn_upcall()
223 if (info->transport_status == SMBD_NEGOTIATE_FAILED) { in smbd_conn_upcall()
224 info->transport_status = SMBD_DISCONNECTED; in smbd_conn_upcall()
225 wake_up(&info->conn_wait); in smbd_conn_upcall()
229 info->transport_status = SMBD_DISCONNECTED; in smbd_conn_upcall()
230 wake_up_interruptible(&info->disconn_wait); in smbd_conn_upcall()
231 wake_up_interruptible(&info->wait_reassembly_queue); in smbd_conn_upcall()
232 wake_up_interruptible_all(&info->wait_send_queue); in smbd_conn_upcall()
249 ib_event_msg(event->event), event->device->name, info); in smbd_qp_async_error_upcall()
251 switch (event->event) { in smbd_qp_async_error_upcall()
264 return (void *)request->packet; in smbd_request_payload()
269 return (void *)response->packet; in smbd_response_payload()
273 static void send_done(struct ib_cq *cq, struct ib_wc *wc) in send_done() argument
277 container_of(wc->wr_cqe, struct smbd_request, cqe); in send_done()
279 log_rdma_send(INFO, "smbd_request 0x%p completed wc->status=%d\n", in send_done()
280 request, wc->status); in send_done()
282 if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) { in send_done()
283 log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n", in send_done()
284 wc->status, wc->opcode); in send_done()
285 smbd_disconnect_rdma_connection(request->info); in send_done()
288 for (i = 0; i < request->num_sge; i++) in send_done()
289 ib_dma_unmap_single(request->info->id->device, in send_done()
290 request->sge[i].addr, in send_done()
291 request->sge[i].length, in send_done()
294 if (atomic_dec_and_test(&request->info->send_pending)) in send_done()
295 wake_up(&request->info->wait_send_pending); in send_done()
297 wake_up(&request->info->wait_post_send); in send_done()
299 mempool_free(request, request->info->request_mempool); in send_done()
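
send_done() above recovers its smbd_request from the work completion through the embedded ib_cqe. A minimal sketch of that idiom, with hypothetical names (only the ib_cqe/container_of pattern is the point):

    struct my_request {
        struct ib_cqe cqe;              /* cqe.done = my_send_done before posting */
        /* ... per-request state: SGEs to unmap, owning connection, ... */
    };

    static void my_send_done(struct ib_cq *cq, struct ib_wc *wc)
    {
        /* send_wr.wr_cqe was set to &req->cqe, so the completion handler
         * can walk back from the ib_wc to the containing request. */
        struct my_request *req = container_of(wc->wr_cqe, struct my_request, cqe);

        /* ... check wc->status, unmap DMA, drop send_pending, free req ... */
    }
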
305 resp->min_version, resp->max_version, in dump_smbd_negotiate_resp()
306 resp->negotiated_version, resp->credits_requested, in dump_smbd_negotiate_resp()
307 resp->credits_granted, resp->status, in dump_smbd_negotiate_resp()
308 resp->max_readwrite_size, resp->preferred_send_size, in dump_smbd_negotiate_resp()
309 resp->max_receive_size, resp->max_fragmented_size); in dump_smbd_negotiate_resp()
313 * Process a negotiation response message, according to [MS-SMBD] 3.1.5.7
320 struct smbd_connection *info = response->info; in process_negotiation_response()
329 if (le16_to_cpu(packet->negotiated_version) != SMBD_V1) { in process_negotiation_response()
331 le16_to_cpu(packet->negotiated_version)); in process_negotiation_response()
334 info->protocol = le16_to_cpu(packet->negotiated_version); in process_negotiation_response()
336 if (packet->credits_requested == 0) { in process_negotiation_response()
340 info->receive_credit_target = le16_to_cpu(packet->credits_requested); in process_negotiation_response()
342 if (packet->credits_granted == 0) { in process_negotiation_response()
346 atomic_set(&info->send_credits, le16_to_cpu(packet->credits_granted)); in process_negotiation_response()
348 atomic_set(&info->receive_credits, 0); in process_negotiation_response()
350 if (le32_to_cpu(packet->preferred_send_size) > info->max_receive_size) { in process_negotiation_response()
352 le32_to_cpu(packet->preferred_send_size)); in process_negotiation_response()
355 info->max_receive_size = le32_to_cpu(packet->preferred_send_size); in process_negotiation_response()
357 if (le32_to_cpu(packet->max_receive_size) < SMBD_MIN_RECEIVE_SIZE) { in process_negotiation_response()
359 le32_to_cpu(packet->max_receive_size)); in process_negotiation_response()
362 info->max_send_size = min_t(int, info->max_send_size, in process_negotiation_response()
363 le32_to_cpu(packet->max_receive_size)); in process_negotiation_response()
365 if (le32_to_cpu(packet->max_fragmented_size) < in process_negotiation_response()
368 le32_to_cpu(packet->max_fragmented_size)); in process_negotiation_response()
371 info->max_fragmented_send_size = in process_negotiation_response()
372 le32_to_cpu(packet->max_fragmented_size); in process_negotiation_response()
373 info->rdma_readwrite_threshold = in process_negotiation_response()
374 rdma_readwrite_threshold > info->max_fragmented_send_size ? in process_negotiation_response()
375 info->max_fragmented_send_size : in process_negotiation_response()
379 info->max_readwrite_size = min_t(u32, in process_negotiation_response()
380 le32_to_cpu(packet->max_readwrite_size), in process_negotiation_response()
381 info->max_frmr_depth * PAGE_SIZE); in process_negotiation_response()
382 info->max_frmr_depth = info->max_readwrite_size / PAGE_SIZE; in process_negotiation_response()
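
The last few lines above clamp the peer's advertised max_readwrite_size against what the local fast-register MR can cover, then re-derive max_frmr_depth from the clamped value. A worked example of that arithmetic (hypothetical peer values, 4 KiB pages):

    unsigned int page_size = 4096;
    unsigned int max_frmr_depth = 256;                  /* local fast-register page-list depth */
    unsigned int peer_max_readwrite = 8 * 1024 * 1024;  /* from the peer's negotiate response */

    unsigned int max_readwrite = peer_max_readwrite;
    if (max_readwrite > max_frmr_depth * page_size)
        max_readwrite = max_frmr_depth * page_size;     /* 256 * 4096 = 1 MiB */
    max_frmr_depth = max_readwrite / page_size;         /* re-derived: 256 pages */
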
397 if (info->transport_status != SMBD_CONNECTED) { in smbd_post_send_credits()
398 wake_up(&info->wait_receive_queues); in smbd_post_send_credits()
402 if (info->receive_credit_target > in smbd_post_send_credits()
403 atomic_read(&info->receive_credits)) { in smbd_post_send_credits()
418 response->type = SMBD_TRANSFER_DATA; in smbd_post_send_credits()
419 response->first_segment = false; in smbd_post_send_credits()
432 spin_lock(&info->lock_new_credits_offered); in smbd_post_send_credits()
433 info->new_credits_offered += ret; in smbd_post_send_credits()
434 spin_unlock(&info->lock_new_credits_offered); in smbd_post_send_credits()
436 /* Promptly send an immediate packet as defined in [MS-SMBD] 3.1.1.1 */ in smbd_post_send_credits()
437 info->send_immediate = true; in smbd_post_send_credits()
438 if (atomic_read(&info->receive_credits) < in smbd_post_send_credits()
439 info->receive_credit_target - 1) { in smbd_post_send_credits()
440 if (info->keep_alive_requested == KEEP_ALIVE_PENDING || in smbd_post_send_credits()
441 info->send_immediate) { in smbd_post_send_credits()
449 static void recv_done(struct ib_cq *cq, struct ib_wc *wc) in recv_done() argument
453 container_of(wc->wr_cqe, struct smbd_response, cqe); in recv_done()
454 struct smbd_connection *info = response->info; in recv_done()
457 log_rdma_recv(INFO, "response=0x%p type=%d wc status=%d wc opcode %d byte_len=%d pkey_index=%u\n", in recv_done()
458 response, response->type, wc->status, wc->opcode, in recv_done()
459 wc->byte_len, wc->pkey_index); in recv_done()
461 if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) { in recv_done()
462 log_rdma_recv(INFO, "wc->status=%d opcode=%d\n", in recv_done()
463 wc->status, wc->opcode); in recv_done()
469 wc->qp->device, in recv_done()
470 response->sge.addr, in recv_done()
471 response->sge.length, in recv_done()
474 switch (response->type) { in recv_done()
478 info->full_packet_received = true; in recv_done()
479 info->negotiate_done = in recv_done()
480 process_negotiation_response(response, wc->byte_len); in recv_done()
481 complete(&info->negotiate_completion); in recv_done()
487 data_length = le32_to_cpu(data_transfer->data_length); in recv_done()
494 if (info->full_packet_received) in recv_done()
495 response->first_segment = true; in recv_done()
497 if (le32_to_cpu(data_transfer->remaining_data_length)) in recv_done()
498 info->full_packet_received = false; in recv_done()
500 info->full_packet_received = true; in recv_done()
510 wake_up_interruptible(&info->wait_reassembly_queue); in recv_done()
512 atomic_dec(&info->receive_credits); in recv_done()
513 info->receive_credit_target = in recv_done()
514 le16_to_cpu(data_transfer->credits_requested); in recv_done()
515 if (le16_to_cpu(data_transfer->credits_granted)) { in recv_done()
516 atomic_add(le16_to_cpu(data_transfer->credits_granted), in recv_done()
517 &info->send_credits); in recv_done()
522 wake_up_interruptible(&info->wait_send_queue); in recv_done()
526 le16_to_cpu(data_transfer->flags), in recv_done()
527 le32_to_cpu(data_transfer->data_offset), in recv_done()
528 le32_to_cpu(data_transfer->data_length), in recv_done()
529 le32_to_cpu(data_transfer->remaining_data_length)); in recv_done()
532 info->keep_alive_requested = KEEP_ALIVE_NONE; in recv_done()
533 if (le16_to_cpu(data_transfer->flags) & in recv_done()
535 info->keep_alive_requested = KEEP_ALIVE_PENDING; in recv_done()
542 "unexpected response type=%d\n", response->type); in recv_done()
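
recv_done() above dispatches on response->type: one case completes the negotiation, the other feeds the reassembly queue and the credit accounting. Restating the message-type enum behind that switch (an assumption, matching the two cases handled):

    enum smbd_message_type {
        SMBD_NEGOTIATE_RESP,    /* reply to our negotiate request; completes negotiate_completion */
        SMBD_TRANSFER_DATA,     /* carries (a fragment of) an upper-layer SMB2 message plus credits */
    };
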
565 if (dstaddr->sa_family == AF_INET6) in smbd_create_id()
566 sport = &((struct sockaddr_in6 *)dstaddr)->sin6_port; in smbd_create_id()
568 sport = &((struct sockaddr_in *)dstaddr)->sin_port; in smbd_create_id()
572 init_completion(&info->ri_done); in smbd_create_id()
573 info->ri_rc = -ETIMEDOUT; in smbd_create_id()
582 &info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT)); in smbd_create_id()
583 /* e.g. if interrupted returns -ERESTARTSYS */ in smbd_create_id()
588 rc = info->ri_rc; in smbd_create_id()
594 info->ri_rc = -ETIMEDOUT; in smbd_create_id()
601 &info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT)); in smbd_create_id()
602 /* e.g. if interrupted returns -ERESTARTSYS */ in smbd_create_id()
607 rc = info->ri_rc; in smbd_create_id()
627 if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) in frwr_is_supported()
629 if (attrs->max_fast_reg_page_list_len == 0) in frwr_is_supported()
640 info->id = smbd_create_id(info, dstaddr, port); in smbd_ia_open()
641 if (IS_ERR(info->id)) { in smbd_ia_open()
642 rc = PTR_ERR(info->id); in smbd_ia_open()
646 if (!frwr_is_supported(&info->id->device->attrs)) { in smbd_ia_open()
649 info->id->device->attrs.device_cap_flags, in smbd_ia_open()
650 info->id->device->attrs.max_fast_reg_page_list_len); in smbd_ia_open()
651 rc = -EPROTONOSUPPORT; in smbd_ia_open()
654 info->max_frmr_depth = min_t(int, in smbd_ia_open()
656 info->id->device->attrs.max_fast_reg_page_list_len); in smbd_ia_open()
657 info->mr_type = IB_MR_TYPE_MEM_REG; in smbd_ia_open()
658 if (info->id->device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG) in smbd_ia_open()
659 info->mr_type = IB_MR_TYPE_SG_GAPS; in smbd_ia_open()
661 info->pd = ib_alloc_pd(info->id->device, 0); in smbd_ia_open()
662 if (IS_ERR(info->pd)) { in smbd_ia_open()
663 rc = PTR_ERR(info->pd); in smbd_ia_open()
671 rdma_destroy_id(info->id); in smbd_ia_open()
672 info->id = NULL; in smbd_ia_open()
680 * The negotiation procedure is in [MS-SMBD] 3.1.5.2 and 3.1.5.3
687 int rc = -ENOMEM; in smbd_post_send_negotiate_req()
691 request = mempool_alloc(info->request_mempool, GFP_KERNEL); in smbd_post_send_negotiate_req()
695 request->info = info; in smbd_post_send_negotiate_req()
698 packet->min_version = cpu_to_le16(SMBD_V1); in smbd_post_send_negotiate_req()
699 packet->max_version = cpu_to_le16(SMBD_V1); in smbd_post_send_negotiate_req()
700 packet->reserved = 0; in smbd_post_send_negotiate_req()
701 packet->credits_requested = cpu_to_le16(info->send_credit_target); in smbd_post_send_negotiate_req()
702 packet->preferred_send_size = cpu_to_le32(info->max_send_size); in smbd_post_send_negotiate_req()
703 packet->max_receive_size = cpu_to_le32(info->max_receive_size); in smbd_post_send_negotiate_req()
704 packet->max_fragmented_size = in smbd_post_send_negotiate_req()
705 cpu_to_le32(info->max_fragmented_recv_size); in smbd_post_send_negotiate_req()
707 request->num_sge = 1; in smbd_post_send_negotiate_req()
708 request->sge[0].addr = ib_dma_map_single( in smbd_post_send_negotiate_req()
709 info->id->device, (void *)packet, in smbd_post_send_negotiate_req()
711 if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) { in smbd_post_send_negotiate_req()
712 rc = -EIO; in smbd_post_send_negotiate_req()
716 request->sge[0].length = sizeof(*packet); in smbd_post_send_negotiate_req()
717 request->sge[0].lkey = info->pd->local_dma_lkey; in smbd_post_send_negotiate_req()
720 info->id->device, request->sge[0].addr, in smbd_post_send_negotiate_req()
721 request->sge[0].length, DMA_TO_DEVICE); in smbd_post_send_negotiate_req()
723 request->cqe.done = send_done; in smbd_post_send_negotiate_req()
726 send_wr.wr_cqe = &request->cqe; in smbd_post_send_negotiate_req()
727 send_wr.sg_list = request->sge; in smbd_post_send_negotiate_req()
728 send_wr.num_sge = request->num_sge; in smbd_post_send_negotiate_req()
733 request->sge[0].addr, in smbd_post_send_negotiate_req()
734 request->sge[0].length, request->sge[0].lkey); in smbd_post_send_negotiate_req()
736 atomic_inc(&info->send_pending); in smbd_post_send_negotiate_req()
737 rc = ib_post_send(info->id->qp, &send_wr, NULL); in smbd_post_send_negotiate_req()
743 atomic_dec(&info->send_pending); in smbd_post_send_negotiate_req()
744 ib_dma_unmap_single(info->id->device, request->sge[0].addr, in smbd_post_send_negotiate_req()
745 request->sge[0].length, DMA_TO_DEVICE); in smbd_post_send_negotiate_req()
750 mempool_free(request, info->request_mempool); in smbd_post_send_negotiate_req()
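
The field assignments above fill the SMB Direct negotiate request. Its wire layout per [MS-SMBD] 2.2.1 is roughly the following (a sketch; the exact in-tree declaration is assumed, all fields little-endian):

    struct smbd_negotiate_req {
        __le16 min_version;         /* lowest protocol version offered (SMBD_V1 = 0x0100) */
        __le16 max_version;         /* highest protocol version offered */
        __le16 reserved;
        __le16 credits_requested;   /* send credits asked of the peer */
        __le32 preferred_send_size;
        __le32 max_receive_size;
        __le32 max_fragmented_size;
    } __packed;
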
756 * This implements [MS-SMBD] 3.1.5.9
766 spin_lock(&info->lock_new_credits_offered); in manage_credits_prior_sending()
767 new_credits = info->new_credits_offered; in manage_credits_prior_sending()
768 info->new_credits_offered = 0; in manage_credits_prior_sending()
769 spin_unlock(&info->lock_new_credits_offered); in manage_credits_prior_sending()
785 if (info->keep_alive_requested == KEEP_ALIVE_PENDING) { in manage_keep_alive_before_sending()
786 info->keep_alive_requested = KEEP_ALIVE_SENT; in manage_keep_alive_before_sending()
799 for (i = 0; i < request->num_sge; i++) { in smbd_post_send()
802 i, request->sge[i].addr, request->sge[i].length); in smbd_post_send()
804 info->id->device, in smbd_post_send()
805 request->sge[i].addr, in smbd_post_send()
806 request->sge[i].length, in smbd_post_send()
810 request->cqe.done = send_done; in smbd_post_send()
813 send_wr.wr_cqe = &request->cqe; in smbd_post_send()
814 send_wr.sg_list = request->sge; in smbd_post_send()
815 send_wr.num_sge = request->num_sge; in smbd_post_send()
819 rc = ib_post_send(info->id->qp, &send_wr, NULL); in smbd_post_send()
823 rc = -EAGAIN; in smbd_post_send()
826 mod_delayed_work(info->workqueue, &info->idle_timer_work, in smbd_post_send()
827 info->keep_alive_interval*HZ); in smbd_post_send()
845 rc = wait_event_interruptible(info->wait_send_queue, in smbd_post_send_iter()
846 atomic_read(&info->send_credits) > 0 || in smbd_post_send_iter()
847 info->transport_status != SMBD_CONNECTED); in smbd_post_send_iter()
851 if (info->transport_status != SMBD_CONNECTED) { in smbd_post_send_iter()
853 rc = -EAGAIN; in smbd_post_send_iter()
856 if (unlikely(atomic_dec_return(&info->send_credits) < 0)) { in smbd_post_send_iter()
857 atomic_inc(&info->send_credits); in smbd_post_send_iter()
862 wait_event(info->wait_post_send, in smbd_post_send_iter()
863 atomic_read(&info->send_pending) < info->send_credit_target || in smbd_post_send_iter()
864 info->transport_status != SMBD_CONNECTED); in smbd_post_send_iter()
866 if (info->transport_status != SMBD_CONNECTED) { in smbd_post_send_iter()
868 rc = -EAGAIN; in smbd_post_send_iter()
872 if (unlikely(atomic_inc_return(&info->send_pending) > in smbd_post_send_iter()
873 info->send_credit_target)) { in smbd_post_send_iter()
874 atomic_dec(&info->send_pending); in smbd_post_send_iter()
878 request = mempool_alloc(info->request_mempool, GFP_KERNEL); in smbd_post_send_iter()
880 rc = -ENOMEM; in smbd_post_send_iter()
884 request->info = info; in smbd_post_send_iter()
885 memset(request->sge, 0, sizeof(request->sge)); in smbd_post_send_iter()
892 .sge = request->sge, in smbd_post_send_iter()
893 .device = info->id->device, in smbd_post_send_iter()
894 .local_dma_lkey = info->pd->local_dma_lkey, in smbd_post_send_iter()
903 request->num_sge = extract.nr_sge; in smbd_post_send_iter()
904 *_remaining_data_length -= data_length; in smbd_post_send_iter()
907 request->num_sge = 1; in smbd_post_send_iter()
912 packet->credits_requested = cpu_to_le16(info->send_credit_target); in smbd_post_send_iter()
915 atomic_add(new_credits, &info->receive_credits); in smbd_post_send_iter()
916 packet->credits_granted = cpu_to_le16(new_credits); in smbd_post_send_iter()
918 info->send_immediate = false; in smbd_post_send_iter()
920 packet->flags = 0; in smbd_post_send_iter()
922 packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED); in smbd_post_send_iter()
924 packet->reserved = 0; in smbd_post_send_iter()
926 packet->data_offset = 0; in smbd_post_send_iter()
928 packet->data_offset = cpu_to_le32(24); in smbd_post_send_iter()
929 packet->data_length = cpu_to_le32(data_length); in smbd_post_send_iter()
930 packet->remaining_data_length = cpu_to_le32(*_remaining_data_length); in smbd_post_send_iter()
931 packet->padding = 0; in smbd_post_send_iter()
934 le16_to_cpu(packet->credits_requested), in smbd_post_send_iter()
935 le16_to_cpu(packet->credits_granted), in smbd_post_send_iter()
936 le32_to_cpu(packet->data_offset), in smbd_post_send_iter()
937 le32_to_cpu(packet->data_length), in smbd_post_send_iter()
938 le32_to_cpu(packet->remaining_data_length)); in smbd_post_send_iter()
946 request->sge[0].addr = ib_dma_map_single(info->id->device, in smbd_post_send_iter()
950 if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) { in smbd_post_send_iter()
951 rc = -EIO; in smbd_post_send_iter()
952 request->sge[0].addr = 0; in smbd_post_send_iter()
956 request->sge[0].length = header_length; in smbd_post_send_iter()
957 request->sge[0].lkey = info->pd->local_dma_lkey; in smbd_post_send_iter()
964 for (i = 0; i < request->num_sge; i++) in smbd_post_send_iter()
965 if (request->sge[i].addr) in smbd_post_send_iter()
966 ib_dma_unmap_single(info->id->device, in smbd_post_send_iter()
967 request->sge[i].addr, in smbd_post_send_iter()
968 request->sge[i].length, in smbd_post_send_iter()
970 mempool_free(request, info->request_mempool); in smbd_post_send_iter()
973 spin_lock(&info->lock_new_credits_offered); in smbd_post_send_iter()
974 info->new_credits_offered += new_credits; in smbd_post_send_iter()
975 spin_unlock(&info->lock_new_credits_offered); in smbd_post_send_iter()
976 atomic_sub(new_credits, &info->receive_credits); in smbd_post_send_iter()
979 if (atomic_dec_and_test(&info->send_pending)) in smbd_post_send_iter()
980 wake_up(&info->wait_send_pending); in smbd_post_send_iter()
984 atomic_inc(&info->send_credits); in smbd_post_send_iter()
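
The packet->... assignments above build the header of an SMB Direct data transfer message ([MS-SMBD] 2.2.3). The fixed fields occupy 20 bytes; with the 4-byte padding field the payload starts 8-byte aligned, which is why data_offset is set to 24 whenever a payload follows. A sketch of the layout (the exact in-tree declaration is assumed):

    struct smbd_data_transfer {
        __le16 credits_requested;
        __le16 credits_granted;
        __le16 flags;                   /* e.g. SMB_DIRECT_RESPONSE_REQUESTED for keep-alive */
        __le16 reserved;
        __le32 remaining_data_length;   /* bytes of this upper-layer message still to come */
        __le32 data_offset;             /* 24 when data is present, 0 for an empty message */
        __le32 data_length;             /* payload bytes carried in this message */
        __le32 padding;                 /* aligns the payload to 8 bytes */
        __u8   buffer[];                /* the payload itself */
    } __packed;
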
993 * while there is no upper layer payload to send at the time
999 info->count_send_empty++; in smbd_post_send_empty()
1012 int rc = -EIO; in smbd_post_recv()
1014 response->sge.addr = ib_dma_map_single( in smbd_post_recv()
1015 info->id->device, response->packet, in smbd_post_recv()
1016 info->max_receive_size, DMA_FROM_DEVICE); in smbd_post_recv()
1017 if (ib_dma_mapping_error(info->id->device, response->sge.addr)) in smbd_post_recv()
1020 response->sge.length = info->max_receive_size; in smbd_post_recv()
1021 response->sge.lkey = info->pd->local_dma_lkey; in smbd_post_recv()
1023 response->cqe.done = recv_done; in smbd_post_recv()
1025 recv_wr.wr_cqe = &response->cqe; in smbd_post_recv()
1027 recv_wr.sg_list = &response->sge; in smbd_post_recv()
1030 rc = ib_post_recv(info->id->qp, &recv_wr, NULL); in smbd_post_recv()
1032 ib_dma_unmap_single(info->id->device, response->sge.addr, in smbd_post_recv()
1033 response->sge.length, DMA_FROM_DEVICE); in smbd_post_recv()
1041 /* Perform SMBD negotiate according to [MS-SMBD] 3.1.5.2 */
1047 response->type = SMBD_NEGOTIATE_RESP; in smbd_negotiate()
1050 rc, response->sge.addr, in smbd_negotiate()
1051 response->sge.length, response->sge.lkey); in smbd_negotiate()
1055 init_completion(&info->negotiate_completion); in smbd_negotiate()
1056 info->negotiate_done = false; in smbd_negotiate()
1062 &info->negotiate_completion, SMBD_NEGOTIATE_TIMEOUT * HZ); in smbd_negotiate()
1065 if (info->negotiate_done) in smbd_negotiate()
1069 rc = -ETIMEDOUT; in smbd_negotiate()
1070 else if (rc == -ERESTARTSYS) in smbd_negotiate()
1071 rc = -EINTR; in smbd_negotiate()
1073 rc = -ENOTCONN; in smbd_negotiate()
1081 spin_lock(&info->empty_packet_queue_lock); in put_empty_packet()
1082 list_add_tail(&response->list, &info->empty_packet_queue); in put_empty_packet()
1083 info->count_empty_packet_queue++; in put_empty_packet()
1084 spin_unlock(&info->empty_packet_queue_lock); in put_empty_packet()
1086 queue_work(info->workqueue, &info->post_send_credits_work); in put_empty_packet()
1090 * Implement Connection.FragmentReassemblyBuffer defined in [MS-SMBD] 3.1.1.1
1104 spin_lock(&info->reassembly_queue_lock); in enqueue_reassembly()
1105 list_add_tail(&response->list, &info->reassembly_queue); in enqueue_reassembly()
1106 info->reassembly_queue_length++; in enqueue_reassembly()
1114 info->reassembly_data_length += data_length; in enqueue_reassembly()
1115 spin_unlock(&info->reassembly_queue_lock); in enqueue_reassembly()
1116 info->count_reassembly_queue++; in enqueue_reassembly()
1117 info->count_enqueue_reassembly_queue++; in enqueue_reassembly()
1129 if (!list_empty(&info->reassembly_queue)) { in _get_first_reassembly()
1131 &info->reassembly_queue, in _get_first_reassembly()
1143 spin_lock_irqsave(&info->empty_packet_queue_lock, flags); in get_empty_queue_buffer()
1144 if (!list_empty(&info->empty_packet_queue)) { in get_empty_queue_buffer()
1146 &info->empty_packet_queue, in get_empty_queue_buffer()
1148 list_del(&ret->list); in get_empty_queue_buffer()
1149 info->count_empty_packet_queue--; in get_empty_queue_buffer()
1151 spin_unlock_irqrestore(&info->empty_packet_queue_lock, flags); in get_empty_queue_buffer()
1159 * pre-allocated in advance.
1167 spin_lock_irqsave(&info->receive_queue_lock, flags); in get_receive_buffer()
1168 if (!list_empty(&info->receive_queue)) { in get_receive_buffer()
1170 &info->receive_queue, in get_receive_buffer()
1172 list_del(&ret->list); in get_receive_buffer()
1173 info->count_receive_queue--; in get_receive_buffer()
1174 info->count_get_receive_buffer++; in get_receive_buffer()
1176 spin_unlock_irqrestore(&info->receive_queue_lock, flags); in get_receive_buffer()
1192 ib_dma_unmap_single(info->id->device, response->sge.addr, in put_receive_buffer()
1193 response->sge.length, DMA_FROM_DEVICE); in put_receive_buffer()
1195 spin_lock_irqsave(&info->receive_queue_lock, flags); in put_receive_buffer()
1196 list_add_tail(&response->list, &info->receive_queue); in put_receive_buffer()
1197 info->count_receive_queue++; in put_receive_buffer()
1198 info->count_put_receive_buffer++; in put_receive_buffer()
1199 spin_unlock_irqrestore(&info->receive_queue_lock, flags); in put_receive_buffer()
1201 queue_work(info->workqueue, &info->post_send_credits_work); in put_receive_buffer()
1210 INIT_LIST_HEAD(&info->reassembly_queue); in allocate_receive_buffers()
1211 spin_lock_init(&info->reassembly_queue_lock); in allocate_receive_buffers()
1212 info->reassembly_data_length = 0; in allocate_receive_buffers()
1213 info->reassembly_queue_length = 0; in allocate_receive_buffers()
1215 INIT_LIST_HEAD(&info->receive_queue); in allocate_receive_buffers()
1216 spin_lock_init(&info->receive_queue_lock); in allocate_receive_buffers()
1217 info->count_receive_queue = 0; in allocate_receive_buffers()
1219 INIT_LIST_HEAD(&info->empty_packet_queue); in allocate_receive_buffers()
1220 spin_lock_init(&info->empty_packet_queue_lock); in allocate_receive_buffers()
1221 info->count_empty_packet_queue = 0; in allocate_receive_buffers()
1223 init_waitqueue_head(&info->wait_receive_queues); in allocate_receive_buffers()
1226 response = mempool_alloc(info->response_mempool, GFP_KERNEL); in allocate_receive_buffers()
1230 response->info = info; in allocate_receive_buffers()
1231 list_add_tail(&response->list, &info->receive_queue); in allocate_receive_buffers()
1232 info->count_receive_queue++; in allocate_receive_buffers()
1238 while (!list_empty(&info->receive_queue)) { in allocate_receive_buffers()
1240 &info->receive_queue, in allocate_receive_buffers()
1242 list_del(&response->list); in allocate_receive_buffers()
1243 info->count_receive_queue--; in allocate_receive_buffers()
1245 mempool_free(response, info->response_mempool); in allocate_receive_buffers()
1247 return -ENOMEM; in allocate_receive_buffers()
1255 mempool_free(response, info->response_mempool); in destroy_receive_buffers()
1258 mempool_free(response, info->response_mempool); in destroy_receive_buffers()
1261 /* Implement idle connection timer [MS-SMBD] 3.1.6.2 */
1268 if (info->keep_alive_requested != KEEP_ALIVE_NONE) { in idle_connection_timer()
1270 "error status info->keep_alive_requested=%d\n", in idle_connection_timer()
1271 info->keep_alive_requested); in idle_connection_timer()
1280 queue_delayed_work(info->workqueue, &info->idle_timer_work, in idle_connection_timer()
1281 info->keep_alive_interval*HZ); in idle_connection_timer()
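
idle_connection_timer() treats any keep-alive state other than KEEP_ALIVE_NONE at expiry as an error, and the last two lines re-arm the delayed work for the next interval. The three KEEP_ALIVE_* values used across this listing form a small state machine; restated here as an assumption:

    enum keep_alive_status {
        KEEP_ALIVE_NONE,        /* nothing outstanding; reset whenever a data transfer arrives */
        KEEP_ALIVE_PENDING,     /* the peer set SMB_DIRECT_RESPONSE_REQUESTED; a reply is owed */
        KEEP_ALIVE_SENT,        /* the reply went out on the last send (manage_keep_alive_before_sending) */
    };
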
1285 * Destroy the transport and related RDMA and memory resources
1291 struct smbd_connection *info = server->smbd_conn; in smbd_destroy()
1301 if (info->transport_status != SMBD_DISCONNECTED) { in smbd_destroy()
1302 rdma_disconnect(server->smbd_conn->id); in smbd_destroy()
1305 info->disconn_wait, in smbd_destroy()
1306 info->transport_status == SMBD_DISCONNECTED); in smbd_destroy()
1310 ib_drain_qp(info->id->qp); in smbd_destroy()
1311 rdma_destroy_qp(info->id); in smbd_destroy()
1314 cancel_delayed_work_sync(&info->idle_timer_work); in smbd_destroy()
1317 wait_event(info->wait_send_pending, in smbd_destroy()
1318 atomic_read(&info->send_pending) == 0); in smbd_destroy()
1323 spin_lock_irqsave(&info->reassembly_queue_lock, flags); in smbd_destroy()
1326 list_del(&response->list); in smbd_destroy()
1328 &info->reassembly_queue_lock, flags); in smbd_destroy()
1332 &info->reassembly_queue_lock, flags); in smbd_destroy()
1334 info->reassembly_data_length = 0; in smbd_destroy()
1337 wait_event(info->wait_receive_queues, in smbd_destroy()
1338 info->count_receive_queue + info->count_empty_packet_queue in smbd_destroy()
1339 == info->receive_credit_max); in smbd_destroy()
1343 * For performance reasons, memory registration and deregistration in smbd_destroy()
1345 * blocked on transport srv_mutex while holding memory registration. in smbd_destroy()
1347 * path when sending data, and then release memory registrations. in smbd_destroy()
1350 wake_up_interruptible_all(&info->wait_mr); in smbd_destroy()
1351 while (atomic_read(&info->mr_used_count)) { in smbd_destroy()
1358 ib_free_cq(info->send_cq); in smbd_destroy()
1359 ib_free_cq(info->recv_cq); in smbd_destroy()
1360 ib_dealloc_pd(info->pd); in smbd_destroy()
1361 rdma_destroy_id(info->id); in smbd_destroy()
1364 mempool_destroy(info->request_mempool); in smbd_destroy()
1365 kmem_cache_destroy(info->request_cache); in smbd_destroy()
1367 mempool_destroy(info->response_mempool); in smbd_destroy()
1368 kmem_cache_destroy(info->response_cache); in smbd_destroy()
1370 info->transport_status = SMBD_DESTROYED; in smbd_destroy()
1372 destroy_workqueue(info->workqueue); in smbd_destroy()
1375 server->smbd_conn = NULL; in smbd_destroy()
1386 if (!server->smbd_conn) { in smbd_reconnect()
1395 if (server->smbd_conn->transport_status == SMBD_CONNECTED) { in smbd_reconnect()
1402 server->smbd_conn = smbd_get_connection( in smbd_reconnect()
1403 server, (struct sockaddr *) &server->dstaddr); in smbd_reconnect()
1405 if (server->smbd_conn) { in smbd_reconnect()
1406 cifs_dbg(VFS, "RDMA transport re-established\n"); in smbd_reconnect()
1407 trace_smb3_smbd_connect_done(server->hostname, server->conn_id, &server->dstaddr); in smbd_reconnect()
1410 trace_smb3_smbd_connect_err(server->hostname, server->conn_id, &server->dstaddr); in smbd_reconnect()
1411 return -ENOENT; in smbd_reconnect()
1417 destroy_workqueue(info->workqueue); in destroy_caches_and_workqueue()
1418 mempool_destroy(info->response_mempool); in destroy_caches_and_workqueue()
1419 kmem_cache_destroy(info->response_cache); in destroy_caches_and_workqueue()
1420 mempool_destroy(info->request_mempool); in destroy_caches_and_workqueue()
1421 kmem_cache_destroy(info->request_cache); in destroy_caches_and_workqueue()
1431 info->request_cache = in allocate_caches_and_workqueue()
1437 if (!info->request_cache) in allocate_caches_and_workqueue()
1438 return -ENOMEM; in allocate_caches_and_workqueue()
1440 info->request_mempool = in allocate_caches_and_workqueue()
1441 mempool_create(info->send_credit_target, mempool_alloc_slab, in allocate_caches_and_workqueue()
1442 mempool_free_slab, info->request_cache); in allocate_caches_and_workqueue()
1443 if (!info->request_mempool) in allocate_caches_and_workqueue()
1447 info->response_cache = in allocate_caches_and_workqueue()
1451 info->max_receive_size, in allocate_caches_and_workqueue()
1453 if (!info->response_cache) in allocate_caches_and_workqueue()
1456 info->response_mempool = in allocate_caches_and_workqueue()
1457 mempool_create(info->receive_credit_max, mempool_alloc_slab, in allocate_caches_and_workqueue()
1458 mempool_free_slab, info->response_cache); in allocate_caches_and_workqueue()
1459 if (!info->response_mempool) in allocate_caches_and_workqueue()
1463 info->workqueue = create_workqueue(name); in allocate_caches_and_workqueue()
1464 if (!info->workqueue) in allocate_caches_and_workqueue()
1467 rc = allocate_receive_buffers(info, info->receive_credit_max); in allocate_caches_and_workqueue()
1476 destroy_workqueue(info->workqueue); in allocate_caches_and_workqueue()
1478 mempool_destroy(info->response_mempool); in allocate_caches_and_workqueue()
1480 kmem_cache_destroy(info->response_cache); in allocate_caches_and_workqueue()
1482 mempool_destroy(info->request_mempool); in allocate_caches_and_workqueue()
1484 kmem_cache_destroy(info->request_cache); in allocate_caches_and_workqueue()
1485 return -ENOMEM; in allocate_caches_and_workqueue()
1504 info->transport_status = SMBD_CONNECTING; in _smbd_get_connection()
1511 if (smbd_send_credit_target > info->id->device->attrs.max_cqe || in _smbd_get_connection()
1512 smbd_send_credit_target > info->id->device->attrs.max_qp_wr) { in _smbd_get_connection()
1515 info->id->device->attrs.max_cqe, in _smbd_get_connection()
1516 info->id->device->attrs.max_qp_wr); in _smbd_get_connection()
1520 if (smbd_receive_credit_max > info->id->device->attrs.max_cqe || in _smbd_get_connection()
1521 smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) { in _smbd_get_connection()
1524 info->id->device->attrs.max_cqe, in _smbd_get_connection()
1525 info->id->device->attrs.max_qp_wr); in _smbd_get_connection()
1529 info->receive_credit_max = smbd_receive_credit_max; in _smbd_get_connection()
1530 info->send_credit_target = smbd_send_credit_target; in _smbd_get_connection()
1531 info->max_send_size = smbd_max_send_size; in _smbd_get_connection()
1532 info->max_fragmented_recv_size = smbd_max_fragmented_recv_size; in _smbd_get_connection()
1533 info->max_receive_size = smbd_max_receive_size; in _smbd_get_connection()
1534 info->keep_alive_interval = smbd_keep_alive_interval; in _smbd_get_connection()
1536 if (info->id->device->attrs.max_send_sge < SMBDIRECT_MAX_SEND_SGE || in _smbd_get_connection()
1537 info->id->device->attrs.max_recv_sge < SMBDIRECT_MAX_RECV_SGE) { in _smbd_get_connection()
1541 info->id->device->name, in _smbd_get_connection()
1542 info->id->device->attrs.max_send_sge, in _smbd_get_connection()
1543 info->id->device->attrs.max_recv_sge); in _smbd_get_connection()
1547 info->send_cq = NULL; in _smbd_get_connection()
1548 info->recv_cq = NULL; in _smbd_get_connection()
1549 info->send_cq = in _smbd_get_connection()
1550 ib_alloc_cq_any(info->id->device, info, in _smbd_get_connection()
1551 info->send_credit_target, IB_POLL_SOFTIRQ); in _smbd_get_connection()
1552 if (IS_ERR(info->send_cq)) { in _smbd_get_connection()
1553 info->send_cq = NULL; in _smbd_get_connection()
1557 info->recv_cq = in _smbd_get_connection()
1558 ib_alloc_cq_any(info->id->device, info, in _smbd_get_connection()
1559 info->receive_credit_max, IB_POLL_SOFTIRQ); in _smbd_get_connection()
1560 if (IS_ERR(info->recv_cq)) { in _smbd_get_connection()
1561 info->recv_cq = NULL; in _smbd_get_connection()
1568 qp_attr.cap.max_send_wr = info->send_credit_target; in _smbd_get_connection()
1569 qp_attr.cap.max_recv_wr = info->receive_credit_max; in _smbd_get_connection()
1575 qp_attr.send_cq = info->send_cq; in _smbd_get_connection()
1576 qp_attr.recv_cq = info->recv_cq; in _smbd_get_connection()
1579 rc = rdma_create_qp(info->id, info->pd, &qp_attr); in _smbd_get_connection()
1589 min(info->id->device->attrs.max_qp_rd_atom, in _smbd_get_connection()
1591 info->responder_resources = conn_param.responder_resources; in _smbd_get_connection()
1593 info->responder_resources); in _smbd_get_connection()
1596 info->id->device->ops.get_port_immutable( in _smbd_get_connection()
1597 info->id->device, info->id->port_num, &port_immutable); in _smbd_get_connection()
1599 ird_ord_hdr[0] = info->responder_resources; in _smbd_get_connection()
1613 &addr_in->sin_addr, port); in _smbd_get_connection()
1615 init_waitqueue_head(&info->conn_wait); in _smbd_get_connection()
1616 init_waitqueue_head(&info->disconn_wait); in _smbd_get_connection()
1617 init_waitqueue_head(&info->wait_reassembly_queue); in _smbd_get_connection()
1618 rc = rdma_connect(info->id, &conn_param); in _smbd_get_connection()
1625 info->conn_wait, info->transport_status != SMBD_CONNECTING); in _smbd_get_connection()
1627 if (info->transport_status != SMBD_CONNECTED) { in _smbd_get_connection()
1640 init_waitqueue_head(&info->wait_send_queue); in _smbd_get_connection()
1641 INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer); in _smbd_get_connection()
1642 queue_delayed_work(info->workqueue, &info->idle_timer_work, in _smbd_get_connection()
1643 info->keep_alive_interval*HZ); in _smbd_get_connection()
1645 init_waitqueue_head(&info->wait_send_pending); in _smbd_get_connection()
1646 atomic_set(&info->send_pending, 0); in _smbd_get_connection()
1648 init_waitqueue_head(&info->wait_post_send); in _smbd_get_connection()
1650 INIT_WORK(&info->disconnect_work, smbd_disconnect_rdma_work); in _smbd_get_connection()
1651 INIT_WORK(&info->post_send_credits_work, smbd_post_send_credits); in _smbd_get_connection()
1652 info->new_credits_offered = 0; in _smbd_get_connection()
1653 spin_lock_init(&info->lock_new_credits_offered); in _smbd_get_connection()
1663 log_rdma_mr(ERR, "memory registration allocation failed\n"); in _smbd_get_connection()
1671 server->smbd_conn = info; in _smbd_get_connection()
1676 cancel_delayed_work_sync(&info->idle_timer_work); in _smbd_get_connection()
1678 info->transport_status = SMBD_NEGOTIATE_FAILED; in _smbd_get_connection()
1679 init_waitqueue_head(&info->conn_wait); in _smbd_get_connection()
1680 rdma_disconnect(info->id); in _smbd_get_connection()
1681 wait_event(info->conn_wait, in _smbd_get_connection()
1682 info->transport_status == SMBD_DISCONNECTED); in _smbd_get_connection()
1686 rdma_destroy_qp(info->id); in _smbd_get_connection()
1690 if (info->send_cq) in _smbd_get_connection()
1691 ib_free_cq(info->send_cq); in _smbd_get_connection()
1692 if (info->recv_cq) in _smbd_get_connection()
1693 ib_free_cq(info->recv_cq); in _smbd_get_connection()
1696 ib_dealloc_pd(info->pd); in _smbd_get_connection()
1697 rdma_destroy_id(info->id); in _smbd_get_connection()
1745 * No need to hold the reassembly queue lock all the time as we are in smbd_recv_buf()
1749 log_read(INFO, "size=%d info->reassembly_data_length=%d\n", size, in smbd_recv_buf()
1750 info->reassembly_data_length); in smbd_recv_buf()
1751 if (info->reassembly_data_length >= size) { in smbd_recv_buf()
1763 queue_length = info->reassembly_queue_length; in smbd_recv_buf()
1766 offset = info->first_entry_offset; in smbd_recv_buf()
1770 data_length = le32_to_cpu(data_transfer->data_length); in smbd_recv_buf()
1773 data_transfer->remaining_data_length); in smbd_recv_buf()
1774 data_offset = le32_to_cpu(data_transfer->data_offset); in smbd_recv_buf()
1784 if (response->first_segment && size == 4) { in smbd_recv_buf()
1789 response->first_segment = false; in smbd_recv_buf()
1795 to_copy = min_t(int, data_length - offset, to_read); in smbd_recv_buf()
1802 if (to_copy == data_length - offset) { in smbd_recv_buf()
1803 queue_length--; in smbd_recv_buf()
1805 * No need to lock if we are not at the in smbd_recv_buf()
1809 list_del(&response->list); in smbd_recv_buf()
1812 &info->reassembly_queue_lock); in smbd_recv_buf()
1813 list_del(&response->list); in smbd_recv_buf()
1815 &info->reassembly_queue_lock); in smbd_recv_buf()
1818 info->count_reassembly_queue--; in smbd_recv_buf()
1819 info->count_dequeue_reassembly_queue++; in smbd_recv_buf()
1826 to_read -= to_copy; in smbd_recv_buf()
1829 …log_read(INFO, "_get_first_reassembly memcpy %d bytes data_transfer_length-offset=%d after that to… in smbd_recv_buf()
1830 to_copy, data_length - offset, in smbd_recv_buf()
1834 spin_lock_irq(&info->reassembly_queue_lock); in smbd_recv_buf()
1835 info->reassembly_data_length -= data_read; in smbd_recv_buf()
1836 info->reassembly_queue_length -= queue_removed; in smbd_recv_buf()
1837 spin_unlock_irq(&info->reassembly_queue_lock); in smbd_recv_buf()
1839 info->first_entry_offset = offset; in smbd_recv_buf()
1841 data_read, info->reassembly_data_length, in smbd_recv_buf()
1842 info->first_entry_offset); in smbd_recv_buf()
1849 info->wait_reassembly_queue, in smbd_recv_buf()
1850 info->reassembly_data_length >= size || in smbd_recv_buf()
1851 info->transport_status != SMBD_CONNECTED); in smbd_recv_buf()
1856 if (info->transport_status != SMBD_CONNECTED) { in smbd_recv_buf()
1858 return -ECONNABORTED; in smbd_recv_buf()
1880 info->wait_reassembly_queue, in smbd_recv_page()
1881 info->reassembly_data_length >= to_read || in smbd_recv_page()
1882 info->transport_status != SMBD_CONNECTED); in smbd_recv_page()
1911 if (iov_iter_rw(&msg->msg_iter) == WRITE) { in smbd_recv()
1914 iov_iter_rw(&msg->msg_iter)); in smbd_recv()
1915 rc = -EINVAL; in smbd_recv()
1919 switch (iov_iter_type(&msg->msg_iter)) { in smbd_recv()
1921 buf = msg->msg_iter.kvec->iov_base; in smbd_recv()
1922 to_read = msg->msg_iter.kvec->iov_len; in smbd_recv()
1927 page = msg->msg_iter.bvec->bv_page; in smbd_recv()
1928 page_offset = msg->msg_iter.bvec->bv_offset; in smbd_recv()
1929 to_read = msg->msg_iter.bvec->bv_len; in smbd_recv()
1936 iov_iter_type(&msg->msg_iter)); in smbd_recv()
1937 rc = -EINVAL; in smbd_recv()
1943 msg->msg_iter.count = 0; in smbd_recv()
1956 struct smbd_connection *info = server->smbd_conn; in smbd_send()
1962 if (info->transport_status != SMBD_CONNECTED) in smbd_send()
1963 return -EAGAIN; in smbd_send()
1974 if (unlikely(remaining_data_length > info->max_fragmented_send_size)) { in smbd_send()
1977 remaining_data_length, info->max_fragmented_send_size); in smbd_send()
1978 return -EINVAL; in smbd_send()
1990 for (i = 0; i < rqst->rq_nvec; i++) in smbd_send()
1991 dump_smb(rqst->rq_iov[i].iov_base, rqst->rq_iov[i].iov_len); in smbd_send()
1993 log_write(INFO, "RDMA-WR[%u] nvec=%d len=%u iter=%zu rqlen=%lu\n", in smbd_send()
1994 rqst_idx, rqst->rq_nvec, remaining_data_length, in smbd_send()
1995 iov_iter_count(&rqst->rq_iter), smb_rqst_len(server, rqst)); in smbd_send()
1999 for (i = 0; i < rqst->rq_nvec; i++) in smbd_send()
2000 klen += rqst->rq_iov[i].iov_len; in smbd_send()
2001 iov_iter_kvec(&iter, ITER_SOURCE, rqst->rq_iov, rqst->rq_nvec, klen); in smbd_send()
2007 if (iov_iter_count(&rqst->rq_iter) > 0) { in smbd_send()
2009 rc = smbd_post_send_iter(info, &rqst->rq_iter, in smbd_send()
2024 wait_event(info->wait_send_pending, in smbd_send()
2025 atomic_read(&info->send_pending) == 0); in smbd_send()
2030 static void register_mr_done(struct ib_cq *cq, struct ib_wc *wc) in register_mr_done() argument
2035 if (wc->status) { in register_mr_done()
2036 log_rdma_mr(ERR, "status=%d\n", wc->status); in register_mr_done()
2037 cqe = wc->wr_cqe; in register_mr_done()
2039 smbd_disconnect_rdma_connection(mr->conn); in register_mr_done()
2048 * There is one workqueue that recovers MRs, there is no need to lock as the
2059 list_for_each_entry(smbdirect_mr, &info->mr_list, list) { in smbd_mr_recovery_work()
2060 if (smbdirect_mr->state == MR_ERROR) { in smbd_mr_recovery_work()
2063 rc = ib_dereg_mr(smbdirect_mr->mr); in smbd_mr_recovery_work()
2072 smbdirect_mr->mr = ib_alloc_mr( in smbd_mr_recovery_work()
2073 info->pd, info->mr_type, in smbd_mr_recovery_work()
2074 info->max_frmr_depth); in smbd_mr_recovery_work()
2075 if (IS_ERR(smbdirect_mr->mr)) { in smbd_mr_recovery_work()
2077 info->mr_type, in smbd_mr_recovery_work()
2078 info->max_frmr_depth); in smbd_mr_recovery_work()
2086 smbdirect_mr->state = MR_READY; in smbd_mr_recovery_work()
2088 /* smbdirect_mr->state is updated by this function in smbd_mr_recovery_work()
2091 * implicates a memory barrier and guarantees this in smbd_mr_recovery_work()
2095 if (atomic_inc_return(&info->mr_ready_count) == 1) in smbd_mr_recovery_work()
2096 wake_up_interruptible(&info->wait_mr); in smbd_mr_recovery_work()
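
The recovery work above walks mr_list and re-allocates any MR that ended up in MR_ERROR, then returns it to the ready pool. The MR states referenced throughout this listing form a small lifecycle; restated here as an assumption:

    enum mr_state {
        MR_READY,           /* on mr_list, counted in mr_ready_count; get_mr() may hand it out */
        MR_REGISTERED,      /* claimed by an I/O; IB_WR_REG_MR posted and rkey advertised to the peer */
        MR_INVALIDATED,     /* invalidation completed; pages still DMA-mapped until deregister */
        MR_ERROR,           /* registration or invalidation failed; mr_recovery_work rebuilds it */
    };
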
2104 cancel_work_sync(&info->mr_recovery_work); in destroy_mr_list()
2105 list_for_each_entry_safe(mr, tmp, &info->mr_list, list) { in destroy_mr_list()
2106 if (mr->state == MR_INVALIDATED) in destroy_mr_list()
2107 ib_dma_unmap_sg(info->id->device, mr->sgt.sgl, in destroy_mr_list()
2108 mr->sgt.nents, mr->dir); in destroy_mr_list()
2109 ib_dereg_mr(mr->mr); in destroy_mr_list()
2110 kfree(mr->sgt.sgl); in destroy_mr_list()
2127 INIT_LIST_HEAD(&info->mr_list); in allocate_mr_list()
2128 init_waitqueue_head(&info->wait_mr); in allocate_mr_list()
2129 spin_lock_init(&info->mr_list_lock); in allocate_mr_list()
2130 atomic_set(&info->mr_ready_count, 0); in allocate_mr_list()
2131 atomic_set(&info->mr_used_count, 0); in allocate_mr_list()
2132 init_waitqueue_head(&info->wait_for_mr_cleanup); in allocate_mr_list()
2133 INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work); in allocate_mr_list()
2135 for (i = 0; i < info->responder_resources * 2; i++) { in allocate_mr_list()
2139 smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type, in allocate_mr_list()
2140 info->max_frmr_depth); in allocate_mr_list()
2141 if (IS_ERR(smbdirect_mr->mr)) { in allocate_mr_list()
2143 info->mr_type, info->max_frmr_depth); in allocate_mr_list()
2146 smbdirect_mr->sgt.sgl = kcalloc(info->max_frmr_depth, in allocate_mr_list()
2149 if (!smbdirect_mr->sgt.sgl) { in allocate_mr_list()
2151 ib_dereg_mr(smbdirect_mr->mr); in allocate_mr_list()
2154 smbdirect_mr->state = MR_READY; in allocate_mr_list()
2155 smbdirect_mr->conn = info; in allocate_mr_list()
2157 list_add_tail(&smbdirect_mr->list, &info->mr_list); in allocate_mr_list()
2158 atomic_inc(&info->mr_ready_count); in allocate_mr_list()
2165 list_for_each_entry_safe(smbdirect_mr, tmp, &info->mr_list, list) { in allocate_mr_list()
2166 list_del(&smbdirect_mr->list); in allocate_mr_list()
2167 ib_dereg_mr(smbdirect_mr->mr); in allocate_mr_list()
2168 kfree(smbdirect_mr->sgt.sgl); in allocate_mr_list()
2171 return -ENOMEM; in allocate_mr_list()
2187 rc = wait_event_interruptible(info->wait_mr, in get_mr()
2188 atomic_read(&info->mr_ready_count) || in get_mr()
2189 info->transport_status != SMBD_CONNECTED); in get_mr()
2195 if (info->transport_status != SMBD_CONNECTED) { in get_mr()
2196 log_rdma_mr(ERR, "info->transport_status=%x\n", in get_mr()
2197 info->transport_status); in get_mr()
2201 spin_lock(&info->mr_list_lock); in get_mr()
2202 list_for_each_entry(ret, &info->mr_list, list) { in get_mr()
2203 if (ret->state == MR_READY) { in get_mr()
2204 ret->state = MR_REGISTERED; in get_mr()
2205 spin_unlock(&info->mr_list_lock); in get_mr()
2206 atomic_dec(&info->mr_ready_count); in get_mr()
2207 atomic_inc(&info->mr_used_count); in get_mr()
2212 spin_unlock(&info->mr_list_lock); in get_mr()
2230 memset(sgt->sgl, 0, max_sg * sizeof(struct scatterlist)); in smbd_iter_to_mr()
2234 if (sgt->nents > 0) in smbd_iter_to_mr()
2235 sg_mark_end(&sgt->sgl[sgt->nents - 1]); in smbd_iter_to_mr()
2240 * Register memory for RDMA read/write
2241 * iter: the buffer to register memory with
2255 num_pages = iov_iter_npages(iter, info->max_frmr_depth + 1); in smbd_register_mr()
2256 if (num_pages > info->max_frmr_depth) { in smbd_register_mr()
2258 num_pages, info->max_frmr_depth); in smbd_register_mr()
2270 smbdirect_mr->dir = dir; in smbd_register_mr()
2271 smbdirect_mr->need_invalidate = need_invalidate; in smbd_register_mr()
2272 smbdirect_mr->sgt.nents = 0; in smbd_register_mr()
2273 smbdirect_mr->sgt.orig_nents = 0; in smbd_register_mr()
2276 num_pages, iov_iter_count(iter), info->max_frmr_depth); in smbd_register_mr()
2277 smbd_iter_to_mr(info, iter, &smbdirect_mr->sgt, info->max_frmr_depth); in smbd_register_mr()
2279 rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgt.sgl, in smbd_register_mr()
2280 smbdirect_mr->sgt.nents, dir); in smbd_register_mr()
2287 rc = ib_map_mr_sg(smbdirect_mr->mr, smbdirect_mr->sgt.sgl, in smbd_register_mr()
2288 smbdirect_mr->sgt.nents, NULL, PAGE_SIZE); in smbd_register_mr()
2289 if (rc != smbdirect_mr->sgt.nents) { in smbd_register_mr()
2292 rc, smbdirect_mr->sgt.nents); in smbd_register_mr()
2296 ib_update_fast_reg_key(smbdirect_mr->mr, in smbd_register_mr()
2297 ib_inc_rkey(smbdirect_mr->mr->rkey)); in smbd_register_mr()
2298 reg_wr = &smbdirect_mr->wr; in smbd_register_mr()
2299 reg_wr->wr.opcode = IB_WR_REG_MR; in smbd_register_mr()
2300 smbdirect_mr->cqe.done = register_mr_done; in smbd_register_mr()
2301 reg_wr->wr.wr_cqe = &smbdirect_mr->cqe; in smbd_register_mr()
2302 reg_wr->wr.num_sge = 0; in smbd_register_mr()
2303 reg_wr->wr.send_flags = IB_SEND_SIGNALED; in smbd_register_mr()
2304 reg_wr->mr = smbdirect_mr->mr; in smbd_register_mr()
2305 reg_wr->key = smbdirect_mr->mr->rkey; in smbd_register_mr()
2306 reg_wr->access = writing ? in smbd_register_mr()
2311 * There is no need to wait for completion on ib_post_send in smbd_register_mr()
2315 rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL); in smbd_register_mr()
2319 log_rdma_mr(ERR, "ib_post_send failed rc=%x reg_wr->key=%x\n", in smbd_register_mr()
2320 rc, reg_wr->key); in smbd_register_mr()
2324 ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgt.sgl, in smbd_register_mr()
2325 smbdirect_mr->sgt.nents, smbdirect_mr->dir); in smbd_register_mr()
2328 smbdirect_mr->state = MR_ERROR; in smbd_register_mr()
2329 if (atomic_dec_and_test(&info->mr_used_count)) in smbd_register_mr()
2330 wake_up(&info->wait_for_mr_cleanup); in smbd_register_mr()
2337 static void local_inv_done(struct ib_cq *cq, struct ib_wc *wc) in local_inv_done() argument
2342 cqe = wc->wr_cqe; in local_inv_done()
2344 smbdirect_mr->state = MR_INVALIDATED; in local_inv_done()
2345 if (wc->status != IB_WC_SUCCESS) { in local_inv_done()
2346 log_rdma_mr(ERR, "invalidate failed status=%x\n", wc->status); in local_inv_done()
2347 smbdirect_mr->state = MR_ERROR; in local_inv_done()
2349 complete(&smbdirect_mr->invalidate_done); in local_inv_done()
2361 struct smbd_connection *info = smbdirect_mr->conn; in smbd_deregister_mr()
2364 if (smbdirect_mr->need_invalidate) { in smbd_deregister_mr()
2366 wr = &smbdirect_mr->inv_wr; in smbd_deregister_mr()
2367 wr->opcode = IB_WR_LOCAL_INV; in smbd_deregister_mr()
2368 smbdirect_mr->cqe.done = local_inv_done; in smbd_deregister_mr()
2369 wr->wr_cqe = &smbdirect_mr->cqe; in smbd_deregister_mr()
2370 wr->num_sge = 0; in smbd_deregister_mr()
2371 wr->ex.invalidate_rkey = smbdirect_mr->mr->rkey; in smbd_deregister_mr()
2372 wr->send_flags = IB_SEND_SIGNALED; in smbd_deregister_mr()
2374 init_completion(&smbdirect_mr->invalidate_done); in smbd_deregister_mr()
2375 rc = ib_post_send(info->id->qp, wr, NULL); in smbd_deregister_mr()
2381 wait_for_completion(&smbdirect_mr->invalidate_done); in smbd_deregister_mr()
2382 smbdirect_mr->need_invalidate = false; in smbd_deregister_mr()
2388 smbdirect_mr->state = MR_INVALIDATED; in smbd_deregister_mr()
2390 if (smbdirect_mr->state == MR_INVALIDATED) { in smbd_deregister_mr()
2392 info->id->device, smbdirect_mr->sgt.sgl, in smbd_deregister_mr()
2393 smbdirect_mr->sgt.nents, in smbd_deregister_mr()
2394 smbdirect_mr->dir); in smbd_deregister_mr()
2395 smbdirect_mr->state = MR_READY; in smbd_deregister_mr()
2396 if (atomic_inc_return(&info->mr_ready_count) == 1) in smbd_deregister_mr()
2397 wake_up_interruptible(&info->wait_mr); in smbd_deregister_mr()
2403 queue_work(info->workqueue, &info->mr_recovery_work); in smbd_deregister_mr()
2406 if (atomic_dec_and_test(&info->mr_used_count)) in smbd_deregister_mr()
2407 wake_up(&info->wait_for_mr_cleanup); in smbd_deregister_mr()
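
Taken together, smbd_register_mr() and smbd_deregister_mr() bracket one direct-placement I/O. A hypothetical caller, shown only to illustrate the flow (the parameter order and the descriptor fields are assumptions):

    struct smbd_mr *mr;

    mr = smbd_register_mr(info, &iter, /* writing */ true, /* need_invalidate */ true);
    if (!mr)
        return -EAGAIN;

    /* Advertise the registered region to the server inside the SMB2 request,
     * e.g. as an RDMA buffer descriptor built from mr->mr->iova (offset),
     * mr->mr->rkey (token) and the registered length. */

    /* ... the server RDMA-reads or RDMA-writes the payload ... */

    smbd_deregister_mr(mr);     /* invalidates if needed, unmaps the pages, returns the MR to the pool */
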
2415 struct ib_sge *sge = &rdma->sge[rdma->nr_sge]; in smb_set_sge()
2418 addr = ib_dma_map_page(rdma->device, lowest_page, in smb_set_sge()
2419 off, len, rdma->direction); in smb_set_sge()
2420 if (ib_dma_mapping_error(rdma->device, addr)) in smb_set_sge()
2423 sge->addr = addr; in smb_set_sge()
2424 sge->length = len; in smb_set_sge()
2425 sge->lkey = rdma->local_dma_lkey; in smb_set_sge()
2426 rdma->nr_sge++; in smb_set_sge()
2431 * Extract page fragments from a BVEC-class iterator and add them to an RDMA
2438 const struct bio_vec *bv = iter->bvec; in smb_extract_bvec_to_rdma()
2439 unsigned long start = iter->iov_offset; in smb_extract_bvec_to_rdma()
2443 for (i = 0; i < iter->nr_segs; i++) { in smb_extract_bvec_to_rdma()
2448 start -= len; in smb_extract_bvec_to_rdma()
2452 len = min_t(size_t, maxsize, len - start); in smb_extract_bvec_to_rdma()
2456 return -EIO; in smb_extract_bvec_to_rdma()
2459 maxsize -= len; in smb_extract_bvec_to_rdma()
2460 if (rdma->nr_sge >= rdma->max_sge || maxsize <= 0) in smb_extract_bvec_to_rdma()
2471 * Extract fragments from a KVEC-class iterator and add them to an RDMA list.
2479 const struct kvec *kv = iter->kvec; in smb_extract_kvec_to_rdma()
2480 unsigned long start = iter->iov_offset; in smb_extract_kvec_to_rdma()
2484 for (i = 0; i < iter->nr_segs; i++) { in smb_extract_kvec_to_rdma()
2491 start -= len; in smb_extract_kvec_to_rdma()
2497 len = min_t(size_t, maxsize, len - start); in smb_extract_kvec_to_rdma()
2500 maxsize -= len; in smb_extract_kvec_to_rdma()
2502 seg = min_t(size_t, len, PAGE_SIZE - off); in smb_extract_kvec_to_rdma()
2510 return -EIO; in smb_extract_kvec_to_rdma()
2513 len -= seg; in smb_extract_kvec_to_rdma()
2516 } while (len > 0 && rdma->nr_sge < rdma->max_sge); in smb_extract_kvec_to_rdma()
2518 if (rdma->nr_sge >= rdma->max_sge || maxsize <= 0) in smb_extract_kvec_to_rdma()
2529 * Extract folio fragments from a FOLIOQ-class iterator and add them to an RDMA
2536 const struct folio_queue *folioq = iter->folioq; in smb_extract_folioq_to_rdma()
2537 unsigned int slot = iter->folioq_slot; in smb_extract_folioq_to_rdma()
2539 size_t offset = iter->iov_offset; in smb_extract_folioq_to_rdma()
2544 folioq = folioq->next; in smb_extract_folioq_to_rdma()
2546 return -EIO; in smb_extract_folioq_to_rdma()
2555 size_t part = umin(maxsize - ret, fsize - offset); in smb_extract_folioq_to_rdma()
2558 return -EIO; in smb_extract_folioq_to_rdma()
2568 if (!folioq->next) { in smb_extract_folioq_to_rdma()
2569 WARN_ON_ONCE(ret < iter->count); in smb_extract_folioq_to_rdma()
2572 folioq = folioq->next; in smb_extract_folioq_to_rdma()
2576 } while (rdma->nr_sge < rdma->max_sge && ret < maxsize); in smb_extract_folioq_to_rdma()
2578 iter->folioq = folioq; in smb_extract_folioq_to_rdma()
2579 iter->folioq_slot = slot; in smb_extract_folioq_to_rdma()
2580 iter->iov_offset = offset; in smb_extract_folioq_to_rdma()
2581 iter->count -= ret; in smb_extract_folioq_to_rdma()
2592 * IOVEC/UBUF-type iterator is to be used, it should be converted to a
2593 * BVEC-type iterator and the pages pinned, ref'd or otherwise held in some
2600 int before = rdma->nr_sge; in smb_extract_iter_to_rdma()
2614 return -EIO; in smb_extract_iter_to_rdma()
2618 while (rdma->nr_sge > before) { in smb_extract_iter_to_rdma()
2619 struct ib_sge *sge = &rdma->sge[--rdma->nr_sge]; in smb_extract_iter_to_rdma()
2621 ib_dma_unmap_single(rdma->device, sge->addr, sge->length, in smb_extract_iter_to_rdma()
2622 rdma->direction); in smb_extract_iter_to_rdma()
2623 sge->addr = 0; in smb_extract_iter_to_rdma()
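
The fragments above come from smb_extract_iter_to_rdma(), which routes the iterator to one of the three extractors shown earlier and, on failure, unwinds any SGEs already DMA-mapped (the loop ending this listing). A sketch of that dispatch, assuming it is a plain switch on the iterator type (function names are from this listing; argument order is assumed):

    static ssize_t smb_extract_iter_to_rdma_sketch(struct iov_iter *iter, size_t len,
                                                   struct smb_extract_to_rdma *rdma)
    {
        ssize_t ret;

        switch (iov_iter_type(iter)) {
        case ITER_BVEC:
            ret = smb_extract_bvec_to_rdma(iter, rdma, len);
            break;
        case ITER_KVEC:
            ret = smb_extract_kvec_to_rdma(iter, rdma, len);
            break;
        case ITER_FOLIOQ:
            ret = smb_extract_folioq_to_rdma(iter, rdma, len);
            break;
        default:
            WARN_ON_ONCE(1);    /* IOVEC/UBUF callers must convert to BVEC first */
            ret = -EIO;
            break;
        }

        /* On error, the real function unmaps the SGEs added during this call,
         * exactly as the while loop above does. */
        return ret;
    }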