qp.c (858a0d7eb5300b5f620d98ab3c4b96c9d5f19131) vs qp.c (2b31f7ae5f645edd852addfca445895b5806f3f9)
1/*
2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:

--- 461 unchanged lines hidden ---

470 attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
471 attr->qp_type == MLX5_IB_QPT_REG_UMR ||
472 !attr->cap.max_recv_wr)
473 return 0;
474
475 return 1;
476}
477
1/*
2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:

--- 461 unchanged lines hidden ---

470 attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
471 attr->qp_type == MLX5_IB_QPT_REG_UMR ||
472 !attr->cap.max_recv_wr)
473 return 0;
474
475 return 1;
476}
477
478static int first_med_uuar(void)
478static int first_med_bfreg(void)
479{
480 return 1;
481}
482
479{
480 return 1;
481}
482
483static int next_uuar(int n)
484{
485 n++;
483enum {
484 /* this is the first blue flame register in the array of bfregs assigned
485 * to a process. Since we do not use it for blue flame but rather
486 * regular 64 bit doorbells, we do not need a lock for maintaining
487 * "odd/even" order
488 */
489 NUM_NON_BLUE_FLAME_BFREGS = 1,
490};
486
491
487 while (((n % 4) & 2))
488 n++;
489
490 return n;
492static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi)
493{
494 return get_num_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR;
491}
492
495}
496
493static int num_med_uuar(struct mlx5_uuar_info *uuari)
497static int num_med_bfreg(struct mlx5_ib_dev *dev,
498 struct mlx5_bfreg_info *bfregi)
494{
495 int n;
496
499{
500 int n;
501
497 n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
498 uuari->num_low_latency_uuars - 1;
502 n = max_bfregs(dev, bfregi) - bfregi->num_low_latency_bfregs -
503 NUM_NON_BLUE_FLAME_BFREGS;
499
500 return n >= 0 ? n : 0;
501}
502
504
505 return n >= 0 ? n : 0;
506}
507
503static int max_uuari(struct mlx5_uuar_info *uuari)
508static int first_hi_bfreg(struct mlx5_ib_dev *dev,
509 struct mlx5_bfreg_info *bfregi)
504{
510{
505 return uuari->num_uars * 4;
506}
507
508static int first_hi_uuar(struct mlx5_uuar_info *uuari)
509{
510 int med;
511 int med;
511 int i;
512 int t;
513
512
514 med = num_med_uuar(uuari);
515 for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
516 t++;
517 if (t == med)
518 return next_uuar(i);
519 }
520
521 return 0;
513 med = num_med_bfreg(dev, bfregi);
514 return ++med;
522}
523
515}
516
524static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
517static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev,
518 struct mlx5_bfreg_info *bfregi)
525{
526 int i;
527
519{
520 int i;
521
528 for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
529 if (!test_bit(i, uuari->bitmap)) {
530 set_bit(i, uuari->bitmap);
531 uuari->count[i]++;
522 for (i = first_hi_bfreg(dev, bfregi); i < max_bfregs(dev, bfregi); i++) {
523 if (!bfregi->count[i]) {
524 bfregi->count[i]++;
532 return i;
533 }
534 }
535
536 return -ENOMEM;
537}
538
525 return i;
526 }
527 }
528
529 return -ENOMEM;
530}
531
539static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
532static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev,
533 struct mlx5_bfreg_info *bfregi)
540{
534{
541 int minidx = first_med_uuar();
535 int minidx = first_med_bfreg();
542 int i;
543
536 int i;
537
544 for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
545 if (uuari->count[i] < uuari->count[minidx])
538 for (i = first_med_bfreg(); i < first_hi_bfreg(dev, bfregi); i++) {
539 if (bfregi->count[i] < bfregi->count[minidx])
546 minidx = i;
540 minidx = i;
541 if (!bfregi->count[minidx])
542 break;
547 }
548
543 }
544
549 uuari->count[minidx]++;
545 bfregi->count[minidx]++;
550 return minidx;
551}
552
546 return minidx;
547}
548
553static int alloc_uuar(struct mlx5_uuar_info *uuari,
554 enum mlx5_ib_latency_class lat)
549static int alloc_bfreg(struct mlx5_ib_dev *dev,
550 struct mlx5_bfreg_info *bfregi,
551 enum mlx5_ib_latency_class lat)
555{
552{
556 int uuarn = -EINVAL;
553 int bfregn = -EINVAL;
557
554
558 mutex_lock(&uuari->lock);
555 mutex_lock(&bfregi->lock);
559 switch (lat) {
560 case MLX5_IB_LATENCY_CLASS_LOW:
556 switch (lat) {
557 case MLX5_IB_LATENCY_CLASS_LOW:
561 uuarn = 0;
562 uuari->count[uuarn]++;
558 BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS != 1);
559 bfregn = 0;
560 bfregi->count[bfregn]++;
563 break;
564
565 case MLX5_IB_LATENCY_CLASS_MEDIUM:
561 break;
562
563 case MLX5_IB_LATENCY_CLASS_MEDIUM:
566 if (uuari->ver < 2)
567 uuarn = -ENOMEM;
564 if (bfregi->ver < 2)
565 bfregn = -ENOMEM;
568 else
566 else
569 uuarn = alloc_med_class_uuar(uuari);
567 bfregn = alloc_med_class_bfreg(dev, bfregi);
570 break;
571
572 case MLX5_IB_LATENCY_CLASS_HIGH:
568 break;
569
570 case MLX5_IB_LATENCY_CLASS_HIGH:
573 if (uuari->ver < 2)
574 uuarn = -ENOMEM;
571 if (bfregi->ver < 2)
572 bfregn = -ENOMEM;
575 else
573 else
576 uuarn = alloc_high_class_uuar(uuari);
574 bfregn = alloc_high_class_bfreg(dev, bfregi);
577 break;
575 break;
578
579 case MLX5_IB_LATENCY_CLASS_FAST_PATH:
580 uuarn = 2;
581 break;
582 }
576 }
583 mutex_unlock(&uuari->lock);
577 mutex_unlock(&bfregi->lock);
584
578
585 return uuarn;
579 return bfregn;
586}
587
580}
581
588static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
582static void free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn)
589{
583{
590 clear_bit(uuarn, uuari->bitmap);
591 --uuari->count[uuarn];
584 mutex_lock(&bfregi->lock);
585 bfregi->count[bfregn]--;
586 mutex_unlock(&bfregi->lock);
592}
593
587}
588
594static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
595{
596 clear_bit(uuarn, uuari->bitmap);
597 --uuari->count[uuarn];
598}
599
600static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
601{
602 int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
603 int high_uuar = nuuars - uuari->num_low_latency_uuars;
604
605 mutex_lock(&uuari->lock);
606 if (uuarn == 0) {
607 --uuari->count[uuarn];
608 goto out;
609 }
610
611 if (uuarn < high_uuar) {
612 free_med_class_uuar(uuari, uuarn);
613 goto out;
614 }
615
616 free_high_class_uuar(uuari, uuarn);
617
618out:
619 mutex_unlock(&uuari->lock);
620}
621
622static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
623{
624 switch (state) {
625 case IB_QPS_RESET: return MLX5_QP_STATE_RST;
626 case IB_QPS_INIT: return MLX5_QP_STATE_INIT;
627 case IB_QPS_RTR: return MLX5_QP_STATE_RTR;
628 case IB_QPS_RTS: return MLX5_QP_STATE_RTS;
629 case IB_QPS_SQD: return MLX5_QP_STATE_SQD;

--- 22 unchanged lines hidden ---

652 }
653}
654
655static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
656 struct mlx5_ib_cq *recv_cq);
657static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
658 struct mlx5_ib_cq *recv_cq);
659
589static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
590{
591 switch (state) {
592 case IB_QPS_RESET: return MLX5_QP_STATE_RST;
593 case IB_QPS_INIT: return MLX5_QP_STATE_INIT;
594 case IB_QPS_RTR: return MLX5_QP_STATE_RTR;
595 case IB_QPS_RTS: return MLX5_QP_STATE_RTS;
596 case IB_QPS_SQD: return MLX5_QP_STATE_SQD;

--- 22 unchanged lines hidden ---

619 }
620}
621
622static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
623 struct mlx5_ib_cq *recv_cq);
624static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
625 struct mlx5_ib_cq *recv_cq);
626
660static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
627static int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
628 struct mlx5_bfreg_info *bfregi, int bfregn)
661{
629{
662 return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
630 int bfregs_per_sys_page;
631 int index_of_sys_page;
632 int offset;
633
634 bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
635 MLX5_NON_FP_BFREGS_PER_UAR;
636 index_of_sys_page = bfregn / bfregs_per_sys_page;
637
638 offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR;
639
640 return bfregi->sys_pages[index_of_sys_page] + offset;
663}
664
665static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
666 struct ib_pd *pd,
667 unsigned long addr, size_t size,
668 struct ib_umem **umem,
669 int *npages, int *page_shift, int *ncont,
670 u32 *offset)

--- 86 unchanged lines hidden ---

757 rwq->create_type = MLX5_WQ_USER;
758 return 0;
759
760err_umem:
761 ib_umem_release(rwq->umem);
762 return err;
763}
764
641}
642
643static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
644 struct ib_pd *pd,
645 unsigned long addr, size_t size,
646 struct ib_umem **umem,
647 int *npages, int *page_shift, int *ncont,
648 u32 *offset)

--- 86 unchanged lines hidden (view full) ---

735 rwq->create_type = MLX5_WQ_USER;
736 return 0;
737
738err_umem:
739 ib_umem_release(rwq->umem);
740 return err;
741}
742
743static int adjust_bfregn(struct mlx5_ib_dev *dev,
744 struct mlx5_bfreg_info *bfregi, int bfregn)
745{
746 return bfregn / MLX5_NON_FP_BFREGS_PER_UAR * MLX5_BFREGS_PER_UAR +
747 bfregn % MLX5_NON_FP_BFREGS_PER_UAR;
748}
749
765static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
766 struct mlx5_ib_qp *qp, struct ib_udata *udata,
767 struct ib_qp_init_attr *attr,
768 u32 **in,
769 struct mlx5_ib_create_qp_resp *resp, int *inlen,
770 struct mlx5_ib_qp_base *base)
771{
772 struct mlx5_ib_ucontext *context;
773 struct mlx5_ib_create_qp ucmd;
774 struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer;
775 int page_shift = 0;
776 int uar_index;
777 int npages;
778 u32 offset = 0;
750static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
751 struct mlx5_ib_qp *qp, struct ib_udata *udata,
752 struct ib_qp_init_attr *attr,
753 u32 **in,
754 struct mlx5_ib_create_qp_resp *resp, int *inlen,
755 struct mlx5_ib_qp_base *base)
756{
757 struct mlx5_ib_ucontext *context;
758 struct mlx5_ib_create_qp ucmd;
759 struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer;
760 int page_shift = 0;
761 int uar_index;
762 int npages;
763 u32 offset = 0;
779 int uuarn;
764 int bfregn;
780 int ncont = 0;
781 __be64 *pas;
782 void *qpc;
783 int err;
784
785 err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
786 if (err) {
787 mlx5_ib_dbg(dev, "copy failed\n");
788 return err;
789 }
790
791 context = to_mucontext(pd->uobject->context);
792 /*
793 * TBD: should come from the verbs when we have the API
794 */
795 if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
796 /* In CROSS_CHANNEL CQ and QP must use the same UAR */
765 int ncont = 0;
766 __be64 *pas;
767 void *qpc;
768 int err;
769
770 err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
771 if (err) {
772 mlx5_ib_dbg(dev, "copy failed\n");
773 return err;
774 }
775
776 context = to_mucontext(pd->uobject->context);
777 /*
778 * TBD: should come from the verbs when we have the API
779 */
780 if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
781 /* In CROSS_CHANNEL CQ and QP must use the same UAR */
797 uuarn = MLX5_CROSS_CHANNEL_UUAR;
782 bfregn = MLX5_CROSS_CHANNEL_BFREG;
798 else {
783 else {
799 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
800 if (uuarn < 0) {
801 mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
784 bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_HIGH);
785 if (bfregn < 0) {
786 mlx5_ib_dbg(dev, "failed to allocate low latency BFREG\n");
802 mlx5_ib_dbg(dev, "reverting to medium latency\n");
787 mlx5_ib_dbg(dev, "reverting to medium latency\n");
803 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
804 if (uuarn < 0) {
805 mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
788 bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_MEDIUM);
789 if (bfregn < 0) {
790 mlx5_ib_dbg(dev, "failed to allocate medium latency BFREG\n");
806 mlx5_ib_dbg(dev, "reverting to high latency\n");
791 mlx5_ib_dbg(dev, "reverting to high latency\n");
807 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
808 if (uuarn < 0) {
809 mlx5_ib_warn(dev, "uuar allocation failed\n");
810 return uuarn;
792 bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_LOW);
793 if (bfregn < 0) {
794 mlx5_ib_warn(dev, "bfreg allocation failed\n");
795 return bfregn;
811 }
812 }
813 }
814 }
815
796 }
797 }
798 }
799 }
800
816 uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
817 mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);
801 uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn);
802 mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index);
818
819 qp->rq.offset = 0;
820 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
821 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
822
823 err = set_user_buf_size(dev, qp, &ucmd, base, attr);
824 if (err)
803
804 qp->rq.offset = 0;
805 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
806 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
807
808 err = set_user_buf_size(dev, qp, &ucmd, base, attr);
809 if (err)
825 goto err_uuar;
810 goto err_bfreg;
826
827 if (ucmd.buf_addr && ubuffer->buf_size) {
828 ubuffer->buf_addr = ucmd.buf_addr;
829 err = mlx5_ib_umem_get(dev, pd, ubuffer->buf_addr,
830 ubuffer->buf_size,
831 &ubuffer->umem, &npages, &page_shift,
832 &ncont, &offset);
833 if (err)
811
812 if (ucmd.buf_addr && ubuffer->buf_size) {
813 ubuffer->buf_addr = ucmd.buf_addr;
814 err = mlx5_ib_umem_get(dev, pd, ubuffer->buf_addr,
815 ubuffer->buf_size,
816 &ubuffer->umem, &npages, &page_shift,
817 &ncont, &offset);
818 if (err)
834 goto err_uuar;
819 goto err_bfreg;
835 } else {
836 ubuffer->umem = NULL;
837 }
838
839 *inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
840 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont;
841 *in = mlx5_vzalloc(*inlen);
842 if (!*in) {

--- 6 unchanged lines hidden ---

849 mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, pas, 0);
850
851 qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
852
853 MLX5_SET(qpc, qpc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT);
854 MLX5_SET(qpc, qpc, page_offset, offset);
855
856 MLX5_SET(qpc, qpc, uar_page, uar_index);
820 } else {
821 ubuffer->umem = NULL;
822 }
823
824 *inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
825 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont;
826 *in = mlx5_vzalloc(*inlen);
827 if (!*in) {

--- 6 unchanged lines hidden (view full) ---

834 mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, pas, 0);
835
836 qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
837
838 MLX5_SET(qpc, qpc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT);
839 MLX5_SET(qpc, qpc, page_offset, offset);
840
841 MLX5_SET(qpc, qpc, uar_page, uar_index);
857 resp->uuar_index = uuarn;
858 qp->uuarn = uuarn;
842 resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn);
843 qp->bfregn = bfregn;
859
860 err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
861 if (err) {
862 mlx5_ib_dbg(dev, "map failed\n");
863 goto err_free;
864 }
865
866 err = ib_copy_to_udata(udata, resp, sizeof(*resp));

--- 10 unchanged lines hidden ---

877
878err_free:
879 kvfree(*in);
880
881err_umem:
882 if (ubuffer->umem)
883 ib_umem_release(ubuffer->umem);
884
844
845 err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
846 if (err) {
847 mlx5_ib_dbg(dev, "map failed\n");
848 goto err_free;
849 }
850
851 err = ib_copy_to_udata(udata, resp, sizeof(*resp));

--- 10 unchanged lines hidden ---

862
863err_free:
864 kvfree(*in);
865
866err_umem:
867 if (ubuffer->umem)
868 ib_umem_release(ubuffer->umem);
869
885err_uuar:
886 free_uuar(&context->uuari, uuarn);
870err_bfreg:
871 free_bfreg(dev, &context->bfregi, bfregn);
887 return err;
888}
889
872 return err;
873}
874
890static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp,
891 struct mlx5_ib_qp_base *base)
875static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd,
876 struct mlx5_ib_qp *qp, struct mlx5_ib_qp_base *base)
892{
893 struct mlx5_ib_ucontext *context;
894
895 context = to_mucontext(pd->uobject->context);
896 mlx5_ib_db_unmap_user(context, &qp->db);
897 if (base->ubuffer.umem)
898 ib_umem_release(base->ubuffer.umem);
877{
878 struct mlx5_ib_ucontext *context;
879
880 context = to_mucontext(pd->uobject->context);
881 mlx5_ib_db_unmap_user(context, &qp->db);
882 if (base->ubuffer.umem)
883 ib_umem_release(base->ubuffer.umem);
899 free_uuar(&context->uuari, qp->uuarn);
884 free_bfreg(dev, &context->bfregi, qp->bfregn);
900}
901
902static int create_kernel_qp(struct mlx5_ib_dev *dev,
903 struct ib_qp_init_attr *init_attr,
904 struct mlx5_ib_qp *qp,
905 u32 **in, int *inlen,
906 struct mlx5_ib_qp_base *base)
907{
885}
886
887static int create_kernel_qp(struct mlx5_ib_dev *dev,
888 struct ib_qp_init_attr *init_attr,
889 struct mlx5_ib_qp *qp,
890 u32 **in, int *inlen,
891 struct mlx5_ib_qp_base *base)
892{
908 enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
909 struct mlx5_uuar_info *uuari;
910 int uar_index;
911 void *qpc;
893 int uar_index;
894 void *qpc;
912 int uuarn;
913 int err;
914
895 int err;
896
915 uuari = &dev->mdev->priv.uuari;
916 if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN |
917 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
918 IB_QP_CREATE_IPOIB_UD_LSO |
919 mlx5_ib_create_qp_sqpn_qp1()))
920 return -EINVAL;
921
922 if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
897 if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN |
898 IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
899 IB_QP_CREATE_IPOIB_UD_LSO |
900 mlx5_ib_create_qp_sqpn_qp1()))
901 return -EINVAL;
902
903 if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
923 lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;
904 qp->bf.bfreg = &dev->fp_bfreg;
905 else
906 qp->bf.bfreg = &dev->bfreg;
924
907
925 uuarn = alloc_uuar(uuari, lc);
926 if (uuarn < 0) {
927 mlx5_ib_dbg(dev, "\n");
928 return -ENOMEM;
929 }
908 qp->bf.buf_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
909 uar_index = qp->bf.bfreg->index;
930
910
931 qp->bf = &uuari->bfs[uuarn];
932 uar_index = qp->bf->uar->index;
933
934 err = calc_sq_size(dev, init_attr, qp);
935 if (err < 0) {
936 mlx5_ib_dbg(dev, "err %d\n", err);
911 err = calc_sq_size(dev, init_attr, qp);
912 if (err < 0) {
913 mlx5_ib_dbg(dev, "err %d\n", err);
937 goto err_uuar;
914 return err;
938 }
939
940 qp->rq.offset = 0;
941 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
942 base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
943
944 err = mlx5_buf_alloc(dev->mdev, base->ubuffer.buf_size, &qp->buf);
945 if (err) {
946 mlx5_ib_dbg(dev, "err %d\n", err);
915 }
916
917 qp->rq.offset = 0;
918 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
919 base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
920
921 err = mlx5_buf_alloc(dev->mdev, base->ubuffer.buf_size, &qp->buf);
922 if (err) {
923 mlx5_ib_dbg(dev, "err %d\n", err);
947 goto err_uuar;
924 return err;
948 }
949
950 qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
951 *inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
952 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages;
953 *in = mlx5_vzalloc(*inlen);
954 if (!*in) {
955 err = -ENOMEM;

--- 33 unchanged lines hidden ---

989 err = -ENOMEM;
990 goto err_wrid;
991 }
992 qp->create_type = MLX5_QP_KERNEL;
993
994 return 0;
995
996err_wrid:
925 }
926
927 qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
928 *inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
929 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages;
930 *in = mlx5_vzalloc(*inlen);
931 if (!*in) {
932 err = -ENOMEM;

--- 33 unchanged lines hidden (view full) ---

966 err = -ENOMEM;
967 goto err_wrid;
968 }
969 qp->create_type = MLX5_QP_KERNEL;
970
971 return 0;
972
973err_wrid:
997 mlx5_db_free(dev->mdev, &qp->db);
998 kfree(qp->sq.wqe_head);
999 kfree(qp->sq.w_list);
1000 kfree(qp->sq.wrid);
1001 kfree(qp->sq.wr_data);
1002 kfree(qp->rq.wrid);
974 kfree(qp->sq.wqe_head);
975 kfree(qp->sq.w_list);
976 kfree(qp->sq.wrid);
977 kfree(qp->sq.wr_data);
978 kfree(qp->rq.wrid);
979 mlx5_db_free(dev->mdev, &qp->db);
1003
1004err_free:
1005 kvfree(*in);
1006
1007err_buf:
1008 mlx5_buf_free(dev->mdev, &qp->buf);
980
981err_free:
982 kvfree(*in);
983
984err_buf:
985 mlx5_buf_free(dev->mdev, &qp->buf);
1009
1010err_uuar:
1011 free_uuar(&dev->mdev->priv.uuari, uuarn);
1012 return err;
1013}
1014
1015static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
1016{
986 return err;
987}
988
989static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
990{
1017 mlx5_db_free(dev->mdev, &qp->db);
1018 kfree(qp->sq.wqe_head);
1019 kfree(qp->sq.w_list);
1020 kfree(qp->sq.wrid);
1021 kfree(qp->sq.wr_data);
1022 kfree(qp->rq.wrid);
991 kfree(qp->sq.wqe_head);
992 kfree(qp->sq.w_list);
993 kfree(qp->sq.wrid);
994 kfree(qp->sq.wr_data);
995 kfree(qp->rq.wrid);
996 mlx5_db_free(dev->mdev, &qp->db);
1023 mlx5_buf_free(dev->mdev, &qp->buf);
997 mlx5_buf_free(dev->mdev, &qp->buf);
1024 free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
1025}
1026
1027static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
1028{
1029 if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
1030 (attr->qp_type == IB_QPT_XRC_INI))
1031 return MLX5_SRQ_RQ;
1032 else if (!qp->has_rq)

--- 315 unchanged lines hidden ---

1348 size_t required_cmd_sz;
1349
1350 if (init_attr->qp_type != IB_QPT_RAW_PACKET)
1351 return -EOPNOTSUPP;
1352
1353 if (init_attr->create_flags || init_attr->send_cq)
1354 return -EINVAL;
1355
998}
999
1000static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
1001{
1002 if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
1003 (attr->qp_type == IB_QPT_XRC_INI))
1004 return MLX5_SRQ_RQ;
1005 else if (!qp->has_rq)

--- 315 unchanged lines hidden ---

1321 size_t required_cmd_sz;
1322
1323 if (init_attr->qp_type != IB_QPT_RAW_PACKET)
1324 return -EOPNOTSUPP;
1325
1326 if (init_attr->create_flags || init_attr->send_cq)
1327 return -EINVAL;
1328
1356 min_resp_len = offsetof(typeof(resp), uuar_index) + sizeof(resp.uuar_index);
1329 min_resp_len = offsetof(typeof(resp), bfreg_index) + sizeof(resp.bfreg_index);
1357 if (udata->outlen < min_resp_len)
1358 return -EINVAL;
1359
1360 required_cmd_sz = offsetof(typeof(ucmd), reserved1) + sizeof(ucmd.reserved1);
1361 if (udata->inlen < required_cmd_sz) {
1362 mlx5_ib_dbg(dev, "invalid inlen\n");
1363 return -EINVAL;
1364 }

--- 156 unchanged lines hidden ---

1521 void *qpc;
1522 u32 *in;
1523 int err;
1524
1525 base = init_attr->qp_type == IB_QPT_RAW_PACKET ?
1526 &qp->raw_packet_qp.rq.base :
1527 &qp->trans_qp.base;
1528
1330 if (udata->outlen < min_resp_len)
1331 return -EINVAL;
1332
1333 required_cmd_sz = offsetof(typeof(ucmd), reserved1) + sizeof(ucmd.reserved1);
1334 if (udata->inlen < required_cmd_sz) {
1335 mlx5_ib_dbg(dev, "invalid inlen\n");
1336 return -EINVAL;
1337 }

--- 156 unchanged lines hidden (view full) ---

1494 void *qpc;
1495 u32 *in;
1496 int err;
1497
1498 base = init_attr->qp_type == IB_QPT_RAW_PACKET ?
1499 &qp->raw_packet_qp.rq.base :
1500 &qp->trans_qp.base;
1501
1529 if (init_attr->qp_type != IB_QPT_RAW_PACKET)
1530 mlx5_ib_odp_create_qp(qp);
1531
1532 mutex_init(&qp->mutex);
1533 spin_lock_init(&qp->sq.lock);
1534 spin_lock_init(&qp->rq.lock);
1535
1536 if (init_attr->rwq_ind_tbl) {
1537 if (!udata)
1538 return -ENOSYS;
1539

--- 250 unchanged lines hidden ---

1790 list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
1791 mlx5_ib_unlock_cqs(send_cq, recv_cq);
1792 spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
1793
1794 return 0;
1795
1796err_create:
1797 if (qp->create_type == MLX5_QP_USER)
1502 mutex_init(&qp->mutex);
1503 spin_lock_init(&qp->sq.lock);
1504 spin_lock_init(&qp->rq.lock);
1505
1506 if (init_attr->rwq_ind_tbl) {
1507 if (!udata)
1508 return -ENOSYS;
1509

--- 250 unchanged lines hidden ---

1760 list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
1761 mlx5_ib_unlock_cqs(send_cq, recv_cq);
1762 spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
1763
1764 return 0;
1765
1766err_create:
1767 if (qp->create_type == MLX5_QP_USER)
1798 destroy_qp_user(pd, qp, base);
1768 destroy_qp_user(dev, pd, qp, base);
1799 else if (qp->create_type == MLX5_QP_KERNEL)
1800 destroy_qp_kernel(dev, qp);
1801
1802 kvfree(in);
1803 return err;
1804}
1805
1806static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)

--- 111 unchanged lines hidden ---

1918 }
1919
1920 base = qp->ibqp.qp_type == IB_QPT_RAW_PACKET ?
1921 &qp->raw_packet_qp.rq.base :
1922 &qp->trans_qp.base;
1923
1924 if (qp->state != IB_QPS_RESET) {
1925 if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET) {
1769 else if (qp->create_type == MLX5_QP_KERNEL)
1770 destroy_qp_kernel(dev, qp);
1771
1772 kvfree(in);
1773 return err;
1774}
1775
1776static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)

--- 111 unchanged lines hidden ---

1888 }
1889
1890 base = qp->ibqp.qp_type == IB_QPT_RAW_PACKET ?
1891 &qp->raw_packet_qp.rq.base :
1892 &qp->trans_qp.base;
1893
1894 if (qp->state != IB_QPS_RESET) {
1895 if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET) {
1926 mlx5_ib_qp_disable_pagefaults(qp);
1927 err = mlx5_core_qp_modify(dev->mdev,
1928 MLX5_CMD_OP_2RST_QP, 0,
1929 NULL, &base->mqp);
1930 } else {
1931 struct mlx5_modify_raw_qp_param raw_qp_param = {
1932 .operation = MLX5_CMD_OP_2RST_QP
1933 };
1934

--- 34 unchanged lines hidden ---

1969 if (err)
1970 mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n",
1971 base->mqp.qpn);
1972 }
1973
1974 if (qp->create_type == MLX5_QP_KERNEL)
1975 destroy_qp_kernel(dev, qp);
1976 else if (qp->create_type == MLX5_QP_USER)
1896 err = mlx5_core_qp_modify(dev->mdev,
1897 MLX5_CMD_OP_2RST_QP, 0,
1898 NULL, &base->mqp);
1899 } else {
1900 struct mlx5_modify_raw_qp_param raw_qp_param = {
1901 .operation = MLX5_CMD_OP_2RST_QP
1902 };
1903

--- 34 unchanged lines hidden ---

1938 if (err)
1939 mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n",
1940 base->mqp.qpn);
1941 }
1942
1943 if (qp->create_type == MLX5_QP_KERNEL)
1944 destroy_qp_kernel(dev, qp);
1945 else if (qp->create_type == MLX5_QP_USER)
1977 destroy_qp_user(&get_pd(qp)->ibpd, qp, base);
1946 destroy_qp_user(dev, &get_pd(qp)->ibpd, qp, base);
1978}
1979
1980static const char *ib_qp_type_str(enum ib_qp_type type)
1981{
1982 switch (type) {
1983 case IB_QPT_SMI:
1984 return "IB_QPT_SMI";
1985 case IB_QPT_GSI:

--- 832 unchanged lines hidden ---

2818 context->deth_sqpn = cpu_to_be32(1);
2819
2820 mlx5_cur = to_mlx5_state(cur_state);
2821 mlx5_new = to_mlx5_state(new_state);
2822 mlx5_st = to_mlx5_st(ibqp->qp_type);
2823 if (mlx5_st < 0)
2824 goto out;
2825
1947}
1948
1949static const char *ib_qp_type_str(enum ib_qp_type type)
1950{
1951 switch (type) {
1952 case IB_QPT_SMI:
1953 return "IB_QPT_SMI";
1954 case IB_QPT_GSI:

--- 832 unchanged lines hidden ---

2787 context->deth_sqpn = cpu_to_be32(1);
2788
2789 mlx5_cur = to_mlx5_state(cur_state);
2790 mlx5_new = to_mlx5_state(new_state);
2791 mlx5_st = to_mlx5_st(ibqp->qp_type);
2792 if (mlx5_st < 0)
2793 goto out;
2794
2826 /* If moving to a reset or error state, we must disable page faults on
2827 * this QP and flush all current page faults. Otherwise a stale page
2828 * fault may attempt to work on this QP after it is reset and moved
2829 * again to RTS, and may cause the driver and the device to get out of
2830 * sync. */
2831 if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
2832 (new_state == IB_QPS_RESET || new_state == IB_QPS_ERR) &&
2833 (qp->ibqp.qp_type != IB_QPT_RAW_PACKET))
2834 mlx5_ib_qp_disable_pagefaults(qp);
2835
2836 if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
2837 !optab[mlx5_cur][mlx5_new])
2838 goto out;
2839
2840 op = optab[mlx5_cur][mlx5_new];
2841 optpar = ib_mask_to_mlx5_opt(attr_mask);
2842 optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
2843

--- 15 unchanged lines hidden ---

2859 } else {
2860 err = mlx5_core_qp_modify(dev->mdev, op, optpar, context,
2861 &base->mqp);
2862 }
2863
2864 if (err)
2865 goto out;
2866
2795 if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
2796 !optab[mlx5_cur][mlx5_new])
2797 goto out;
2798
2799 op = optab[mlx5_cur][mlx5_new];
2800 optpar = ib_mask_to_mlx5_opt(attr_mask);
2801 optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
2802

--- 15 unchanged lines hidden ---

2818 } else {
2819 err = mlx5_core_qp_modify(dev->mdev, op, optpar, context,
2820 &base->mqp);
2821 }
2822
2823 if (err)
2824 goto out;
2825
2867 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT &&
2868 (qp->ibqp.qp_type != IB_QPT_RAW_PACKET))
2869 mlx5_ib_qp_enable_pagefaults(qp);
2870
2871 qp->state = new_state;
2872
2873 if (attr_mask & IB_QP_ACCESS_FLAGS)
2874 qp->trans_qp.atomic_rd_en = attr->qp_access_flags;
2875 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
2876 qp->trans_qp.resp_depth = attr->max_dest_rd_atomic;
2877 if (attr_mask & IB_QP_PORT)
2878 qp->port = attr->port_num;

--- 145 unchanged lines hidden ---

3024 eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM |
3025 MLX5_ETH_WQE_L4_CSUM;
3026
3027 seg += sizeof(struct mlx5_wqe_eth_seg);
3028 *size += sizeof(struct mlx5_wqe_eth_seg) / 16;
3029
3030 if (wr->opcode == IB_WR_LSO) {
3031 struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
2826 qp->state = new_state;
2827
2828 if (attr_mask & IB_QP_ACCESS_FLAGS)
2829 qp->trans_qp.atomic_rd_en = attr->qp_access_flags;
2830 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
2831 qp->trans_qp.resp_depth = attr->max_dest_rd_atomic;
2832 if (attr_mask & IB_QP_PORT)
2833 qp->port = attr->port_num;

--- 145 unchanged lines hidden ---

2979 eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM |
2980 MLX5_ETH_WQE_L4_CSUM;
2981
2982 seg += sizeof(struct mlx5_wqe_eth_seg);
2983 *size += sizeof(struct mlx5_wqe_eth_seg) / 16;
2984
2985 if (wr->opcode == IB_WR_LSO) {
2986 struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
3032 int size_of_inl_hdr_start = sizeof(eseg->inline_hdr_start);
2987 int size_of_inl_hdr_start = sizeof(eseg->inline_hdr.start);
3033 u64 left, leftlen, copysz;
3034 void *pdata = ud_wr->header;
3035
3036 left = ud_wr->hlen;
3037 eseg->mss = cpu_to_be16(ud_wr->mss);
2988 u64 left, leftlen, copysz;
2989 void *pdata = ud_wr->header;
2990
2991 left = ud_wr->hlen;
2992 eseg->mss = cpu_to_be16(ud_wr->mss);
3038 eseg->inline_hdr_sz = cpu_to_be16(left);
2993 eseg->inline_hdr.sz = cpu_to_be16(left);
3039
3040 /*
3041 * check if there is space till the end of queue, if yes,
3042 * copy all in one shot, otherwise copy till the end of queue,
3043 * rollback and then copy what is left
3044 */
2994
2995 /*
2996 * check if there is space till the end of queue, if yes,
2997 * copy all in one shot, otherwise copy till the end of queue,
2998 * rollback and then copy what is left
2999 */
3045 leftlen = qend - (void *)eseg->inline_hdr_start;
3000 leftlen = qend - (void *)eseg->inline_hdr.start;
3046 copysz = min_t(u64, leftlen, left);
3047
3048 memcpy(seg - size_of_inl_hdr_start, pdata, copysz);
3049
3050 if (likely(copysz > size_of_inl_hdr_start)) {
3051 seg += ALIGN(copysz - size_of_inl_hdr_start, 16);
3052 *size += ALIGN(copysz - size_of_inl_hdr_start, 16) / 16;
3053 }

--- 21 unchanged lines hidden ---

3075
3076static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
3077{
3078 dseg->byte_count = cpu_to_be32(sg->length);
3079 dseg->lkey = cpu_to_be32(sg->lkey);
3080 dseg->addr = cpu_to_be64(sg->addr);
3081}
3082
3001 copysz = min_t(u64, leftlen, left);
3002
3003 memcpy(seg - size_of_inl_hdr_start, pdata, copysz);
3004
3005 if (likely(copysz > size_of_inl_hdr_start)) {
3006 seg += ALIGN(copysz - size_of_inl_hdr_start, 16);
3007 *size += ALIGN(copysz - size_of_inl_hdr_start, 16) / 16;
3008 }

--- 21 unchanged lines hidden ---

3030
3031static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
3032{
3033 dseg->byte_count = cpu_to_be32(sg->length);
3034 dseg->lkey = cpu_to_be32(sg->lkey);
3035 dseg->addr = cpu_to_be64(sg->addr);
3036}
3037
3083static __be16 get_klm_octo(int npages)
3038static u64 get_xlt_octo(u64 bytes)
3084{
3039{
3085 return cpu_to_be16(ALIGN(npages, 8) / 2);
3040 return ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT) /
3041 MLX5_IB_UMR_OCTOWORD;
3086}
3087
3088static __be64 frwr_mkey_mask(void)
3089{
3090 u64 result;
3091
3092 result = MLX5_MKEY_MASK_LEN |
3093 MLX5_MKEY_MASK_PAGE_SIZE |

--- 28 unchanged lines hidden ---

3122 MLX5_MKEY_MASK_SMALL_FENCE |
3123 MLX5_MKEY_MASK_FREE |
3124 MLX5_MKEY_MASK_BSF_EN;
3125
3126 return cpu_to_be64(result);
3127}
3128
3129static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
3042}
3043
3044static __be64 frwr_mkey_mask(void)
3045{
3046 u64 result;
3047
3048 result = MLX5_MKEY_MASK_LEN |
3049 MLX5_MKEY_MASK_PAGE_SIZE |

--- 28 unchanged lines hidden ---

3078 MLX5_MKEY_MASK_SMALL_FENCE |
3079 MLX5_MKEY_MASK_FREE |
3080 MLX5_MKEY_MASK_BSF_EN;
3081
3082 return cpu_to_be64(result);
3083}
3084
3085static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
3130 struct mlx5_ib_mr *mr)
3086 struct mlx5_ib_mr *mr)
3131{
3087{
3132 int ndescs = mr->ndescs;
3088 int size = mr->ndescs * mr->desc_size;
3133
3134 memset(umr, 0, sizeof(*umr));
3135
3089
3090 memset(umr, 0, sizeof(*umr));
3091
3136 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
3137 /* KLMs take twice the size of MTTs */
3138 ndescs *= 2;
3139
3140 umr->flags = MLX5_UMR_CHECK_NOT_FREE;
3092 umr->flags = MLX5_UMR_CHECK_NOT_FREE;
3141 umr->klm_octowords = get_klm_octo(ndescs);
3093 umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
3142 umr->mkey_mask = frwr_mkey_mask();
3143}
3144
3145static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
3146{
3147 memset(umr, 0, sizeof(*umr));
3148 umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
3149 umr->flags = MLX5_UMR_INLINE;
3150}
3151
3094 umr->mkey_mask = frwr_mkey_mask();
3095}
3096
3097static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
3098{
3099 memset(umr, 0, sizeof(*umr));
3100 umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
3101 umr->flags = MLX5_UMR_INLINE;
3102}
3103
3152static __be64 get_umr_reg_mr_mask(int atomic)
3104static __be64 get_umr_enable_mr_mask(void)
3153{
3154 u64 result;
3155
3105{
3106 u64 result;
3107
3156 result = MLX5_MKEY_MASK_LEN |
3157 MLX5_MKEY_MASK_PAGE_SIZE |
3158 MLX5_MKEY_MASK_START_ADDR |
3159 MLX5_MKEY_MASK_PD |
3160 MLX5_MKEY_MASK_LR |
3161 MLX5_MKEY_MASK_LW |
3162 MLX5_MKEY_MASK_KEY |
3163 MLX5_MKEY_MASK_RR |
3164 MLX5_MKEY_MASK_RW |
3108 result = MLX5_MKEY_MASK_KEY |
3165 MLX5_MKEY_MASK_FREE;
3166
3109 MLX5_MKEY_MASK_FREE;
3110
3167 if (atomic)
3168 result |= MLX5_MKEY_MASK_A;
3169
3170 return cpu_to_be64(result);
3171}
3172
3111 return cpu_to_be64(result);
3112}
3113
3173static __be64 get_umr_unreg_mr_mask(void)
3114static __be64 get_umr_disable_mr_mask(void)
3174{
3175 u64 result;
3176
3177 result = MLX5_MKEY_MASK_FREE;
3178
3179 return cpu_to_be64(result);
3180}
3181
3115{
3116 u64 result;
3117
3118 result = MLX5_MKEY_MASK_FREE;
3119
3120 return cpu_to_be64(result);
3121}
3122
3182static __be64 get_umr_update_mtt_mask(void)
3183{
3184 u64 result;
3185
3186 result = MLX5_MKEY_MASK_FREE;
3187
3188 return cpu_to_be64(result);
3189}
3190
3191static __be64 get_umr_update_translation_mask(void)
3192{
3193 u64 result;
3194
3195 result = MLX5_MKEY_MASK_LEN |
3196 MLX5_MKEY_MASK_PAGE_SIZE |
3123static __be64 get_umr_update_translation_mask(void)
3124{
3125 u64 result;
3126
3127 result = MLX5_MKEY_MASK_LEN |
3128 MLX5_MKEY_MASK_PAGE_SIZE |
3197 MLX5_MKEY_MASK_START_ADDR |
3198 MLX5_MKEY_MASK_KEY |
3199 MLX5_MKEY_MASK_FREE;
3129 MLX5_MKEY_MASK_START_ADDR;
3200
3201 return cpu_to_be64(result);
3202}
3203
3130
3131 return cpu_to_be64(result);
3132}
3133
3204static __be64 get_umr_update_access_mask(void)
3134static __be64 get_umr_update_access_mask(int atomic)
3205{
3206 u64 result;
3207
3135{
3136 u64 result;
3137
3208 result = MLX5_MKEY_MASK_LW |
3138 result = MLX5_MKEY_MASK_LR |
3139 MLX5_MKEY_MASK_LW |
3209 MLX5_MKEY_MASK_RR |
3140 MLX5_MKEY_MASK_RR |
3210 MLX5_MKEY_MASK_RW |
3211 MLX5_MKEY_MASK_A |
3212 MLX5_MKEY_MASK_KEY |
3213 MLX5_MKEY_MASK_FREE;
3141 MLX5_MKEY_MASK_RW;
3214
3142
3143 if (atomic)
3144 result |= MLX5_MKEY_MASK_A;
3145
3215 return cpu_to_be64(result);
3216}
3217
3218static __be64 get_umr_update_pd_mask(void)
3219{
3220 u64 result;
3221
3146 return cpu_to_be64(result);
3147}
3148
3149static __be64 get_umr_update_pd_mask(void)
3150{
3151 u64 result;
3152
3222 result = MLX5_MKEY_MASK_PD |
3223 MLX5_MKEY_MASK_KEY |
3224 MLX5_MKEY_MASK_FREE;
3153 result = MLX5_MKEY_MASK_PD;
3225
3226 return cpu_to_be64(result);
3227}
3228
3229static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
3230 struct ib_send_wr *wr, int atomic)
3231{
3232 struct mlx5_umr_wr *umrwr = umr_wr(wr);
3233
3234 memset(umr, 0, sizeof(*umr));
3235
3236 if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
3237 umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
3238 else
3239 umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
3240
3154
3155 return cpu_to_be64(result);
3156}
3157
3158static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
3159 struct ib_send_wr *wr, int atomic)
3160{
3161 struct mlx5_umr_wr *umrwr = umr_wr(wr);
3162
3163 memset(umr, 0, sizeof(*umr));
3164
3165 if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
3166 umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
3167 else
3168 umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
3169
3241 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
3242 umr->klm_octowords = get_klm_octo(umrwr->npages);
3243 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT) {
3244 umr->mkey_mask = get_umr_update_mtt_mask();
3245 umr->bsf_octowords = get_klm_octo(umrwr->target.offset);
3246 umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
3247 }
3248 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
3249 umr->mkey_mask |= get_umr_update_translation_mask();
3250 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_ACCESS)
3251 umr->mkey_mask |= get_umr_update_access_mask();
3252 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD)
3253 umr->mkey_mask |= get_umr_update_pd_mask();
3254 if (!umr->mkey_mask)
3255 umr->mkey_mask = get_umr_reg_mr_mask(atomic);
3256 } else {
3257 umr->mkey_mask = get_umr_unreg_mr_mask();
3170 umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
3171 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
3172 u64 offset = get_xlt_octo(umrwr->offset);
3173
3174 umr->xlt_offset = cpu_to_be16(offset & 0xffff);
3175 umr->xlt_offset_47_16 = cpu_to_be32(offset >> 16);
3176 umr->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
3258 }
3177 }
3178 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION)
3179 umr->mkey_mask |= get_umr_update_translation_mask();
3180 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS) {
3181 umr->mkey_mask |= get_umr_update_access_mask(atomic);
3182 umr->mkey_mask |= get_umr_update_pd_mask();
3183 }
3184 if (wr->send_flags & MLX5_IB_SEND_UMR_ENABLE_MR)
3185 umr->mkey_mask |= get_umr_enable_mr_mask();
3186 if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
3187 umr->mkey_mask |= get_umr_disable_mr_mask();
3259
3260 if (!wr->num_sge)
3261 umr->flags |= MLX5_UMR_INLINE;
3262}
3263
3264static u8 get_umr_flags(int acc)
3265{
3266 return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |

--- 31 unchanged lines hidden ---

3298 seg->status = MLX5_MKEY_STATUS_FREE;
3299}
3300
3301static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
3302{
3303 struct mlx5_umr_wr *umrwr = umr_wr(wr);
3304
3305 memset(seg, 0, sizeof(*seg));
3188
3189 if (!wr->num_sge)
3190 umr->flags |= MLX5_UMR_INLINE;
3191}
3192
3193static u8 get_umr_flags(int acc)
3194{
3195 return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |

--- 31 unchanged lines hidden ---

3227 seg->status = MLX5_MKEY_STATUS_FREE;
3228}
3229
3230static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
3231{
3232 struct mlx5_umr_wr *umrwr = umr_wr(wr);
3233
3234 memset(seg, 0, sizeof(*seg));
3306 if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
3235 if (wr->send_flags & MLX5_IB_SEND_UMR_DISABLE_MR)
3307 seg->status = MLX5_MKEY_STATUS_FREE;
3236 seg->status = MLX5_MKEY_STATUS_FREE;
3308 return;
3309 }
3310
3311 seg->flags = convert_access(umrwr->access_flags);
3237
3238 seg->flags = convert_access(umrwr->access_flags);
3312 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_MTT)) {
3313 if (umrwr->pd)
3314 seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
3315 seg->start_addr = cpu_to_be64(umrwr->target.virt_addr);
3316 }
3239 if (umrwr->pd)
3240 seg->flags_pd = cpu_to_be32(to_mpd(umrwr->pd)->pdn);
3241 if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_TRANSLATION &&
3242 !umrwr->length)
3243 seg->flags_pd |= cpu_to_be32(MLX5_MKEY_LEN64);
3244
3245 seg->start_addr = cpu_to_be64(umrwr->virt_addr);
3317 seg->len = cpu_to_be64(umrwr->length);
3318 seg->log2_page_size = umrwr->page_shift;
3319 seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
3320 mlx5_mkey_variant(umrwr->mkey));
3321}
3322
3323static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
3324 struct mlx5_ib_mr *mr,

--- 281 unchanged lines hidden ---

3606 *size += sizeof(*bsf) / 16;
3607 if (unlikely((*seg == qp->sq.qend)))
3608 *seg = mlx5_get_send_wqe(qp, 0);
3609
3610 return 0;
3611}
3612
3613static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
3246 seg->len = cpu_to_be64(umrwr->length);
3247 seg->log2_page_size = umrwr->page_shift;
3248 seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
3249 mlx5_mkey_variant(umrwr->mkey));
3250}
3251
3252static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
3253 struct mlx5_ib_mr *mr,

--- 281 unchanged lines hidden ---

3535 *size += sizeof(*bsf) / 16;
3536 if (unlikely((*seg == qp->sq.qend)))
3537 *seg = mlx5_get_send_wqe(qp, 0);
3538
3539 return 0;
3540}
3541
3542static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
3614 struct ib_sig_handover_wr *wr, u32 nelements,
3543 struct ib_sig_handover_wr *wr, u32 size,
3615 u32 length, u32 pdn)
3616{
3617 struct ib_mr *sig_mr = wr->sig_mr;
3618 u32 sig_key = sig_mr->rkey;
3619 u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;
3620
3621 memset(seg, 0, sizeof(*seg));
3622
3623 seg->flags = get_umr_flags(wr->access_flags) |
3624 MLX5_MKC_ACCESS_MODE_KLMS;
3625 seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
3626 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
3627 MLX5_MKEY_BSF_EN | pdn);
3628 seg->len = cpu_to_be64(length);
3544 u32 length, u32 pdn)
3545{
3546 struct ib_mr *sig_mr = wr->sig_mr;
3547 u32 sig_key = sig_mr->rkey;
3548 u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;
3549
3550 memset(seg, 0, sizeof(*seg));
3551
3552 seg->flags = get_umr_flags(wr->access_flags) |
3553 MLX5_MKC_ACCESS_MODE_KLMS;
3554 seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
3555 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
3556 MLX5_MKEY_BSF_EN | pdn);
3557 seg->len = cpu_to_be64(length);
3629 seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements)));
3558 seg->xlt_oct_size = cpu_to_be32(get_xlt_octo(size));
3630 seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
3631}
3632
3633static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
3559 seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
3560}
3561
3562static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
3634 u32 nelements)
3563 u32 size)
3635{
3636 memset(umr, 0, sizeof(*umr));
3637
3638 umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
3564{
3565 memset(umr, 0, sizeof(*umr));
3566
3567 umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
3639 umr->klm_octowords = get_klm_octo(nelements);
3568 umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
3640 umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
3641 umr->mkey_mask = sig_mkey_mask();
3642}
3643
3644
3645static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp,
3646 void **seg, int *size)
3647{
3648 struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
3649 struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr);
3650 u32 pdn = get_pd(qp)->pdn;
3569 umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
3570 umr->mkey_mask = sig_mkey_mask();
3571}
3572
3573
3574static int set_sig_umr_wr(struct ib_send_wr *send_wr, struct mlx5_ib_qp *qp,
3575 void **seg, int *size)
3576{
3577 struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
3578 struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr);
3579 u32 pdn = get_pd(qp)->pdn;
3651 u32 klm_oct_size;
3580 u32 xlt_size;
3652 int region_len, ret;
3653
3654 if (unlikely(wr->wr.num_sge != 1) ||
3655 unlikely(wr->access_flags & IB_ACCESS_REMOTE_ATOMIC) ||
3656 unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) ||
3657 unlikely(!sig_mr->sig->sig_status_checked))
3658 return -EINVAL;
3659

--- 5 unchanged lines hidden ---

3665 wr->prot->length != wr->wr.sg_list->length))
3666 region_len += wr->prot->length;
3667
3668 /**
3669 * KLM octoword size - if protection was provided
3670 * then we use strided block format (3 octowords),
3671 * else we use single KLM (1 octoword)
3672 **/
3581 int region_len, ret;
3582
3583 if (unlikely(wr->wr.num_sge != 1) ||
3584 unlikely(wr->access_flags & IB_ACCESS_REMOTE_ATOMIC) ||
3585 unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) ||
3586 unlikely(!sig_mr->sig->sig_status_checked))
3587 return -EINVAL;
3588

--- 5 unchanged lines hidden ---

3594 wr->prot->length != wr->wr.sg_list->length))
3595 region_len += wr->prot->length;
3596
3597 /**
3598 * KLM octoword size - if protection was provided
3599 * then we use strided block format (3 octowords),
3600 * else we use single KLM (1 octoword)
3601 **/
3673 klm_oct_size = wr->prot ? 3 : 1;
3602 xlt_size = wr->prot ? 0x30 : sizeof(struct mlx5_klm);
3674
3603
3675 set_sig_umr_segment(*seg, klm_oct_size);
3604 set_sig_umr_segment(*seg, xlt_size);
3676 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
3677 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
3678 if (unlikely((*seg == qp->sq.qend)))
3679 *seg = mlx5_get_send_wqe(qp, 0);
3680
3605 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
3606 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
3607 if (unlikely((*seg == qp->sq.qend)))
3608 *seg = mlx5_get_send_wqe(qp, 0);
3609
3681 set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn);
3610 set_sig_mkey_segment(*seg, wr, xlt_size, region_len, pdn);
3682 *seg += sizeof(struct mlx5_mkey_seg);
3683 *size += sizeof(struct mlx5_mkey_seg) / 16;
3684 if (unlikely((*seg == qp->sq.qend)))
3685 *seg = mlx5_get_send_wqe(qp, 0);
3686
3687 ret = set_sig_data_segment(wr, qp, seg, size);
3688 if (ret)
3689 return ret;

--- 89 unchanged lines hidden ---

3779 j = 0;
3780 }
3781 pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
3782 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
3783 be32_to_cpu(p[j + 3]));
3784 }
3785}
3786
3611 *seg += sizeof(struct mlx5_mkey_seg);
3612 *size += sizeof(struct mlx5_mkey_seg) / 16;
3613 if (unlikely((*seg == qp->sq.qend)))
3614 *seg = mlx5_get_send_wqe(qp, 0);
3615
3616 ret = set_sig_data_segment(wr, qp, seg, size);
3617 if (ret)
3618 return ret;

--- 89 unchanged lines hidden ---

3708 j = 0;
3709 }
3710 pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
3711 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
3712 be32_to_cpu(p[j + 3]));
3713 }
3714}
3715
3787static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
3788 unsigned bytecnt, struct mlx5_ib_qp *qp)
3789{
3790 while (bytecnt > 0) {
3791 __iowrite64_copy(dst++, src++, 8);
3792 __iowrite64_copy(dst++, src++, 8);
3793 __iowrite64_copy(dst++, src++, 8);
3794 __iowrite64_copy(dst++, src++, 8);
3795 __iowrite64_copy(dst++, src++, 8);
3796 __iowrite64_copy(dst++, src++, 8);
3797 __iowrite64_copy(dst++, src++, 8);
3798 __iowrite64_copy(dst++, src++, 8);
3799 bytecnt -= 64;
3800 if (unlikely(src == qp->sq.qend))
3801 src = mlx5_get_send_wqe(qp, 0);
3802 }
3803}
3804
3805static u8 get_fence(u8 fence, struct ib_send_wr *wr)
3806{
3807 if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
3808 wr->send_flags & IB_SEND_FENCE))
3809 return MLX5_FENCE_MODE_STRONG_ORDERING;
3810
3811 if (unlikely(fence)) {
3812 if (wr->send_flags & IB_SEND_FENCE)

--- 79 unchanged lines hidden ---

3892 int i;
3893 u8 next_fence = 0;
3894 u8 fence;
3895
3896 if (unlikely(ibqp->qp_type == IB_QPT_GSI))
3897 return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);
3898
3899 qp = to_mqp(ibqp);
3716static u8 get_fence(u8 fence, struct ib_send_wr *wr)
3717{
3718 if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
3719 wr->send_flags & IB_SEND_FENCE))
3720 return MLX5_FENCE_MODE_STRONG_ORDERING;
3721
3722 if (unlikely(fence)) {
3723 if (wr->send_flags & IB_SEND_FENCE)

--- 79 unchanged lines hidden ---

3803 int i;
3804 u8 next_fence = 0;
3805 u8 fence;
3806
3807 if (unlikely(ibqp->qp_type == IB_QPT_GSI))
3808 return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);
3809
3810 qp = to_mqp(ibqp);
3900 bf = qp->bf;
3811 bf = &qp->bf;
3901 qend = qp->sq.qend;
3902
3903 spin_lock_irqsave(&qp->sq.lock, flags);
3904
3905 if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
3906 err = -EIO;
3907 *bad_wr = wr;
3908 nreq = 0;

--- 256 unchanged lines hidden ---

4165 wmb();
4166
4167 qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
4168
4169 /* Make sure doorbell record is visible to the HCA before
4170 * we hit doorbell */
4171 wmb();
4172
3812 qend = qp->sq.qend;
3813
3814 spin_lock_irqsave(&qp->sq.lock, flags);
3815
3816 if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
3817 err = -EIO;
3818 *bad_wr = wr;
3819 nreq = 0;

--- 256 unchanged lines hidden ---

4076 wmb();
4077
4078 qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
4079
4080 /* Make sure doorbell record is visible to the HCA before
4081 * we hit doorbell */
4082 wmb();
4083
4173 if (bf->need_lock)
4174 spin_lock(&bf->lock);
4175 else
4176 __acquire(&bf->lock);
4177
4178 /* TBD enable WC */
4179 if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
4180 mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
4181 /* wc_wmb(); */
4182 } else {
4183 mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset,
4184 MLX5_GET_DOORBELL_LOCK(&bf->lock32));
4185 /* Make sure doorbells don't leak out of SQ spinlock
4186 * and reach the HCA out of order.
4187 */
4188 mmiowb();
4189 }
4084 /* currently we support only regular doorbells */
4085 mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset, NULL);
4086 /* Make sure doorbells don't leak out of SQ spinlock
4087 * and reach the HCA out of order.
4088 */
4089 mmiowb();
4190 bf->offset ^= bf->buf_size;
4090 bf->offset ^= bf->buf_size;
4191 if (bf->need_lock)
4192 spin_unlock(&bf->lock);
4193 else
4194 __release(&bf->lock);
4195 }
4196
4197 spin_unlock_irqrestore(&qp->sq.lock, flags);
4198
4199 return err;
4200}
4201
4202static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)

--- 351 unchanged lines hidden ---

4554
4555 if (ibqp->rwq_ind_tbl)
4556 return -ENOSYS;
4557
4558 if (unlikely(ibqp->qp_type == IB_QPT_GSI))
4559 return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
4560 qp_init_attr);
4561
4091 }
4092
4093 spin_unlock_irqrestore(&qp->sq.lock, flags);
4094
4095 return err;
4096}
4097
4098static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)

--- 351 unchanged lines hidden ---

4450
4451 if (ibqp->rwq_ind_tbl)
4452 return -ENOSYS;
4453
4454 if (unlikely(ibqp->qp_type == IB_QPT_GSI))
4455 return mlx5_ib_gsi_query_qp(ibqp, qp_attr, qp_attr_mask,
4456 qp_init_attr);
4457
4562#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
4563 /*
4564 * Wait for any outstanding page faults, in case the user frees memory
4565 * based upon this query's result.
4566 */
4567 flush_workqueue(mlx5_ib_page_fault_wq);
4568#endif
4569
4570 mutex_lock(&qp->mutex);
4571
4572 if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
4573 err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state);
4574 if (err)
4575 goto out;
4576 qp->state = raw_packet_qp_state;
4577 qp_attr->port_num = 1;

--- 440 unchanged lines hidden ---
4458 mutex_lock(&qp->mutex);
4459
4460 if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) {
4461 err = query_raw_packet_qp_state(dev, qp, &raw_packet_qp_state);
4462 if (err)
4463 goto out;
4464 qp->state = raw_packet_qp_state;
4465 qp_attr->port_num = 1;

--- 440 unchanged lines hidden ---