xref: /linux/drivers/infiniband/hw/mana/cq.c (revision 90d32e92011eaae8e70a9169b4e7acf4ca8f9d3a)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

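/*
 * mana_ib_create_cq - create a completion queue on user-supplied memory.
 *
 * Copies the create parameters from udata, builds the queue from the
 * user buffer (ucmd.buf_addr) and, for RNIC CQs, creates the hardware
 * CQ object and installs the completion callback. On failure, every
 * step taken so far is unwound in reverse order.
 */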
int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata)
{
	struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
	struct mana_ib_create_cq_resp resp = {};
	struct mana_ib_ucontext *mana_ucontext;
	struct ib_device *ibdev = ibcq->device;
	struct mana_ib_create_cq ucmd = {};
	struct mana_ib_dev *mdev;
	bool is_rnic_cq;
	u32 doorbell;
	int err;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	cq->comp_vector = attr->comp_vector % ibdev->num_comp_vectors;
	cq->cq_handle = INVALID_MANA_HANDLE;

	if (udata->inlen < offsetof(struct mana_ib_create_cq, flags))
		return -EINVAL;

	err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (err) {
		ibdev_dbg(ibdev,
			  "Failed to copy from udata for create cq, %d\n", err);
		return err;
	}

	is_rnic_cq = !!(ucmd.flags & MANA_IB_CREATE_RNIC_CQ);

	if (!is_rnic_cq && attr->cqe > mdev->adapter_caps.max_qp_wr) {
		ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe);
		return -EINVAL;
	}

	cq->cqe = attr->cqe;
	err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE, &cq->queue);
	if (err) {
		ibdev_dbg(ibdev, "Failed to create queue for create cq, %d\n", err);
		return err;
	}

	mana_ucontext = rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
						  ibucontext);
	doorbell = mana_ucontext->doorbell;

	if (is_rnic_cq) {
		err = mana_ib_gd_create_cq(mdev, cq, doorbell);
		if (err) {
			ibdev_dbg(ibdev, "Failed to create RNIC cq, %d\n", err);
			goto err_destroy_queue;
		}

		err = mana_ib_install_cq_cb(mdev, cq);
		if (err) {
			ibdev_dbg(ibdev, "Failed to install cq callback, %d\n", err);
			goto err_destroy_rnic_cq;
		}
	}

	resp.cqid = cq->queue.id;
	err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
	if (err) {
		ibdev_dbg(&mdev->ib_dev, "Failed to copy to udata, %d\n", err);
		goto err_remove_cq_cb;
	}

	return 0;

err_remove_cq_cb:
	mana_ib_remove_cq_cb(mdev, cq);
err_destroy_rnic_cq:
	mana_ib_gd_destroy_cq(mdev, cq);
err_destroy_queue:
	mana_ib_destroy_queue(mdev, &cq->queue);

	return err;
}

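/*
 * mana_ib_destroy_cq - tear down a completion queue.
 *
 * Removes the CQ-table callback entry, destroys the hardware CQ object,
 * and releases the queue memory, mirroring the create path in reverse.
 */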
int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
	struct ib_device *ibdev = ibcq->device;
	struct mana_ib_dev *mdev;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	mana_ib_remove_cq_cb(mdev, cq);

	/* Ignore the return code, as there is not much we can do about it.
	 * The error message is printed inside the callee.
	 */
	mana_ib_gd_destroy_cq(mdev, cq);

	mana_ib_destroy_queue(mdev, &cq->queue);

	return 0;
}

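/*
 * Completion handler invoked by the GDMA layer; forwards the event to
 * the consumer's comp_handler, if one is registered.
 */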
static void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq)
{
	struct mana_ib_cq *cq = ctx;

	if (cq->ibcq.comp_handler)
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

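/*
 * mana_ib_install_cq_cb - route completions for this CQ to the driver.
 *
 * Allocates a gdma_queue shadow entry and registers it in the GDMA
 * context's CQ table, keyed by queue ID, so that hardware events are
 * dispatched to mana_ib_cq_handler().
 */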
int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct gdma_queue *gdma_cq;

	if (cq->queue.id >= gc->max_num_cqs)
		return -EINVAL;
	/* Create CQ table entry */
	WARN_ON(gc->cq_table[cq->queue.id]);
	gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
	if (!gdma_cq)
		return -ENOMEM;

	gdma_cq->cq.context = cq;
	gdma_cq->type = GDMA_CQ;
	gdma_cq->cq.callback = mana_ib_cq_handler;
	gdma_cq->id = cq->queue.id;
	gc->cq_table[cq->queue.id] = gdma_cq;
	return 0;
}

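/*
 * mana_ib_remove_cq_cb - undo mana_ib_install_cq_cb().
 *
 * Frees the CQ-table entry and clears the slot; a no-op for queue IDs
 * that are out of range, invalid, or were never installed.
 */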
void mana_ib_remove_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
{
	struct gdma_context *gc = mdev_to_gc(mdev);

	if (cq->queue.id >= gc->max_num_cqs || cq->queue.id == INVALID_QUEUE_ID)
		return;

	kfree(gc->cq_table[cq->queue.id]);
	gc->cq_table[cq->queue.id] = NULL;
}