xref: /linux/drivers/infiniband/hw/mana/cq.c (revision 3663e2c4bc45fcdc71931fcbfcbfbf9b71f55c83)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

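/*
 * CQ verbs for the Microsoft Azure Network Adapter (MANA) RDMA driver.
 * These entry points are hooked into the driver's ib_device_ops table
 * when the device is registered; a sketch of the relevant members:
 *
 *	.create_cq  = mana_ib_create_cq,
 *	.destroy_cq = mana_ib_destroy_cq,
 *
 * mana_ib_create_cq() parses the user command, maps the user-supplied
 * buffer into a GDMA queue and, for RNIC CQs, also creates the hardware
 * CQ object and installs its completion callback.
 */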
int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct uverbs_attr_bundle *attrs)
{
	struct ib_udata *udata = &attrs->driver_udata;
	struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
	struct mana_ib_create_cq_resp resp = {};
	struct mana_ib_ucontext *mana_ucontext;
	struct ib_device *ibdev = ibcq->device;
	struct mana_ib_create_cq ucmd = {};
	struct mana_ib_dev *mdev;
	bool is_rnic_cq;
	u32 doorbell;
	int err;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	cq->comp_vector = attr->comp_vector % ibdev->num_comp_vectors;
	cq->cq_handle = INVALID_MANA_HANDLE;

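	/*
	 * The command must carry at least the fields that precede
	 * 'flags' (i.e. the CQ buffer address); anything shorter is
	 * malformed.
	 */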
	if (udata->inlen < offsetof(struct mana_ib_create_cq, flags))
		return -EINVAL;

	err = ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen));
	if (err) {
		ibdev_dbg(ibdev,
			  "Failed to copy from udata for create cq, %d\n", err);
		return err;
	}

	is_rnic_cq = !!(ucmd.flags & MANA_IB_CREATE_RNIC_CQ);

	if (!is_rnic_cq && attr->cqe > mdev->adapter_caps.max_qp_wr) {
		ibdev_dbg(ibdev, "CQE %d exceeding limit\n", attr->cqe);
		return -EINVAL;
	}

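	/* The user buffer must hold attr->cqe entries of COMP_ENTRY_SIZE bytes. */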
	cq->cqe = attr->cqe;
	err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE, &cq->queue);
	if (err) {
		ibdev_dbg(ibdev, "Failed to create queue for create cq, %d\n", err);
		return err;
	}

	mana_ucontext = rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
						  ibucontext);
	doorbell = mana_ucontext->doorbell;

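	/*
	 * Only RNIC CQs get a hardware CQ object and a completion
	 * callback here; for other CQs the callback is installed later,
	 * from the QP creation path that consumes them.
	 */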
	if (is_rnic_cq) {
		err = mana_ib_gd_create_cq(mdev, cq, doorbell);
		if (err) {
			ibdev_dbg(ibdev, "Failed to create RNIC cq, %d\n", err);
			goto err_destroy_queue;
		}

		err = mana_ib_install_cq_cb(mdev, cq);
		if (err) {
			ibdev_dbg(ibdev, "Failed to install cq callback, %d\n", err);
			goto err_destroy_rnic_cq;
		}
	}

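	/* Report the kernel-assigned queue id back to user-space. */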
	resp.cqid = cq->queue.id;
	err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
	if (err) {
		ibdev_dbg(&mdev->ib_dev, "Failed to copy to udata, %d\n", err);
		goto err_remove_cq_cb;
	}

	return 0;

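	/*
	 * Unwind in reverse order of setup. Falling through
	 * err_destroy_rnic_cq for a non-RNIC CQ is safe because
	 * mana_ib_gd_destroy_cq() does nothing while cq_handle is still
	 * INVALID_MANA_HANDLE.
	 */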
err_remove_cq_cb:
	mana_ib_remove_cq_cb(mdev, cq);
err_destroy_rnic_cq:
	mana_ib_gd_destroy_cq(mdev, cq);
err_destroy_queue:
	mana_ib_destroy_queue(mdev, &cq->queue);

	return err;
}

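/*
 * Tear down a CQ created by mana_ib_create_cq(): drop the CQ table
 * entry, destroy the hardware CQ object and release the backing queue.
 */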
int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
	struct ib_device *ibdev = ibcq->device;
	struct mana_ib_dev *mdev;

	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	mana_ib_remove_cq_cb(mdev, cq);

	/* Ignore the return code: there is not much we can do about a
	 * failure here, and the callee logs the error itself.
	 */
	mana_ib_gd_destroy_cq(mdev, cq);

	mana_ib_destroy_queue(mdev, &cq->queue);

	return 0;
}

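/*
 * Completion dispatch: invoked from the GDMA event path when the
 * hardware signals this CQ; forwards the event to the consumer's
 * comp_handler, if one is armed.
 */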
static void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq)
{
	struct mana_ib_cq *cq = ctx;

	if (cq->ibcq.comp_handler)
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

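/*
 * Publish the CQ in the GDMA context's CQ table so that completion
 * events for its queue id are dispatched to mana_ib_cq_handler().
 */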
int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
{
	struct gdma_context *gc = mdev_to_gc(mdev);
	struct gdma_queue *gdma_cq;

	if (cq->queue.id >= gc->max_num_cqs)
		return -EINVAL;
	/* Create the CQ table entry; the slot must be free. */
	WARN_ON(gc->cq_table[cq->queue.id]);
	gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
	if (!gdma_cq)
		return -ENOMEM;

	gdma_cq->cq.context = cq;
	gdma_cq->type = GDMA_CQ;
	gdma_cq->cq.callback = mana_ib_cq_handler;
	gdma_cq->id = cq->queue.id;
	gc->cq_table[cq->queue.id] = gdma_cq;
	return 0;
}

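/*
 * Undo mana_ib_install_cq_cb(). Safe to call even if the callback was
 * never installed: invalid or out-of-range queue ids are ignored, and
 * kfree(NULL) is a no-op.
 */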
void mana_ib_remove_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
{
	struct gdma_context *gc = mdev_to_gc(mdev);

	if (cq->queue.id >= gc->max_num_cqs || cq->queue.id == INVALID_QUEUE_ID)
		return;

	kfree(gc->cq_table[cq->queue.id]);
	gc->cq_table[cq->queue.id] = NULL;
}