xref: /linux/drivers/infiniband/core/uverbs_std_types_dmabuf.c (revision 9e4e86a604dfd06402933467578c4b79f5412b2c)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /*
3  * Copyright (c) 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved
4  */
5 
6 #include <linux/dma-buf-mapping.h>
7 #include <linux/pci-p2pdma.h>
8 #include <linux/dma-resv.h>
9 #include <rdma/uverbs_std_types.h>
10 #include "rdma_core.h"
11 #include "uverbs.h"
12 
13 MODULE_IMPORT_NS("DMA_BUF");
14 
uverbs_dmabuf_attach(struct dma_buf * dmabuf,struct dma_buf_attachment * attachment)15 static int uverbs_dmabuf_attach(struct dma_buf *dmabuf,
16 				struct dma_buf_attachment *attachment)
17 {
18 	if (!attachment->peer2peer)
19 		return -EOPNOTSUPP;
20 
21 	return 0;
22 }
23 
/*
 * Map the exported physical range for an attached importer.
 *
 * Caller must hold the dma-buf's reservation lock.  Fails with -ENODEV
 * once the buffer has been revoked.  On success a reference on
 * priv->kref is taken; uverbs_dmabuf_unmap() drops it when the importer
 * releases the mapping.
 */
static struct sg_table *
uverbs_dmabuf_map(struct dma_buf_attachment *attachment,
		  enum dma_data_direction dir)
{
	struct ib_uverbs_dmabuf_file *priv = attachment->dmabuf->priv;
	struct sg_table *ret;

	dma_resv_assert_held(priv->dmabuf->resv);

	/* Revocation (uobject destroy / driver removal) blocks new mappings */
	if (priv->revoked)
		return ERR_PTR(-ENODEV);

	/* Single phys_vec entry covering the whole buffer length */
	ret = dma_buf_phys_vec_to_sgt(attachment, priv->provider,
				      &priv->phys_vec, 1, priv->phys_vec.len,
				      dir);
	if (IS_ERR(ret))
		return ret;

	/* Hold the file private alive until this mapping is torn down */
	kref_get(&priv->kref);
	return ret;
}
45 
/*
 * Tear down a mapping created by uverbs_dmabuf_map() and drop the
 * reference it took.  Caller must hold the dma-buf's reservation lock.
 */
static void uverbs_dmabuf_unmap(struct dma_buf_attachment *attachment,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	struct ib_uverbs_dmabuf_file *priv = attachment->dmabuf->priv;

	dma_resv_assert_held(priv->dmabuf->resv);
	dma_buf_free_sgt(attachment, sgt, dir);
	/* May be the final put; ib_uverbs_dmabuf_done() runs on last ref */
	kref_put(&priv->kref, ib_uverbs_dmabuf_done);
}
56 
/* This exporter does not support pinned mappings. */
static int uverbs_dmabuf_pin(struct dma_buf_attachment *attach)
{
	return -EOPNOTSUPP;
}
61 
/* Nothing to undo: uverbs_dmabuf_pin() never succeeds. */
static void uverbs_dmabuf_unpin(struct dma_buf_attachment *attach)
{
}
65 
/*
 * dma_buf .release callback, invoked on the final put of the dma-buf.
 * Releases the uobject embedded in the file private.
 */
static void uverbs_dmabuf_release(struct dma_buf *dmabuf)
{
	struct ib_uverbs_dmabuf_file *priv = dmabuf->priv;

	/*
	 * This can only happen if the fput came from alloc_abort_fd_uobject()
	 */
	if (!priv->uobj.context)
		return;

	uverbs_uobject_release(&priv->uobj);
}
78 
79 static const struct dma_buf_ops uverbs_dmabuf_ops = {
80 	.attach = uverbs_dmabuf_attach,
81 	.map_dma_buf = uverbs_dmabuf_map,
82 	.unmap_dma_buf = uverbs_dmabuf_unmap,
83 	.pin = uverbs_dmabuf_pin,
84 	.unpin = uverbs_dmabuf_unpin,
85 	.release = uverbs_dmabuf_release,
86 };
87 
/*
 * UVERBS_METHOD_DMABUF_ALLOC: export a driver-provided mmap region as a
 * dma-buf and hand it to userspace via a new FD uobject.
 *
 * The user supplies the page offset of an existing driver mmap entry;
 * the driver's pgoff_to_mmap_entry()/mmap_get_pfns() callbacks resolve
 * it to a physical address vector which the new dma-buf exports.  The
 * dma-buf is linked onto the mmap entry's list so it can be revoked on
 * driver removal.
 */
static int UVERBS_HANDLER(UVERBS_METHOD_DMABUF_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj =
		uverbs_attr_get(attrs, UVERBS_ATTR_ALLOC_DMABUF_HANDLE)
			->obj_attr.uobject;
	struct ib_uverbs_dmabuf_file *uverbs_dmabuf =
		container_of(uobj, struct ib_uverbs_dmabuf_file, uobj);
	struct ib_device *ib_dev = attrs->context->device;
	struct rdma_user_mmap_entry *mmap_entry;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	off_t pg_off;
	int ret;

	ret = uverbs_get_const(&pg_off, attrs, UVERBS_ATTR_ALLOC_DMABUF_PGOFF);
	if (ret)
		return ret;

	/* Takes a reference on the entry; dropped on error or at destroy */
	mmap_entry = ib_dev->ops.pgoff_to_mmap_entry(attrs->context, pg_off);
	if (!mmap_entry)
		return -EINVAL;

	ret = ib_dev->ops.mmap_get_pfns(mmap_entry, &uverbs_dmabuf->phys_vec,
					&uverbs_dmabuf->provider);
	if (ret)
		goto err;

	exp_info.ops = &uverbs_dmabuf_ops;
	exp_info.size = uverbs_dmabuf->phys_vec.len;
	exp_info.flags = O_CLOEXEC;
	exp_info.priv = uverbs_dmabuf;

	uverbs_dmabuf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(uverbs_dmabuf->dmabuf)) {
		ret = PTR_ERR(uverbs_dmabuf->dmabuf);
		goto err;
	}

	kref_init(&uverbs_dmabuf->kref);
	init_completion(&uverbs_dmabuf->comp);
	INIT_LIST_HEAD(&uverbs_dmabuf->dmabufs_elm);
	/* Publish on the entry's list unless the driver already went away */
	mutex_lock(&mmap_entry->dmabufs_lock);
	if (mmap_entry->driver_removed)
		ret = -EIO;
	else
		list_add_tail(&uverbs_dmabuf->dmabufs_elm, &mmap_entry->dmabufs);
	mutex_unlock(&mmap_entry->dmabufs_lock);
	if (ret)
		goto err_revoked;

	uobj->object = uverbs_dmabuf->dmabuf->file;
	uverbs_dmabuf->mmap_entry = mmap_entry;
	uverbs_finalize_uobj_create(attrs, UVERBS_ATTR_ALLOC_DMABUF_HANDLE);
	return 0;

err_revoked:
	dma_buf_put(uverbs_dmabuf->dmabuf);
err:
	rdma_user_mmap_entry_put(mmap_entry);
	return ret;
}
149 
/*
 * Attribute layout for DMABUF_ALLOC: a mandatory new FD handle plus the
 * mandatory u64 page offset identifying the driver mmap entry.
 */
DECLARE_UVERBS_NAMED_METHOD(
	UVERBS_METHOD_DMABUF_ALLOC,
	UVERBS_ATTR_FD(UVERBS_ATTR_ALLOC_DMABUF_HANDLE,
		       UVERBS_OBJECT_DMABUF,
		       UVERBS_ACCESS_NEW,
		       UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(UVERBS_ATTR_ALLOC_DMABUF_PGOFF,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY));
159 
/*
 * FD-uobject destroy callback: revoke the dma-buf so importers can no
 * longer map it, wait until every outstanding DMA mapping has been torn
 * down, then drop the mmap entry reference taken at allocation time.
 */
static void uverbs_dmabuf_fd_destroy_uobj(struct ib_uobject *uobj,
					  enum rdma_remove_reason why)
{
	struct ib_uverbs_dmabuf_file *uverbs_dmabuf =
		container_of(uobj, struct ib_uverbs_dmabuf_file, uobj);
	bool wait_for_comp = false;

	/* dmabufs_lock is taken before the reservation lock */
	mutex_lock(&uverbs_dmabuf->mmap_entry->dmabufs_lock);
	dma_resv_lock(uverbs_dmabuf->dmabuf->resv, NULL);
	if (!uverbs_dmabuf->revoked) {
		/* First revoker: unlink and force importers to unmap */
		uverbs_dmabuf->revoked = true;
		list_del(&uverbs_dmabuf->dmabufs_elm);
		dma_buf_invalidate_mappings(uverbs_dmabuf->dmabuf);
		/* Drain in-flight DMA fenced on the reservation object */
		dma_resv_wait_timeout(uverbs_dmabuf->dmabuf->resv,
				      DMA_RESV_USAGE_BOOKKEEP, false,
				      MAX_SCHEDULE_TIMEOUT);
		wait_for_comp = true;
	}
	dma_resv_unlock(uverbs_dmabuf->dmabuf->resv);
	if (wait_for_comp) {
		/* Drop the initial reference taken by kref_init() at alloc */
		kref_put(&uverbs_dmabuf->kref, ib_uverbs_dmabuf_done);
		/* Let's wait till all DMA unmap are completed. */
		wait_for_completion(&uverbs_dmabuf->comp);
	}
	mutex_unlock(&uverbs_dmabuf->mmap_entry->dmabufs_lock);

	/* Matches the get done as part of pgoff_to_mmap_entry() */
	rdma_user_mmap_entry_put(uverbs_dmabuf->mmap_entry);
}
189 
/*
 * DMABUF uobject type: an FD-based uobject sized for
 * ib_uverbs_dmabuf_file, destroyed via uverbs_dmabuf_fd_destroy_uobj()
 * and opened read-only.
 */
DECLARE_UVERBS_NAMED_OBJECT(
	UVERBS_OBJECT_DMABUF,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct ib_uverbs_dmabuf_file),
			     uverbs_dmabuf_fd_destroy_uobj,
			     NULL, NULL, O_RDONLY),
	&UVERBS_METHOD(UVERBS_METHOD_DMABUF_ALLOC));
196 
197 const struct uapi_definition uverbs_def_obj_dmabuf[] = {
198 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DMABUF),
199 				      UAPI_DEF_OBJ_NEEDS_FN(mmap_get_pfns),
200 				      UAPI_DEF_OBJ_NEEDS_FN(pgoff_to_mmap_entry),
201 	{}
202 };
203