// SPDX-License-Identifier: GPL-2.0
/*
 *  Functions for dibs loopback/loopback-ism device.
 *
 *  Copyright (c) 2024, Alibaba Inc.
 *
 *  Author: Wen Gu <guwen@linux.alibaba.com>
 *          Tony Lu <tonylu@linux.alibaba.com>
 *
 */

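/*
 * Implementation overview: the loopback device emulates a dibs/ISM-style
 * device without any hardware. DMBs are plain kzalloc()ed buffers,
 * tracked in a hash table keyed by a random 64-bit token. "Moving" data
 * is a memcpy() into the target DMB, optionally followed by a simulated
 * interrupt to the dibs client that owns the buffer.
 */
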
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/dibs.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "dibs_loopback.h"

#define DIBS_LO_SUPPORT_NOCOPY	0x1
#define DIBS_DMA_ADDR_INVALID	(~(dma_addr_t)0)

static const char dibs_lo_dev_name[] = "lo";
/* global loopback device */
static struct dibs_lo_dev *lo_dev;

static u16 dibs_lo_get_fabric_id(struct dibs_dev *dibs)
{
	return DIBS_LOOPBACK_FABRIC;
}

static int dibs_lo_query_rgid(struct dibs_dev *dibs, const uuid_t *rgid,
			      u32 vid_valid, u32 vid)
{
	/* rgid should be the same as lgid */
	if (!uuid_equal(rgid, &dibs->gid))
		return -ENETUNREACH;
	return 0;
}

static int dibs_lo_max_dmbs(void)
{
	return DIBS_LO_MAX_DMBS;
}

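/*
 * Register a new DMB: claim a free slot in the index bitmap, allocate
 * the backing buffer, assign a random token and publish the node in
 * the token hash table. The owning client is recorded so that signals
 * can be routed to it later.
 */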
static int dibs_lo_register_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb,
				struct dibs_client *client)
{
	struct dibs_lo_dmb_node *dmb_node, *tmp_node;
	struct dibs_lo_dev *ldev;
	unsigned long flags;
	int sba_idx, rc;

	ldev = dibs->drv_priv;
	sba_idx = dmb->idx;
	/* check space for new dmb */
	for_each_clear_bit(sba_idx, ldev->sba_idx_mask, DIBS_LO_MAX_DMBS) {
		if (!test_and_set_bit(sba_idx, ldev->sba_idx_mask))
			break;
	}
	if (sba_idx == DIBS_LO_MAX_DMBS)
		return -ENOSPC;

	dmb_node = kzalloc(sizeof(*dmb_node), GFP_KERNEL);
	if (!dmb_node) {
		rc = -ENOMEM;
		goto err_bit;
	}

	dmb_node->sba_idx = sba_idx;
	dmb_node->len = dmb->dmb_len;
	dmb_node->cpu_addr = kzalloc(dmb_node->len, GFP_KERNEL |
				     __GFP_NOWARN | __GFP_NORETRY |
				     __GFP_NOMEMALLOC);
	if (!dmb_node->cpu_addr) {
		rc = -ENOMEM;
		goto err_node;
	}
	dmb_node->dma_addr = DIBS_DMA_ADDR_INVALID;
	refcount_set(&dmb_node->refcnt, 1);

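	/*
	 * Tokens are random 64-bit values; on the unlikely collision
	 * with an already registered DMB, retry with a fresh token.
	 */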
again:
	/* add new dmb into hash table */
	get_random_bytes(&dmb_node->token, sizeof(dmb_node->token));
	write_lock_bh(&ldev->dmb_ht_lock);
	hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb_node->token) {
		if (tmp_node->token == dmb_node->token) {
			write_unlock_bh(&ldev->dmb_ht_lock);
			goto again;
		}
	}
	hash_add(ldev->dmb_ht, &dmb_node->list, dmb_node->token);
	write_unlock_bh(&ldev->dmb_ht_lock);
	atomic_inc(&ldev->dmb_cnt);

	dmb->idx = dmb_node->sba_idx;
	dmb->dmb_tok = dmb_node->token;
	dmb->cpu_addr = dmb_node->cpu_addr;
	dmb->dma_addr = dmb_node->dma_addr;
	dmb->dmb_len = dmb_node->len;

	spin_lock_irqsave(&dibs->lock, flags);
	dibs->dmb_clientid_arr[sba_idx] = client->id;
	spin_unlock_irqrestore(&dibs->lock, flags);

	return 0;

err_node:
	kfree(dmb_node);
err_bit:
	clear_bit(sba_idx, ldev->sba_idx_mask);
	return rc;
}

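/*
 * Final teardown of a DMB node: unlink it from the hash table, release
 * its index bit and backing buffer, and wake up a pending device
 * removal once the last DMB is gone.
 */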
static void __dibs_lo_unregister_dmb(struct dibs_lo_dev *ldev,
				     struct dibs_lo_dmb_node *dmb_node)
{
	/* remove dmb from hash table */
	write_lock_bh(&ldev->dmb_ht_lock);
	hash_del(&dmb_node->list);
	write_unlock_bh(&ldev->dmb_ht_lock);

	clear_bit(dmb_node->sba_idx, ldev->sba_idx_mask);
	kfree(dmb_node->cpu_addr);
	kfree(dmb_node);

	if (atomic_dec_and_test(&ldev->dmb_cnt))
		wake_up(&ldev->ldev_release);
}

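/*
 * Drop the registration reference on a DMB. If a peer still holds an
 * attach reference, the actual teardown is deferred until the peer
 * detaches.
 */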
static int dibs_lo_unregister_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb)
{
	struct dibs_lo_dmb_node *dmb_node = NULL, *tmp_node;
	struct dibs_lo_dev *ldev;
	unsigned long flags;

	ldev = dibs->drv_priv;

	/* find dmb from hash table */
	read_lock_bh(&ldev->dmb_ht_lock);
	hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb->dmb_tok) {
		if (tmp_node->token == dmb->dmb_tok) {
			dmb_node = tmp_node;
			break;
		}
	}
	read_unlock_bh(&ldev->dmb_ht_lock);
	if (!dmb_node)
		return -EINVAL;

	if (refcount_dec_and_test(&dmb_node->refcnt)) {
		spin_lock_irqsave(&dibs->lock, flags);
		dibs->dmb_clientid_arr[dmb_node->sba_idx] = NO_DIBS_CLIENT;
		spin_unlock_irqrestore(&dibs->lock, flags);

		__dibs_lo_unregister_dmb(ldev, dmb_node);
	}
	return 0;
}

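/*
 * Loopback can expose the remote DMB to the sender directly (mapped
 * rather than copied into), so advertise nocopy support.
 */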
static int dibs_lo_support_dmb_nocopy(struct dibs_dev *dibs)
{
	return DIBS_LO_SUPPORT_NOCOPY;
}

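/*
 * Attach to a peer's DMB: look it up by token and take an extra
 * reference so the sender may write into the buffer directly.
 */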
static int dibs_lo_attach_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb)
{
	struct dibs_lo_dmb_node *dmb_node = NULL, *tmp_node;
	struct dibs_lo_dev *ldev;

	ldev = dibs->drv_priv;

	/* find dmb_node according to dmb->dmb_tok */
	read_lock_bh(&ldev->dmb_ht_lock);
	hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb->dmb_tok) {
		if (tmp_node->token == dmb->dmb_tok) {
			dmb_node = tmp_node;
			break;
		}
	}
	if (!dmb_node) {
		read_unlock_bh(&ldev->dmb_ht_lock);
		return -EINVAL;
	}
	read_unlock_bh(&ldev->dmb_ht_lock);

	if (!refcount_inc_not_zero(&dmb_node->refcnt))
		/* the dmb is being unregistered, but has
		 * not been removed from the hash table.
		 */
		return -EINVAL;

	/* provide dmb information */
	dmb->idx = dmb_node->sba_idx;
	dmb->dmb_tok = dmb_node->token;
	dmb->cpu_addr = dmb_node->cpu_addr;
	dmb->dma_addr = dmb_node->dma_addr;
	dmb->dmb_len = dmb_node->len;
	return 0;
}

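/*
 * Detach from a peer's DMB: drop the reference taken at attach time.
 * If the owner has unregistered the DMB in the meantime, this drops
 * the last reference and frees it.
 */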
static int dibs_lo_detach_dmb(struct dibs_dev *dibs, u64 token)
{
	struct dibs_lo_dmb_node *dmb_node = NULL, *tmp_node;
	struct dibs_lo_dev *ldev;

	ldev = dibs->drv_priv;

	/* find dmb_node according to dmb->dmb_tok */
	read_lock_bh(&ldev->dmb_ht_lock);
	hash_for_each_possible(ldev->dmb_ht, tmp_node, list, token) {
		if (tmp_node->token == token) {
			dmb_node = tmp_node;
			break;
		}
	}
	if (!dmb_node) {
		read_unlock_bh(&ldev->dmb_ht_lock);
		return -EINVAL;
	}
	read_unlock_bh(&ldev->dmb_ht_lock);

	if (refcount_dec_and_test(&dmb_node->refcnt))
		__dibs_lo_unregister_dmb(ldev, dmb_node);
	return 0;
}

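/*
 * "Move" data across the loopback: memcpy() into the target DMB under
 * the hash table read lock, then, if a signal is requested (sf),
 * emulate the device interrupt by calling handle_irq() of the client
 * owning the target DMB. The signal mask is 0x1000 rotated right by
 * idx, i.e. the bit corresponding to DMB element idx.
 */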
static int dibs_lo_move_data(struct dibs_dev *dibs, u64 dmb_tok,
			     unsigned int idx, bool sf, unsigned int offset,
			     void *data, unsigned int size)
{
	struct dibs_lo_dmb_node *rmb_node = NULL, *tmp_node;
	struct dibs_lo_dev *ldev;
	u16 s_mask;
	u8 client_id;
	u32 sba_idx;

	ldev = dibs->drv_priv;

	read_lock_bh(&ldev->dmb_ht_lock);
	hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb_tok) {
		if (tmp_node->token == dmb_tok) {
			rmb_node = tmp_node;
			break;
		}
	}
	if (!rmb_node) {
		read_unlock_bh(&ldev->dmb_ht_lock);
		return -EINVAL;
	}
	memcpy((char *)rmb_node->cpu_addr + offset, data, size);
	sba_idx = rmb_node->sba_idx;
	read_unlock_bh(&ldev->dmb_ht_lock);

	if (!sf)
		return 0;

	spin_lock(&dibs->lock);
	client_id = dibs->dmb_clientid_arr[sba_idx];
	s_mask = ror16(0x1000, idx);
	if (likely(client_id != NO_DIBS_CLIENT && dibs->subs[client_id]))
		dibs->subs[client_id]->ops->handle_irq(dibs, sba_idx, s_mask);
	spin_unlock(&dibs->lock);

	return 0;
}

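/* dibs device operations as implemented by the loopback device */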
static const struct dibs_dev_ops dibs_lo_ops = {
	.get_fabric_id = dibs_lo_get_fabric_id,
	.query_remote_gid = dibs_lo_query_rgid,
	.max_dmbs = dibs_lo_max_dmbs,
	.register_dmb = dibs_lo_register_dmb,
	.unregister_dmb = dibs_lo_unregister_dmb,
	.move_data = dibs_lo_move_data,
	.support_mmapped_rdmb = dibs_lo_support_dmb_nocopy,
	.attach_dmb = dibs_lo_attach_dmb,
	.detach_dmb = dibs_lo_detach_dmb,
};

static void dibs_lo_dev_init(struct dibs_lo_dev *ldev)
{
	rwlock_init(&ldev->dmb_ht_lock);
	hash_init(ldev->dmb_ht);
	atomic_set(&ldev->dmb_cnt, 0);
	init_waitqueue_head(&ldev->ldev_release);
}

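/* Wait until all DMBs have been unregistered before final teardown. */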
static void dibs_lo_dev_exit(struct dibs_lo_dev *ldev)
{
	if (atomic_read(&ldev->dmb_cnt))
		wait_event(ldev->ldev_release, !atomic_read(&ldev->dmb_cnt));
}

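/*
 * Create and register the single loopback device: allocate driver
 * state and the dibs device, generate a random GID and register the
 * device under the name "lo".
 */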
static int dibs_lo_dev_probe(void)
{
	struct dibs_lo_dev *ldev;
	struct dibs_dev *dibs;
	int ret;

	ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
	if (!ldev)
		return -ENOMEM;

	dibs = dibs_dev_alloc();
	if (!dibs) {
		kfree(ldev);
		return -ENOMEM;
	}

	ldev->dibs = dibs;
	dibs->drv_priv = ldev;
	dibs_lo_dev_init(ldev);
	uuid_gen(&dibs->gid);
	dibs->ops = &dibs_lo_ops;

	dibs->dev.parent = NULL;
	dev_set_name(&dibs->dev, "%s", dibs_lo_dev_name);

	ret = dibs_dev_add(dibs);
	if (ret)
		goto err_reg;
	lo_dev = ldev;
	return 0;

err_reg:
	kfree(dibs->dmb_clientid_arr);
	/* pairs with dibs_dev_alloc() */
	put_device(&dibs->dev);
	kfree(ldev);

	return ret;
}

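/*
 * Unregister the loopback device: delete the device first, then wait
 * for all remaining DMBs to be unregistered before dropping the last
 * device reference and freeing the driver state.
 */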
static void dibs_lo_dev_remove(void)
{
	if (!lo_dev)
		return;

	dibs_dev_del(lo_dev->dibs);
	dibs_lo_dev_exit(lo_dev);
	/* pairs with dibs_dev_alloc() */
	put_device(&lo_dev->dibs->dev);
	kfree(lo_dev);
	lo_dev = NULL;
}

int dibs_loopback_init(void)
{
	return dibs_lo_dev_probe();
}

void dibs_loopback_exit(void)
{
	dibs_lo_dev_remove();
}