// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2022 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

/**
 * hl_mmap_mem_buf_get - increase the buffer refcount and return a pointer to
 *                        the buffer descriptor.
 *
 * @mmg: parent unified memory manager
 * @handle: requested buffer handle
 *
 * Find the buffer in the store and return a pointer to its descriptor.
 * Increase buffer refcount. If not found - return NULL.
 */
struct hl_mmap_mem_buf *hl_mmap_mem_buf_get(struct hl_mem_mgr *mmg, u64 handle)
{
	struct hl_mmap_mem_buf *buf;

	spin_lock(&mmg->lock);
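	/*
	 * The mmap handle encodes the IDR id (plus memory-type bits) shifted
	 * left by PAGE_SHIFT (see hl_mmap_mem_buf_alloc()), so shift it back
	 * and truncate to the low 32 bits to recover the IDR key.
	 */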
	buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT));
	if (!buf) {
		spin_unlock(&mmg->lock);
		dev_dbg(mmg->dev, "Buff get failed, no match to handle %#llx\n", handle);
		return NULL;
	}
	kref_get(&buf->refcount);
	spin_unlock(&mmg->lock);
	return buf;
}

/**
 * hl_mmap_mem_buf_destroy - destroy the unused buffer
 *
 * @buf: memory manager buffer descriptor
 *
 * Internal function, used as a final step of buffer release. Shall be invoked
 * only when the buffer is no longer in use (removed from idr). Will call the
 * release callback (if applicable), and free the memory.
 */
static void hl_mmap_mem_buf_destroy(struct hl_mmap_mem_buf *buf)
{
	if (buf->behavior->release)
		buf->behavior->release(buf);

	kfree(buf);
}

/**
 * hl_mmap_mem_buf_release - release buffer
 *
 * @kref: kref that reached 0.
 *
 * Internal function, used as a kref release callback, invoked when the last
 * user of the buffer drops its reference. Shall not be called from an
 * interrupt context.
 */
static void hl_mmap_mem_buf_release(struct kref *kref)
{
	struct hl_mmap_mem_buf *buf =
		container_of(kref, struct hl_mmap_mem_buf, refcount);

	spin_lock(&buf->mmg->lock);
	idr_remove(&buf->mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
	spin_unlock(&buf->mmg->lock);

	hl_mmap_mem_buf_destroy(buf);
}

/**
 * hl_mmap_mem_buf_remove_idr_locked - remove handle from idr
 *
 * @kref: kref that reached 0.
 *
 * Internal function, used for kref put by handle. Assumes mmg lock is taken.
 * Will remove the buffer from idr, without destroying it.
 */
static void hl_mmap_mem_buf_remove_idr_locked(struct kref *kref)
{
	struct hl_mmap_mem_buf *buf =
		container_of(kref, struct hl_mmap_mem_buf, refcount);

	idr_remove(&buf->mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
}

/**
 * hl_mmap_mem_buf_put - decrease the reference to the buffer
 *
 * @buf: memory manager buffer descriptor
 *
 * Decrease the reference to the buffer, and release it if it was the last one.
 * Shall not be called from an interrupt context.
 */
int hl_mmap_mem_buf_put(struct hl_mmap_mem_buf *buf)
{
	return kref_put(&buf->refcount, hl_mmap_mem_buf_release);
}

/**
 * hl_mmap_mem_buf_put_handle - decrease the reference to the buffer with the
 *                              given handle.
 *
 * @mmg: parent unified memory manager
 * @handle: requested buffer handle
 *
 * Decrease the reference to the buffer, and release it if it was the last one.
 * Shall not be called from an interrupt context. Return -EINVAL if handle was
 * not found, else return the put outcome (0 or 1).
 */
int hl_mmap_mem_buf_put_handle(struct hl_mem_mgr *mmg, u64 handle)
{
	struct hl_mmap_mem_buf *buf;

	spin_lock(&mmg->lock);
	buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT));
	if (!buf) {
		spin_unlock(&mmg->lock);
		dev_dbg(mmg->dev,
			"Buff put failed, no match to handle %#llx\n", handle);
		return -EINVAL;
	}

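	/*
	 * Two-step release: if this put drops the last reference, only remove
	 * the buffer from the IDR while mmg->lock is held, and perform the
	 * actual destruction (release callback and kfree) only after the lock
	 * has been dropped.
	 */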
	if (kref_put(&buf->refcount, hl_mmap_mem_buf_remove_idr_locked)) {
		spin_unlock(&mmg->lock);
		hl_mmap_mem_buf_destroy(buf);
		return 1;
	}

	spin_unlock(&mmg->lock);
	return 0;
}

/**
 * hl_mmap_mem_buf_alloc - allocate a new mappable buffer
 *
 * @mmg: parent unified memory manager
 * @behavior: behavior object describing the buffer's polymorphic behavior
 * @gfp: gfp flags to use for the memory allocations
 * @args: additional args passed to behavior->alloc
 *
 * Allocate and register a new memory buffer inside the given memory manager.
 * Return the pointer to the new buffer on success or NULL on failure.
 */
struct hl_mmap_mem_buf *
hl_mmap_mem_buf_alloc(struct hl_mem_mgr *mmg,
		      struct hl_mmap_mem_buf_behavior *behavior, gfp_t gfp,
		      void *args)
{
	struct hl_mmap_mem_buf *buf;
	int rc;

	buf = kzalloc(sizeof(*buf), gfp);
	if (!buf)
		return NULL;

	spin_lock(&mmg->lock);
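	/*
	 * GFP_ATOMIC is used here (rather than the caller-supplied gfp flags)
	 * because the IDR allocation happens under the mmg->lock spinlock and
	 * therefore must not sleep.
	 */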
	rc = idr_alloc(&mmg->handles, buf, 1, 0, GFP_ATOMIC);
	spin_unlock(&mmg->lock);
	if (rc < 0) {
		dev_err(mmg->dev,
			"%s: Failed to allocate IDR for a new buffer, rc=%d\n",
			behavior->topic, rc);
		goto free_buf;
	}

	buf->mmg = mmg;
	buf->behavior = behavior;
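	/*
	 * Encode the handle: the IDR id is combined with the behavior's
	 * memory-type bits and shifted left by PAGE_SHIFT, so the handle can
	 * later be passed to mmap() via vma->vm_pgoff and decoded back in
	 * hl_mem_mgr_mmap() / hl_mmap_mem_buf_get().
	 */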
	buf->handle = (((u64)rc | buf->behavior->mem_id) << PAGE_SHIFT);
	kref_init(&buf->refcount);

	rc = buf->behavior->alloc(buf, gfp, args);
	if (rc) {
		dev_err(mmg->dev, "%s: Failure in buffer alloc callback %d\n",
			behavior->topic, rc);
		goto remove_idr;
	}

	return buf;

remove_idr:
	spin_lock(&mmg->lock);
	idr_remove(&mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
	spin_unlock(&mmg->lock);
free_buf:
	kfree(buf);
	return NULL;
}
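
/*
 * Typical usage (illustrative sketch only - the names my_buf_behavior,
 * my_buf_alloc, my_buf_release and my_buf_mmap are hypothetical and not part
 * of this driver): a caller defines an hl_mmap_mem_buf_behavior with
 * alloc/release/mmap callbacks, allocates a buffer through the memory manager
 * and hands buf->handle to userspace, which passes it back as the mmap offset:
 *
 *	static struct hl_mmap_mem_buf_behavior my_buf_behavior = {
 *		.topic = "MYBUF",
 *		.mem_id = HL_MMAP_TYPE_TS_BUFF,
 *		.alloc = my_buf_alloc,
 *		.release = my_buf_release,
 *		.mmap = my_buf_mmap,
 *	};
 *
 *	buf = hl_mmap_mem_buf_alloc(mmg, &my_buf_behavior, GFP_KERNEL, &args);
 *	if (!buf)
 *		return -ENOMEM;
 *
 * buf->handle is then returned to userspace and later used as the mmap()
 * offset, which hl_mem_mgr_mmap() decodes from vma->vm_pgoff.
 */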

/**
 * hl_mmap_mem_buf_vm_close - handle mmap close
 *
 * @vma: the vma object for which mmap was closed.
 *
 * Put the memory buffer if it is no longer mapped.
 */
static void hl_mmap_mem_buf_vm_close(struct vm_area_struct *vma)
{
	struct hl_mmap_mem_buf *buf =
		(struct hl_mmap_mem_buf *)vma->vm_private_data;
	long new_mmap_size;

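	/*
	 * A partial munmap() splits the VMA and invokes close() for the part
	 * being removed, so only account for the unmapped size here; the
	 * reference is dropped once the entire mapping is gone.
	 */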
	new_mmap_size = buf->real_mapped_size - (vma->vm_end - vma->vm_start);

	if (new_mmap_size > 0) {
		buf->real_mapped_size = new_mmap_size;
		return;
	}

	atomic_set(&buf->mmap, 0);
	hl_mmap_mem_buf_put(buf);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct hl_mmap_mem_buf_vm_ops = {
	.close = hl_mmap_mem_buf_vm_close
};

/**
 * hl_mem_mgr_mmap - map the given buffer to the user
 *
 * @mmg: unified memory manager
 * @vma: the vma object to map the buffer into
 * @args: additional args passed to behavior->mmap
 *
 * Map the buffer specified by the vma->vm_pgoff to the given vma.
 */
int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
		    void *args)
{
	struct hl_mmap_mem_buf *buf;
	u64 user_mem_size;
	u64 handle;
	int rc;

	/* We use the page offset to hold the buffer handle (which encodes the
	 * IDR id), so it must be cleared before doing the mmap itself.
	 */
	handle = vma->vm_pgoff << PAGE_SHIFT;
	vma->vm_pgoff = 0;

	/* Reference was taken here */
	buf = hl_mmap_mem_buf_get(mmg, handle);
	if (!buf) {
		dev_err(mmg->dev,
			"Memory mmap failed, no match to handle %#llx\n", handle);
		return -EINVAL;
	}

	/* Validation check */
	user_mem_size = vma->vm_end - vma->vm_start;
	if (user_mem_size != ALIGN(buf->mappable_size, PAGE_SIZE)) {
		dev_err(mmg->dev,
			"%s: Memory mmap failed, mmap VM size 0x%llx != 0x%llx allocated physical mem size\n",
			buf->behavior->topic, user_mem_size, buf->mappable_size);
		rc = -EINVAL;
		goto put_mem;
	}

#ifdef _HAS_TYPE_ARG_IN_ACCESS_OK
	if (!access_ok(VERIFY_WRITE, (void __user *)(uintptr_t)vma->vm_start,
		       user_mem_size)) {
#else
	if (!access_ok((void __user *)(uintptr_t)vma->vm_start,
		       user_mem_size)) {
#endif
		dev_err(mmg->dev, "%s: User pointer is invalid - 0x%lx\n",
			buf->behavior->topic, vma->vm_start);

		rc = -EINVAL;
		goto put_mem;
	}

	if (atomic_cmpxchg(&buf->mmap, 0, 1)) {
		dev_err(mmg->dev,
			"%s: Memory mmap failed, already mapped to user\n",
			buf->behavior->topic);
		rc = -EINVAL;
		goto put_mem;
	}

	vma->vm_ops = &hl_mmap_mem_buf_vm_ops;

	/* Note: We're transferring the memory reference to vma->vm_private_data here. */

	vma->vm_private_data = buf;

	rc = buf->behavior->mmap(buf, vma, args);
	if (rc) {
		atomic_set(&buf->mmap, 0);
		goto put_mem;
	}

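	/*
	 * The mapping succeeded: record the full mappable size (decremented on
	 * partial unmaps by hl_mmap_mem_buf_vm_close()) and restore the handle
	 * into vm_pgoff, which was cleared above before the low-level mmap.
	 */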
	buf->real_mapped_size = buf->mappable_size;
	vma->vm_pgoff = handle >> PAGE_SHIFT;

	return 0;

put_mem:
	hl_mmap_mem_buf_put(buf);
	return rc;
}
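
/*
 * Illustrative sketch only (my_fpriv and my_device_mmap are hypothetical
 * names, not part of this driver): a device's mmap file operation would
 * typically just forward the vma to the unified memory manager, which looks
 * up the buffer by the handle encoded in the mmap offset:
 *
 *	static int my_device_mmap(struct file *filp, struct vm_area_struct *vma)
 *	{
 *		struct my_fpriv *priv = filp->private_data;
 *
 *		return hl_mem_mgr_mmap(&priv->mem_mgr, vma, NULL);
 *	}
 */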

/**
 * hl_mem_mgr_init - initialize unified memory manager
 *
 * @dev: owner device pointer
 * @mmg: structure to initialize
 *
 * Initialize an instance of the unified memory manager.
 */
void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg)
{
	mmg->dev = dev;
	spin_lock_init(&mmg->lock);
	idr_init(&mmg->handles);
}
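
/*
 * Note on lifecycle: hl_mem_mgr_init() is paired with hl_mem_mgr_fini(),
 * which drops the reference held for every handle still present in the IDR,
 * and with hl_mem_mgr_idr_destroy(), which shall only run once the IDR is
 * empty and no buffers remain in use.
 */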

static void hl_mem_mgr_fini_stats_reset(struct hl_mem_mgr_fini_stats *stats)
{
	if (!stats)
		return;

	memset(stats, 0, sizeof(*stats));
}

static void hl_mem_mgr_fini_stats_inc(u64 mem_id, struct hl_mem_mgr_fini_stats *stats)
{
	if (!stats)
		return;

	switch (mem_id) {
	case HL_MMAP_TYPE_CB:
		++stats->n_busy_cb;
		break;
	case HL_MMAP_TYPE_TS_BUFF:
		++stats->n_busy_ts;
		break;
	default:
		/* we currently store only CB/TS so this shouldn't happen */
		++stats->n_busy_other;
	}
}

/**
 * hl_mem_mgr_fini - release unified memory manager
 *
 * @mmg: parent unified memory manager
 * @stats: if non-NULL, returns per-type counters for handles that could not be removed.
 *
 * Release the unified memory manager. Shall not be called from an interrupt
 * context.
 */
void hl_mem_mgr_fini(struct hl_mem_mgr *mmg, struct hl_mem_mgr_fini_stats *stats)
{
	struct hl_mmap_mem_buf *buf;
	struct idr *idp;
	const char *topic;
	u64 mem_id;
	u32 id;

	hl_mem_mgr_fini_stats_reset(stats);

	idp = &mmg->handles;

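	/*
	 * Drop the reference held for each remaining handle. If that put was
	 * not the last one (the buffer is still referenced, e.g. still mapped
	 * to user space), the handle stays alive; report it and count it in
	 * the per-type stats.
	 */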
	idr_for_each_entry(idp, buf, id) {
		topic = buf->behavior->topic;
		mem_id = buf->behavior->mem_id;
		if (hl_mmap_mem_buf_put(buf) != 1) {
			dev_err(mmg->dev,
				"%s: Buff handle %u for CTX is still alive\n",
				topic, id);
			hl_mem_mgr_fini_stats_inc(mem_id, stats);
		}
	}
}

/**
 * hl_mem_mgr_idr_destroy() - destroy memory manager IDR.
 * @mmg: parent unified memory manager
 *
 * Destroy the memory manager IDR.
 * Shall be called only when the IDR is empty and no memory buffers are in use.
 */
void hl_mem_mgr_idr_destroy(struct hl_mem_mgr *mmg)
{
	if (!idr_is_empty(&mmg->handles))
		dev_crit(mmg->dev, "memory manager IDR is destroyed while it is not empty!\n");

	idr_destroy(&mmg->handles);
}
393