// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010-2017 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 */
/*
 * This file contains the entry functions for the memory management of
 * the ISP driver.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/highmem.h>	/* for kmap */
#include <linux/io.h>		/* for page_to_phys */
#include <linux/sysfs.h>

#include "hmm/hmm.h"
#include "hmm/hmm_bo.h"

#include "atomisp_internal.h"
#include "asm/cacheflush.h"
#include "mmu/isp_mmu.h"
#include "mmu/sh_mmu_mrfld.h"

struct hmm_bo_device bo_device;
static ia_css_ptr dummy_ptr = mmgr_EXCEPTION;
static bool hmm_initialized;

/*
 * One character per buffer object type, used in the sysfs dumps below:
 * p: private
 * v: vmalloc
 */
static const char hmm_bo_type_string[] = "pv";

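/*
 * Dump the buffer objects on @bo_list into a sysfs buffer: one
 * "type pgnr" line per object, followed by a per-type summary line.
 * @active selects whether objects with or without HMM_BO_ALLOCED set
 * are reported. Output is capped at PAGE_SIZE, as sysfs requires; the
 * "* 4" below converts a page count to KB, assuming 4 KiB pages.
 */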
static ssize_t bo_show(struct device *dev, struct device_attribute *attr,
		       char *buf, struct list_head *bo_list, bool active)
{
	ssize_t ret = 0;
	struct hmm_buffer_object *bo;
	unsigned long flags;
	int i;
	long total[HMM_BO_LAST] = { 0 };
	long count[HMM_BO_LAST] = { 0 };
	int index1 = 0;
	int index2 = 0;

	ret = scnprintf(buf, PAGE_SIZE, "type pgnr\n");
	if (ret <= 0)
		return 0;

	index1 += ret;

	spin_lock_irqsave(&bo_device.list_lock, flags);
	list_for_each_entry(bo, bo_list, list) {
		if ((active && (bo->status & HMM_BO_ALLOCED)) ||
		    (!active && !(bo->status & HMM_BO_ALLOCED))) {
			ret = scnprintf(buf + index1, PAGE_SIZE - index1,
					"%c %d\n",
					hmm_bo_type_string[bo->type], bo->pgnr);

			total[bo->type] += bo->pgnr;
			count[bo->type]++;
			if (ret > 0)
				index1 += ret;
		}
	}
	spin_unlock_irqrestore(&bo_device.list_lock, flags);

	for (i = 0; i < HMM_BO_LAST; i++) {
		if (count[i]) {
			ret = scnprintf(buf + index1 + index2,
					PAGE_SIZE - index1 - index2,
					"%ld %c buffer objects: %ld KB\n",
					count[i], hmm_bo_type_string[i],
					total[i] * 4);
			if (ret > 0)
				index2 += ret;
		}
	}

	/* Account for the trailing NUL, not included in scnprintf's return */
	return index1 + index2 + 1;
}

static ssize_t active_bo_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return bo_show(dev, attr, buf, &bo_device.entire_bo_list, true);
}

static ssize_t free_bo_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return bo_show(dev, attr, buf, &bo_device.entire_bo_list, false);
}

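/*
 * DEVICE_ATTR_RO(name) generates a read-only dev_attr_<name>, backed by
 * the <name>_show() callbacks defined above.
 */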
static DEVICE_ATTR_RO(active_bo);
static DEVICE_ATTR_RO(free_bo);

static struct attribute *sysfs_attrs_ctrl[] = {
	&dev_attr_active_bo.attr,
	&dev_attr_free_bo.attr,
	NULL
};

static struct attribute_group atomisp_attribute_group[] = {
	{.attrs = sysfs_attrs_ctrl },
};

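/*
 * Initialize the hmm layer: set up the buffer object device on top of
 * the ISP MMU, reserve a dummy allocation so that no real buffer ever
 * gets ISP address 0, and register the sysfs attributes defined above.
 */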
int hmm_init(void)
{
	int ret;

	ret = hmm_bo_device_init(&bo_device, &sh_mmu_mrfld,
				 ISP_VM_START, ISP_VM_SIZE);
	if (ret)
		dev_err(atomisp_dev, "hmm_bo_device_init failed.\n");

	hmm_initialized = true;

	/*
	 * hmm uses NULL to indicate an invalid ISP virtual address, but
	 * ISP_VM_START is defined as 0 as well. Allocate one piece of
	 * dummy memory up front, which receives address 0, so that no
	 * later hmm_alloc() call can return 0 for a real allocation.
	 */
	dummy_ptr = hmm_alloc(1);

	if (!ret) {
		ret = sysfs_create_group(&atomisp_dev->kobj,
					 atomisp_attribute_group);
		if (ret)
			dev_err(atomisp_dev,
				"%s Failed to create sysfs\n", __func__);
	}

	return ret;
}

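/*
 * Tear down everything hmm_init() set up, in reverse order. Safe to
 * call when hmm_init() never ran: dummy_ptr still holds mmgr_EXCEPTION
 * in that case and we bail out early.
 */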
void hmm_cleanup(void)
{
	if (dummy_ptr == mmgr_EXCEPTION)
		return;
	sysfs_remove_group(&atomisp_dev->kobj, atomisp_attribute_group);

	/* free dummy memory first */
	hmm_free(dummy_ptr);
	dummy_ptr = 0;

	hmm_bo_device_exit(&bo_device);
	hmm_initialized = false;
}

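/*
 * Common allocation path: reserve ISP virtual address space for @bytes
 * (rounded up to whole pages), back it with pages of the given @type
 * (freshly allocated pages for HMM_BO_PRIVATE, the caller's buffer for
 * HMM_BO_VMALLOC), and bind those pages into the ISP MMU. Returns the
 * ISP virtual start address, or 0 on failure.
 */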
static ia_css_ptr __hmm_alloc(size_t bytes, enum hmm_bo_type type,
			      void *vmalloc_addr)
{
	unsigned int pgnr;
	struct hmm_buffer_object *bo;
	int ret;

	/*
	 * Check that we are initialized. In an ideal world we would not
	 * need this, but we can tackle it once the driver is a lot cleaner.
	 */
	if (!hmm_initialized)
		hmm_init();

	/* Get the page count from the size */
	pgnr = size_to_pgnr_ceil(bytes);

	/* Initialize the buffer object structure */
	bo = hmm_bo_alloc(&bo_device, pgnr);
	if (!bo) {
		dev_err(atomisp_dev, "hmm_bo_alloc failed.\n");
		goto create_bo_err;
	}

	/* Allocate pages for the memory */
	ret = hmm_bo_alloc_pages(bo, type, vmalloc_addr);
	if (ret) {
		dev_err(atomisp_dev, "hmm_bo_alloc_pages failed.\n");
		goto alloc_page_err;
	}

	/* Bind the virtual address and the pages together */
	ret = hmm_bo_bind(bo);
	if (ret) {
		dev_err(atomisp_dev, "hmm_bo_bind failed.\n");
		goto bind_err;
	}

	return bo->start;

bind_err:
	hmm_bo_free_pages(bo);
alloc_page_err:
	hmm_bo_unref(bo);
create_bo_err:
	return 0;
}

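/*
 * Allocate ISP memory backed by newly allocated private pages.
 * A typical (hypothetical) use:
 *
 *	ia_css_ptr p = hmm_alloc(len);
 *
 *	if (p)
 *		hmm_store(p, host_buf, len);
 *
 * where len and host_buf stand in for the caller's own values.
 */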
ia_css_ptr hmm_alloc(size_t bytes)
{
	return __hmm_alloc(bytes, HMM_BO_PRIVATE, NULL);
}

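/* Map an existing vmalloc'ed buffer into the ISP address space */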
ia_css_ptr hmm_create_from_vmalloc_buf(size_t bytes, void *vmalloc_addr)
{
	return __hmm_alloc(bytes, HMM_BO_VMALLOC, vmalloc_addr);
}

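/*
 * Free an allocation made by hmm_alloc() or hmm_create_from_vmalloc_buf().
 * @virt must be the start address that the allocation call returned.
 */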
void hmm_free(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	if (WARN_ON(virt == mmgr_EXCEPTION))
		return;

	bo = hmm_bo_device_search_start(&bo_device, (unsigned int)virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"can not find buffer object starting at address 0x%x\n",
			(unsigned int)virt);
		return;
	}

	hmm_bo_unbind(bo);
	hmm_bo_free_pages(bo);
	hmm_bo_unref(bo);
}

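/*
 * Sanity-check that @bo exists and has both pages and ISP virtual
 * address space allocated before it is used in a load/store/set.
 */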
static inline int hmm_check_bo(struct hmm_buffer_object *bo, unsigned int ptr)
{
	if (!bo) {
		dev_err(atomisp_dev,
			"can not find buffer object containing address 0x%x\n",
			ptr);
		return -EINVAL;
	}

	if (!hmm_bo_page_allocated(bo)) {
		dev_err(atomisp_dev,
			"buffer object has no pages allocated.\n");
		return -EINVAL;
	}

	if (!hmm_bo_allocated(bo)) {
		dev_err(atomisp_dev,
			"buffer object has no virtual address space allocated.\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * Read path used when the buffer has no vmap: walk the buffer page by
 * page with kmap_local_page(), copy each chunk into @data (when @data
 * is non-NULL) and flush it from the CPU data cache. Chunk lengths are
 * clamped so that no copy crosses a page boundary.
 */
static int load_and_flush_by_kmap(ia_css_ptr virt, void *data,
				  unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *src, *des;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	des = (char *)data;
	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		src = (char *)kmap_local_page(bo->pages[idx]) + offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;	/* update virt for the next iteration */

		if (des) {
			memcpy(des, src, len);
			des += len;
		}

		clflush_cache_range(src, len);

		kunmap_local(src);
	}

	return 0;
}

/*
 * Read @bytes starting at ISP address @virt into @data (@data == NULL
 * means flush-only) and flush the range from the CPU data cache.
 * Prefers the buffer's permanent vmap when one exists; otherwise sets
 * up a temporary cached vmap, falling back to the kmap path on failure.
 */
static int load_and_flush(ia_css_ptr virt, void *data, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		void *src = bo->vmap_addr;

		src += (virt - bo->start);
		if (data)
			memcpy(data, src, bytes);
		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(src, bytes);
	} else {
		void *vptr;

		vptr = hmm_bo_vmap(bo, true);
		if (!vptr)
			return load_and_flush_by_kmap(virt, data, bytes);

		vptr = vptr + (virt - bo->start);
		if (data)
			memcpy(data, vptr, bytes);
		clflush_cache_range(vptr, bytes);
		hmm_bo_vunmap(bo);
	}

	return 0;
}

/* Read function in ISP memory management */
int hmm_load(ia_css_ptr virt, void *data, unsigned int bytes)
{
	if (!virt) {
		dev_warn(atomisp_dev,
			 "hmm_load: address is NULL\n");
		return -EINVAL;
	}
	if (!data) {
		dev_err(atomisp_dev,
			"hmm_load: data is a NULL argument\n");
		return -EINVAL;
	}
	return load_and_flush(virt, data, bytes);
}

/* Flush hmm data from the data cache */
int hmm_flush(ia_css_ptr virt, unsigned int bytes)
{
	return load_and_flush(virt, NULL, bytes);
}

/* Write function in ISP memory management */
int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *src, *des;
	void *vptr;
	int ret;

	if (!virt) {
		dev_warn(atomisp_dev,
			 "hmm_store: address is NULL\n");
		return -EINVAL;
	}
	if (!data) {
		dev_err(atomisp_dev,
			"hmm_store: data is a NULL argument\n");
		return -EINVAL;
	}

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		void *dst = bo->vmap_addr + (virt - bo->start);

		memcpy(dst, data, bytes);
		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(dst, bytes);
		return 0;
	}

	/* No permanent vmap; try a temporary one before per-page kmap */
	vptr = hmm_bo_vmap(bo, true);
	if (vptr) {
		vptr += (virt - bo->start);
		memcpy(vptr, data, bytes);
		clflush_cache_range(vptr, bytes);
		hmm_bo_vunmap(bo);
		return 0;
	}

	src = (char *)data;
	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		/* kmap_local_page() cannot fail for a valid page */
		des = (char *)kmap_local_page(bo->pages[idx]) + offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;

		memcpy(des, src, len);
		src += len;

		clflush_cache_range(des, len);

		kunmap_local(des);
	}

	return 0;
}

/* memset function in ISP memory management */
int hmm_set(ia_css_ptr virt, int c, unsigned int bytes)
{
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *des;
	void *vptr;
	int ret;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	ret = hmm_check_bo(bo, virt);
	if (ret)
		return ret;

	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		void *dst = bo->vmap_addr + (virt - bo->start);

		memset(dst, c, bytes);
		if (bo->status & HMM_BO_VMAPED_CACHED)
			clflush_cache_range(dst, bytes);
		return 0;
	}

	/* No permanent vmap; try a temporary one before per-page kmap */
	vptr = hmm_bo_vmap(bo, true);
	if (vptr) {
		vptr += (virt - bo->start);
		memset(vptr, c, bytes);
		clflush_cache_range(vptr, bytes);
		hmm_bo_vunmap(bo);
		return 0;
	}

	while (bytes) {
		idx = (virt - bo->start) >> PAGE_SHIFT;
		offset = (virt - bo->start) - (idx << PAGE_SHIFT);

		des = (char *)kmap_local_page(bo->pages[idx]) + offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		virt += len;

		memset(des, c, len);

		clflush_cache_range(des, len);

		kunmap_local(des);
	}

	return 0;
}

/* Convert an ISP virtual address to a physical address */
phys_addr_t hmm_virt_to_phys(ia_css_ptr virt)
{
	unsigned int idx, offset;
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"can not find buffer object containing address 0x%x\n",
			virt);
		return -1;
	}

	idx = (virt - bo->start) >> PAGE_SHIFT;
	offset = (virt - bo->start) - (idx << PAGE_SHIFT);

	return page_to_phys(bo->pages[idx]) + offset;
}

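/*
 * Back a userspace mmap() of @vma with the pages of the buffer object
 * that starts at ISP address @virt.
 */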
int hmm_mmap(struct vm_area_struct *vma, ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_start(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"can not find buffer object starting at address 0x%x\n",
			virt);
		return -EINVAL;
	}

	return hmm_bo_mmap(vma, bo);
}

/* Map an ISP virtual address into an IA (CPU) virtual address */
void *hmm_vmap(ia_css_ptr virt, bool cached)
{
	struct hmm_buffer_object *bo;
	void *ptr;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_err(atomisp_dev,
			"can not find buffer object containing address 0x%x\n",
			virt);
		return NULL;
	}

	ptr = hmm_bo_vmap(bo, cached);
	if (!ptr)
		return NULL;

	return ptr + (virt - bo->start);
}

/* Flush memory that was mapped as cached memory through hmm_vmap() */
void hmm_flush_vmap(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_warn(atomisp_dev,
			 "can not find buffer object containing address 0x%x\n",
			 virt);
		return;
	}

	hmm_bo_flush_vmap(bo);
}

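/*
 * Drop the CPU-side mapping created by hmm_vmap(). A typical
 * (hypothetical) call sequence for CPU access through a cached
 * mapping looks like:
 *
 *	void *va = hmm_vmap(isp_ptr, true);
 *
 *	if (va) {
 *		memset(va, 0, size);		// CPU writes
 *		hmm_flush_vmap(isp_ptr);	// make them visible to the ISP
 *		hmm_vunmap(isp_ptr);
 *	}
 *
 * where isp_ptr and size stand in for the caller's own values.
 */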
void hmm_vunmap(ia_css_ptr virt)
{
	struct hmm_buffer_object *bo;

	bo = hmm_bo_device_search_in_range(&bo_device, virt);
	if (!bo) {
		dev_warn(atomisp_dev,
			 "can not find buffer object containing address 0x%x\n",
			 virt);
		return;
	}

	hmm_bo_vunmap(bo);
}