/*
 * bitext.c: kernel little helper (of bit shuffling variety).
 *
 * Copyright (C) 2002 Pete Zaitcev <zaitcev@yahoo.com>
 *
 * The algorithm used to search for a zero bit string is geared towards its
 * application.  We expect a couple of fixed request sizes, so a rotating
 * counter, reset by the align size, should provide a fast enough search
 * while maintaining low fragmentation.
 */
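
/*
 * Minimal usage sketch (the array size and names below are made up, not
 * taken from a real caller): the caller supplies the backing bitmap and a
 * struct bit_map, initializes them once, and then allocates and frees
 * aligned bit strings.
 *
 *	static unsigned long demo_bits[BITS_TO_LONGS(1024)];
 *	static struct bit_map demo_map;
 *
 *	bit_map_init(&demo_map, demo_bits, 1024);
 *	n = bit_map_string_get(&demo_map, 16, 16);
 *	if (n >= 0) {
 *		... use bits n .. n + 15 ...
 *		bit_map_clear(&demo_map, n, 16);
 *	}
 */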

#include <linux/string.h>
#include <linux/bitmap.h>

#include <asm/bitext.h>

/**
 * bit_map_string_get - find and set a bit string in a bit map.
 * @t: the bit map.
 * @len: requested string length.
 * @align: requested alignment.
 *
 * Returns the offset into the map, or -1 if out of space.
 *
 * Not safe to call from an interrupt (uses spin_lock).
 */
int bit_map_string_get(struct bit_map *t, int len, int align)
{
	int offset, count;	/* siamese twins */
	int off_new;
	int align1;
	int i, color;

	if (t->num_colors) {
		/* align is overloaded to be the page color */
		color = align;
		align = t->num_colors;
	} else {
		color = 0;
		if (align == 0)
			align = 1;
	}
	align1 = align - 1;
	if ((align & align1) != 0)
		BUG();
	if (align < 0 || align >= t->size)
		BUG();
	if (len <= 0 || len > t->size)
		BUG();
	color &= align1;

	spin_lock(&t->lock);
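	/*
	 * Pick a starting point: requests smaller than the previous one
	 * start at the lowest known free bit, otherwise continue from the
	 * rotating cursor, rounded down to the alignment.
	 */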
	if (len < t->last_size)
		offset = t->first_free;
	else
		offset = t->last_off & ~align1;
	count = 0;
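	/*
	 * Rotating search: hop between zero bits, aligning and adding the
	 * color at each step.  "count" tracks how many bits have been
	 * skipped so that a full, fruitless sweep of the map is detected.
	 */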
	for (;;) {
		off_new = find_next_zero_bit(t->map, t->size, offset);
		off_new = ((off_new + align1) & ~align1) + color;
		count += off_new - offset;
		offset = off_new;
		if (offset >= t->size)
			offset = 0;
		if (count + len > t->size) {
			spin_unlock(&t->lock);
			/* P3 */
			printk(KERN_ERR
			       "bitmap out: size %d used %d off %d len %d align %d count %d\n",
			       t->size, t->used, offset, len, align, count);
			return -1;
		}

		if (offset + len > t->size) {
			count += t->size - offset;
			offset = 0;
			continue;
		}

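		/* Walk the candidate window; any set bit restarts the search past it. */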
		i = 0;
		while (test_bit(offset + i, t->map) == 0) {
			i++;
			if (i == len) {
				bitmap_set(t->map, offset, len);
				if (offset == t->first_free)
					t->first_free = find_next_zero_bit
							(t->map, t->size,
							 t->first_free + len);
				if ((t->last_off = offset + len) >= t->size)
					t->last_off = 0;
				t->used += len;
				t->last_size = len;
				spin_unlock(&t->lock);
				return offset;
			}
		}
		count += i + 1;
		if ((offset += i + 1) >= t->size)
			offset = 0;
	}
}
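
/*
 * A note on the page-color mode: if the owner of the map sets t->num_colors
 * to a nonzero power of two after bit_map_init(), the "align" argument of
 * bit_map_string_get() is reinterpreted as the requested color and the
 * effective alignment becomes num_colors.  Sketch with made-up values:
 *
 *	t->num_colors = 8;
 *	n = bit_map_string_get(t, len, color);	(color is taken modulo 8)
 */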

void bit_map_clear(struct bit_map *t, int offset, int len)
{
	int i;

	if (t->used < len)
		BUG();		/* Much too late to do any good, but alas... */
	spin_lock(&t->lock);
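	/*
	 * Every bit being returned must currently be set; a clear bit here
	 * means a double free or a bogus (offset, len) pair.
	 */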
	for (i = 0; i < len; i++) {
		if (test_bit(offset + i, t->map) == 0)
			BUG();
		__clear_bit(offset + i, t->map);
	}
	if (offset < t->first_free)
		t->first_free = offset;
	t->used -= len;
	spin_unlock(&t->lock);
}

void bit_map_init(struct bit_map *t, unsigned long *map, int size)
{
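	/* The backing map is cleared byte-wise, so size must be a multiple of 8 bits. */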
	if ((size & 07) != 0)
		BUG();
	memset(map, 0, size >> 3);

	memset(t, 0, sizeof *t);
	spin_lock_init(&t->lock);
	t->map = map;
	t->size = size;
}