xref: /linux/block/partitions/aix.c (revision 95298d63c67673c654c08952672d016212b26054)
// SPDX-License-Identifier: GPL-2.0
/*
 *  block/partitions/aix.c
 *
 *  Copyright (C) 2012-2013 Philippe De Muyter <phdm@macqel.be>
 */

#include "check.h"

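/* On-disk LVM record, found in block 7 of an AIX LVM physical volume */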
struct lvm_rec {
	char lvm_id[4]; /* "_LVM" */
	char reserved4[16];
	__be32 lvmarea_len;
	__be32 vgda_len;
	__be32 vgda_psn[2];
	char reserved36[10];
	__be16 pp_size; /* log2(pp_size) */
	char reserved46[12];
	__be16 version;
	};

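/* Volume group descriptor area (VGDA) header */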
struct vgda {
	__be32 secs;
	__be32 usec;
	char reserved8[16];
	__be16 numlvs;
	__be16 maxlvs;
	__be16 pp_size;
	__be16 numpvs;
	__be16 total_vgdas;
	__be16 vgda_size;
	};

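/* Per-logical-volume descriptor stored in the VGDA */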
struct lvd {
	__be16 lv_ix;
	__be16 res2;
	__be16 res4;
	__be16 maxsize;
	__be16 lv_state;
	__be16 mirror;
	__be16 mirror_policy;
	__be16 num_lps;
	__be16 res10[8];
	};

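/* Entry in the logical volume name table */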
struct lvname {
	char name[64];
	};

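/* Physical partition entry: maps one physical partition to a logical partition (lv_ix, lp_ix) */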
struct ppe {
	__be16 lv_ix;
	unsigned short res2;
	unsigned short res4;
	__be16 lp_ix;
	unsigned short res8[12];
	};

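/* Physical volume descriptor: physical partition count, start, and the physical partition map */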
struct pvd {
	char reserved0[16];
	__be16 pp_count;
	char reserved18[2];
	__be32 psn_part1;
	char reserved24[8];
	struct ppe ppe[1016];
	};

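/* Maximum number of logical volumes per volume group */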
#define LVM_MAXLVS 256

/**
 * last_lba(): return number of last logical block of device
 * @bdev: block device
 *
 * Description: Returns last LBA value on success, 0 on error.
 * This is stored (by sd and ide-geometry) in
 *  the part[0] entry for this disk, and is the number of
 *  physical sectors available on the disk.
 */
static u64 last_lba(struct block_device *bdev)
{
	if (!bdev || !bdev->bd_inode)
		return 0;
	return (bdev->bd_inode->i_size >> 9) - 1ULL;
}

/**
 * read_lba(): Read bytes from disk, starting at given LBA
 * @state: disk partition parsing state (provides the device to read from)
 * @lba: Logical Block Address to start reading at
 * @buffer: destination buffer
 * @count: number of bytes to read
 *
 * Description: Reads @count bytes from @state->bdev into @buffer.
 * Returns number of bytes read on success, 0 on error.
 */
static size_t read_lba(struct parsed_partitions *state, u64 lba, u8 *buffer,
			size_t count)
{
	size_t totalreadcount = 0;

	if (!buffer || lba + count / 512 > last_lba(state->bdev))
		return 0;

	while (count) {
		int copied = 512;
		Sector sect;
		unsigned char *data = read_part_sector(state, lba++, &sect);
		if (!data)
			break;
		if (copied > count)
			copied = count;
		memcpy(buffer, data, copied);
		put_dev_sector(sect);
		buffer += copied;
		totalreadcount += copied;
		count -= copied;
	}
	return totalreadcount;
}

/**
 * alloc_pvd(): reads physical volume descriptor
 * @state: disk partition parsing state (provides the device to read from)
 * @lba: Logical Block Address to read the descriptor from
 *
 * Description: Returns pvd on success, NULL on error.
 * Allocates space for pvd and fills it with disk blocks at @lba.
 * Notes: remember to free pvd when you're done!
 */
static struct pvd *alloc_pvd(struct parsed_partitions *state, u32 lba)
{
	size_t count = sizeof(struct pvd);
	struct pvd *p;

	p = kmalloc(count, GFP_KERNEL);
	if (!p)
		return NULL;

	if (read_lba(state, lba, (u8 *) p, count) < count) {
		kfree(p);
		return NULL;
	}
	return p;
}

/**
 * alloc_lvn(): reads logical volume names
 * @state: disk partition parsing state (provides the device to read from)
 * @lba: Logical Block Address to read the name table from
 *
 * Description: Returns lvn on success, NULL on error.
 * Allocates space for lvn and fills it with disk blocks at @lba.
 * Notes: remember to free lvn when you're done!
 */
static struct lvname *alloc_lvn(struct parsed_partitions *state, u32 lba)
{
	size_t count = sizeof(struct lvname) * LVM_MAXLVS;
	struct lvname *p;

	p = kmalloc(count, GFP_KERNEL);
	if (!p)
		return NULL;

	if (read_lba(state, lba, (u8 *) p, count) < count) {
		kfree(p);
		return NULL;
	}
	return p;
}

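/**
 * aix_partition(): parse AIX LVM metadata found on the disk
 * @state: disk partition parsing state
 *
 * Description: Returns 1 if at least one contiguous logical volume was
 * registered as a partition, 0 otherwise.
 */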
int aix_partition(struct parsed_partitions *state)
{
	int ret = 0;
	Sector sect;
	unsigned char *d;
	u32 pp_bytes_size;
	u32 pp_blocks_size = 0;
	u32 vgda_sector = 0;
	u32 vgda_len = 0;
	int numlvs = 0;
	struct pvd *pvd = NULL;
	struct lv_info {
		unsigned short pps_per_lv;
		unsigned short pps_found;
		unsigned char lv_is_contiguous;
	} *lvip;
	struct lvname *n = NULL;

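	/*
	 * Block 7 of the disk holds the LVM record that identifies an AIX
	 * LVM physical volume and gives the VGDA location and length.
	 */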
	d = read_part_sector(state, 7, &sect);
	if (d) {
		struct lvm_rec *p = (struct lvm_rec *)d;
		u16 lvm_version = be16_to_cpu(p->version);
		char tmp[64];

		if (lvm_version == 1) {
			int pp_size_log2 = be16_to_cpu(p->pp_size);

			pp_bytes_size = 1 << pp_size_log2;
			pp_blocks_size = pp_bytes_size / 512;
			snprintf(tmp, sizeof(tmp),
				" AIX LVM header version %u found\n",
				lvm_version);
			vgda_len = be32_to_cpu(p->vgda_len);
			vgda_sector = be32_to_cpu(p->vgda_psn[0]);
		} else {
			snprintf(tmp, sizeof(tmp),
				" unsupported AIX LVM version %d found\n",
				lvm_version);
		}
		strlcat(state->pp_buf, tmp, PAGE_SIZE);
		put_dev_sector(sect);
	}
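	/* The VGDA header records how many logical volumes the volume group has */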
	if (vgda_sector && (d = read_part_sector(state, vgda_sector, &sect))) {
		struct vgda *p = (struct vgda *)d;

		numlvs = be16_to_cpu(p->numlvs);
		put_dev_sector(sect);
	}
	lvip = kcalloc(state->limit, sizeof(struct lv_info), GFP_KERNEL);
	if (!lvip)
		return 0;
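	/*
	 * Read the per-LV descriptors (sizes) and the LV name table, then
	 * the physical volume descriptor with the physical partition map.
	 */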
	if (numlvs && (d = read_part_sector(state, vgda_sector + 1, &sect))) {
		struct lvd *p = (struct lvd *)d;
		int i;

		n = alloc_lvn(state, vgda_sector + vgda_len - 33);
		if (n) {
			int foundlvs = 0;

			for (i = 0; foundlvs < numlvs && i < state->limit; i += 1) {
				lvip[i].pps_per_lv = be16_to_cpu(p[i].num_lps);
				if (lvip[i].pps_per_lv)
					foundlvs += 1;
			}
			/* pvd loops depend on n[].name and lvip[].pps_per_lv */
			pvd = alloc_pvd(state, vgda_sector + 17);
		}
		put_dev_sector(sect);
	}
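	/*
	 * Walk the physical partition map: a logical volume whose logical
	 * partitions all lie contiguously on this disk becomes a partition.
	 */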
	if (pvd) {
		int numpps = be16_to_cpu(pvd->pp_count);
		int psn_part1 = be32_to_cpu(pvd->psn_part1);
		int i;
		int cur_lv_ix = -1;
		int next_lp_ix = 1;
		int lp_ix;

		for (i = 0; i < numpps; i += 1) {
			struct ppe *p = pvd->ppe + i;
			unsigned int lv_ix;

			lp_ix = be16_to_cpu(p->lp_ix);
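			/* lp_ix == 0: physical partition not allocated to any logical volume */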
			if (!lp_ix) {
				next_lp_ix = 1;
				continue;
			}
			lv_ix = be16_to_cpu(p->lv_ix) - 1;
			if (lv_ix >= state->limit) {
				cur_lv_ix = -1;
				continue;
			}
			lvip[lv_ix].pps_found += 1;
			if (lp_ix == 1) {
				cur_lv_ix = lv_ix;
				next_lp_ix = 1;
			} else if (lv_ix != cur_lv_ix || lp_ix != next_lp_ix) {
				next_lp_ix = 1;
				continue;
			}
			if (lp_ix == lvip[lv_ix].pps_per_lv) {
				char tmp[70];

				put_partition(state, lv_ix + 1,
				  (i + 1 - lp_ix) * pp_blocks_size + psn_part1,
				  lvip[lv_ix].pps_per_lv * pp_blocks_size);
				snprintf(tmp, sizeof(tmp), " <%s>\n",
					 n[lv_ix].name);
				strlcat(state->pp_buf, tmp, PAGE_SIZE);
				lvip[lv_ix].lv_is_contiguous = 1;
				ret = 1;
				next_lp_ix = 1;
			} else
				next_lp_ix += 1;
		}
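		/* Warn about LVs seen in the map that are not contiguous on this disk */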
		for (i = 0; i < state->limit; i += 1)
			if (lvip[i].pps_found && !lvip[i].lv_is_contiguous) {
				char tmp[sizeof(n[i].name) + 1]; // null char

				snprintf(tmp, sizeof(tmp), "%s", n[i].name);
				pr_warn("partition %s (%u pp's found) is "
					"not contiguous\n",
					tmp, lvip[i].pps_found);
			}
		kfree(pvd);
	}
	kfree(n);
	kfree(lvip);
	return ret;
}