// SPDX-License-Identifier: GPL-2.0
/*
 *  block/partitions/aix.c
 *
 *  Copyright (C) 2012-2013 Philippe De Muyter <phdm@macqel.be>
 */

#include "check.h"
#include "aix.h"

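/*
 * On-disk AIX LVM metadata records, as interpreted by aix_partition()
 * below.  All multi-byte fields are big-endian; pp_size is stored as
 * log2 of the physical partition size in bytes.
 */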
struct lvm_rec {
	char lvm_id[4]; /* "_LVM" */
	char reserved4[16];
	__be32 lvmarea_len;
	__be32 vgda_len;
	__be32 vgda_psn[2];
	char reserved36[10];
	__be16 pp_size; /* log2(pp_size) */
	char reserved46[12];
	__be16 version;
	};

struct vgda {
	__be32 secs;
	__be32 usec;
	char reserved8[16];
	__be16 numlvs;
	__be16 maxlvs;
	__be16 pp_size;
	__be16 numpvs;
	__be16 total_vgdas;
	__be16 vgda_size;
	};

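/*
 * Per-logical-volume descriptor: num_lps is the number of logical
 * partitions making up the volume (used as pps_per_lv below).
 */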
struct lvd {
	__be16 lv_ix;
	__be16 res2;
	__be16 res4;
	__be16 maxsize;
	__be16 lv_state;
	__be16 mirror;
	__be16 mirror_policy;
	__be16 num_lps;
	__be16 res10[8];
	};

struct lvname {
	char name[64];
	};

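/*
 * One entry per physical partition: lv_ix names the logical volume the
 * physical partition belongs to, lp_ix its position within that volume.
 * Entries with a zero lp_ix are skipped by the scan in aix_partition().
 */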
struct ppe {
	__be16 lv_ix;
	unsigned short res2;
	unsigned short res4;
	__be16 lp_ix;
	unsigned short res8[12];
	};

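/*
 * Physical volume descriptor: pp_count physical partitions starting at
 * physical sector psn_part1, mapped by the ppe[] array.
 */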
struct pvd {
	char reserved0[16];
	__be16 pp_count;
	char reserved18[2];
	__be32 psn_part1;
	char reserved24[8];
	struct ppe ppe[1016];
	};

#define LVM_MAXLVS 256

/**
 * last_lba(): return number of last logical block of device
 * @bdev: block device
 *
 * Description: Returns last LBA value on success, 0 on error.
 * This is stored (by sd and ide-geometry) in
 *  the part[0] entry for this disk, and is the number of
 *  physical sectors available on the disk.
 */
static u64 last_lba(struct block_device *bdev)
{
	if (!bdev || !bdev->bd_inode)
		return 0;
	return (bdev->bd_inode->i_size >> 9) - 1ULL;
}

/**
 * read_lba(): Read bytes from disk, starting at given LBA
 * @state: disk parsed partitions
 * @lba: the Logical Block Address to start reading from
 * @buffer: destination buffer
 * @count: bytes to read
 *
 * Description: Reads @count bytes from @state->bdev into @buffer.
 * Returns number of bytes read on success, 0 on error.
 */
static size_t read_lba(struct parsed_partitions *state, u64 lba, u8 *buffer,
			size_t count)
{
	size_t totalreadcount = 0;

	if (!buffer || lba + count / 512 > last_lba(state->bdev))
		return 0;

	while (count) {
		int copied = 512;
		Sector sect;
		unsigned char *data = read_part_sector(state, lba++, &sect);

		if (!data)
			break;
		if (copied > count)
			copied = count;
		memcpy(buffer, data, copied);
		put_dev_sector(sect);
		buffer += copied;
		totalreadcount += copied;
		count -= copied;
	}
	return totalreadcount;
}

/**
 * alloc_pvd(): reads physical volume descriptor
 * @state: disk parsed partitions
 * @lba: the Logical Block Address of the physical volume descriptor
 *
 * Description: Returns pvd on success, NULL on error.
 * Allocates space for pvd and fills it with disk blocks at @lba.
 * Notes: remember to free pvd when you're done!
 */
static struct pvd *alloc_pvd(struct parsed_partitions *state, u32 lba)
{
	size_t count = sizeof(struct pvd);
	struct pvd *p;

	p = kmalloc(count, GFP_KERNEL);
	if (!p)
		return NULL;

	if (read_lba(state, lba, (u8 *) p, count) < count) {
		kfree(p);
		return NULL;
	}
	return p;
}

/**
 * alloc_lvn(): reads logical volume names
 * @state: disk parsed partitions
 * @lba: the Logical Block Address of the logical volume name table
 *
 * Description: Returns lvn on success, NULL on error.
 * Allocates space for lvn and fills it with disk blocks at @lba.
 * Notes: remember to free lvn when you're done!
 */
static struct lvname *alloc_lvn(struct parsed_partitions *state, u32 lba)
{
	size_t count = sizeof(struct lvname) * LVM_MAXLVS;
	struct lvname *p;

	p = kmalloc(count, GFP_KERNEL);
	if (!p)
		return NULL;

	if (read_lba(state, lba, (u8 *) p, count) < count) {
		kfree(p);
		return NULL;
	}
	return p;
}

int aix_partition(struct parsed_partitions *state)
{
	int ret = 0;
	Sector sect;
	unsigned char *d;
	u32 pp_bytes_size;
	u32 pp_blocks_size = 0;
	u32 vgda_sector = 0;
	u32 vgda_len = 0;
	int numlvs = 0;
	struct pvd *pvd = NULL;
	struct lv_info {
		unsigned short pps_per_lv;
		unsigned short pps_found;
		unsigned char lv_is_contiguous;
	} *lvip;
	struct lvname *n = NULL;

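	/*
	 * The LVM record in sector 7 carries the version, the physical
	 * partition size (as log2) and the position/length of the VGDA.
	 */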
	d = read_part_sector(state, 7, &sect);
	if (d) {
		struct lvm_rec *p = (struct lvm_rec *)d;
		u16 lvm_version = be16_to_cpu(p->version);
		char tmp[64];

		if (lvm_version == 1) {
			int pp_size_log2 = be16_to_cpu(p->pp_size);

			pp_bytes_size = 1 << pp_size_log2;
			pp_blocks_size = pp_bytes_size / 512;
			snprintf(tmp, sizeof(tmp),
				" AIX LVM header version %u found\n",
				lvm_version);
			vgda_len = be32_to_cpu(p->vgda_len);
			vgda_sector = be32_to_cpu(p->vgda_psn[0]);
		} else {
			snprintf(tmp, sizeof(tmp),
				" unsupported AIX LVM version %d found\n",
				lvm_version);
		}
		strlcat(state->pp_buf, tmp, PAGE_SIZE);
		put_dev_sector(sect);
	}
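	/* The VGDA header gives the number of logical volumes in the group. */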
	if (vgda_sector && (d = read_part_sector(state, vgda_sector, &sect))) {
		struct vgda *p = (struct vgda *)d;

		numlvs = be16_to_cpu(p->numlvs);
		put_dev_sector(sect);
	}
	lvip = kcalloc(state->limit, sizeof(struct lv_info), GFP_KERNEL);
	if (!lvip)
		return 0;
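	/*
	 * LV descriptors follow the VGDA header; the LV name table sits
	 * 33 sectors before the end of the VGDA, and the physical volume
	 * descriptor 17 sectors after its start.
	 */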
	if (numlvs && (d = read_part_sector(state, vgda_sector + 1, &sect))) {
		struct lvd *p = (struct lvd *)d;
		int i;

		n = alloc_lvn(state, vgda_sector + vgda_len - 33);
		if (n) {
			int foundlvs = 0;

			for (i = 0; foundlvs < numlvs && i < state->limit; i += 1) {
				lvip[i].pps_per_lv = be16_to_cpu(p[i].num_lps);
				if (lvip[i].pps_per_lv)
					foundlvs += 1;
			}
			/* pvd loops depend on n[].name and lvip[].pps_per_lv */
			pvd = alloc_pvd(state, vgda_sector + 17);
		}
		put_dev_sector(sect);
	}
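	/*
	 * Walk the physical partition map.  A logical volume is reported
	 * as a partition only if its logical partitions lie on this disk
	 * contiguously and in order, starting at lp_ix 1; volumes found
	 * but not contiguous are only warned about below.
	 */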
	if (pvd) {
		int numpps = be16_to_cpu(pvd->pp_count);
		int psn_part1 = be32_to_cpu(pvd->psn_part1);
		int i;
		int cur_lv_ix = -1;
		int next_lp_ix = 1;
		int lp_ix;

		for (i = 0; i < numpps; i += 1) {
			struct ppe *p = pvd->ppe + i;
			unsigned int lv_ix;

			lp_ix = be16_to_cpu(p->lp_ix);
			if (!lp_ix) {
				next_lp_ix = 1;
				continue;
			}
			lv_ix = be16_to_cpu(p->lv_ix) - 1;
			if (lv_ix >= state->limit) {
				cur_lv_ix = -1;
				continue;
			}
			lvip[lv_ix].pps_found += 1;
			if (lp_ix == 1) {
				cur_lv_ix = lv_ix;
				next_lp_ix = 1;
			} else if (lv_ix != cur_lv_ix || lp_ix != next_lp_ix) {
				next_lp_ix = 1;
				continue;
			}
			if (lp_ix == lvip[lv_ix].pps_per_lv) {
				char tmp[70];

				put_partition(state, lv_ix + 1,
				  (i + 1 - lp_ix) * pp_blocks_size + psn_part1,
				  lvip[lv_ix].pps_per_lv * pp_blocks_size);
				snprintf(tmp, sizeof(tmp), " <%s>\n",
					 n[lv_ix].name);
				strlcat(state->pp_buf, tmp, PAGE_SIZE);
				lvip[lv_ix].lv_is_contiguous = 1;
				ret = 1;
				next_lp_ix = 1;
			} else
				next_lp_ix += 1;
		}
		for (i = 0; i < state->limit; i += 1)
			if (lvip[i].pps_found && !lvip[i].lv_is_contiguous) {
				char tmp[sizeof(n[i].name) + 1]; // null char

				snprintf(tmp, sizeof(tmp), "%s", n[i].name);
				pr_warn("partition %s (%u pp's found) is not contiguous\n",
					tmp, lvip[i].pps_found);
			}
		kfree(pvd);
	}
	kfree(n);
	kfree(lvip);
	return ret;
}