/*
 * Simple MTD partitioning layer
 *
 * (C) 2000 Nicolas Pitre <nico@cam.org>
 *
 * This code is GPL
 *
 * $Id: mtdpart.c,v 1.55 2005/11/07 11:14:20 gleixner Exp $
 *
 * 	02-21-2002	Thomas Gleixner <gleixner@autronix.de>
 *			added support for read_oob, write_oob
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/config.h>
#include <linux/kmod.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/compatmac.h>

/* Our partition linked list */
static LIST_HEAD(mtd_partitions);

/* Our partition node structure */
struct mtd_part {
	struct mtd_info mtd;		/* MTD object seen by users; must stay the
					   first member for PART() to work */
	struct mtd_info *master;	/* whole device this partition lives on */
	u_int32_t offset;		/* offset of the partition within the master */
	int index;			/* position in the partition table */
	struct list_head list;		/* entry in mtd_partitions */
	int registered;			/* non-zero once registered via add_mtd_device() */
};

/*
 * Given a pointer to the MTD object embedded in a struct mtd_part,
 * retrieve a pointer to the containing structure with this macro.
 */
#define PART(x)  ((struct mtd_part *)(x))
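
/*
 * Note: the cast above is effectively container_of((x), struct mtd_part, mtd);
 * it is valid only because 'mtd' is the first member of struct mtd_part.
 */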


/*
 * MTD methods which simply translate the effective address and pass through
 * to the _real_ device.
 */

static int part_read (struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	int res;

	if (from >= mtd->size)
		len = 0;
	else if (from + len > mtd->size)
		len = mtd->size - from;
	res = part->master->read (part->master, from + part->offset,
				   len, retlen, buf);
	if (unlikely(res)) {
		if (res == -EUCLEAN)
			mtd->ecc_stats.corrected++;
		if (res == -EBADMSG)
			mtd->ecc_stats.failed++;
	}
	return res;
}

static int part_point (struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char **buf)
{
	struct mtd_part *part = PART(mtd);
	if (from >= mtd->size)
		len = 0;
	else if (from + len > mtd->size)
		len = mtd->size - from;
	return part->master->point (part->master, from + part->offset,
				    len, retlen, buf);
}

static void part_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct mtd_part *part = PART(mtd);

	part->master->unpoint (part->master, addr, from + part->offset, len);
}

static int part_read_oob(struct mtd_info *mtd, loff_t from,
			 struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);
	int res;

	if (from >= mtd->size)
		return -EINVAL;
	if (from + ops->len > mtd->size)
		return -EINVAL;
	res = part->master->read_oob(part->master, from + part->offset, ops);

	if (unlikely(res)) {
		if (res == -EUCLEAN)
			mtd->ecc_stats.corrected++;
		if (res == -EBADMSG)
			mtd->ecc_stats.failed++;
	}
	return res;
}

static int part_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->read_user_prot_reg (part->master, from,
					len, retlen, buf);
}

static int part_get_user_prot_info (struct mtd_info *mtd,
				    struct otp_info *buf, size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->get_user_prot_info (part->master, buf, len);
}

static int part_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->read_fact_prot_reg (part->master, from,
					len, retlen, buf);
}

static int part_get_fact_prot_info (struct mtd_info *mtd,
				    struct otp_info *buf, size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->get_fact_prot_info (part->master, buf, len);
}

static int part_write (struct mtd_info *mtd, loff_t to, size_t len,
			size_t *retlen, const u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (to >= mtd->size)
		len = 0;
	else if (to + len > mtd->size)
		len = mtd->size - to;
	return part->master->write (part->master, to + part->offset,
				    len, retlen, buf);
}

static int part_write_oob(struct mtd_info *mtd, loff_t to,
			 struct mtd_oob_ops *ops)
{
	struct mtd_part *part = PART(mtd);

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	if (to >= mtd->size)
		return -EINVAL;
	if (to + ops->len > mtd->size)
		return -EINVAL;
	return part->master->write_oob(part->master, to + part->offset, ops);
}

static int part_write_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	struct mtd_part *part = PART(mtd);
	return part->master->write_user_prot_reg (part->master, from,
					len, retlen, buf);
}

static int part_lock_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len)
{
	struct mtd_part *part = PART(mtd);
	return part->master->lock_user_prot_reg (part->master, from, len);
}

static int part_writev (struct mtd_info *mtd, const struct kvec *vecs,
			 unsigned long count, loff_t to, size_t *retlen)
{
	struct mtd_part *part = PART(mtd);
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	return part->master->writev (part->master, vecs, count,
					to + part->offset, retlen);
}

static int part_erase (struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_part *part = PART(mtd);
	int ret;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (instr->addr >= mtd->size)
		return -EINVAL;
	instr->addr += part->offset;
	ret = part->master->erase(part->master, instr);
	return ret;
}

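/*
 * Erase completes asynchronously on the master device, so the addresses
 * in the erase_info are master-relative by the time the completion
 * callback runs (part_erase() added part->offset above).  Translate them
 * back to partition-relative offsets before invoking the owner's callback.
 */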
void mtd_erase_callback(struct erase_info *instr)
{
	if (instr->mtd->erase == part_erase) {
		struct mtd_part *part = PART(instr->mtd);

		if (instr->fail_addr != 0xffffffff)
			instr->fail_addr -= part->offset;
		instr->addr -= part->offset;
	}
	if (instr->callback)
		instr->callback(instr);
}
EXPORT_SYMBOL_GPL(mtd_erase_callback);

static int part_lock (struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct mtd_part *part = PART(mtd);
	if ((len + ofs) > mtd->size)
		return -EINVAL;
	return part->master->lock(part->master, ofs + part->offset, len);
}

static int part_unlock (struct mtd_info *mtd, loff_t ofs, size_t len)
{
	struct mtd_part *part = PART(mtd);
	if ((len + ofs) > mtd->size)
		return -EINVAL;
	return part->master->unlock(part->master, ofs + part->offset, len);
}

static void part_sync(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->sync(part->master);
}

static int part_suspend(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	return part->master->suspend(part->master);
}

static void part_resume(struct mtd_info *mtd)
{
	struct mtd_part *part = PART(mtd);
	part->master->resume(part->master);
}

static int part_block_isbad (struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	if (ofs >= mtd->size)
		return -EINVAL;
	ofs += part->offset;
	return part->master->block_isbad(part->master, ofs);
}

static int part_block_markbad (struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_part *part = PART(mtd);
	int res;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (ofs >= mtd->size)
		return -EINVAL;
	ofs += part->offset;
	res = part->master->block_markbad(part->master, ofs);
	if (!res)
		mtd->ecc_stats.badblocks++;
	return res;
}

/*
 * This function unregisters and destroys all slave MTD objects which are
 * attached to the given master MTD object.
 */

int del_mtd_partitions(struct mtd_info *master)
{
	struct list_head *node;
	struct mtd_part *slave;

	for (node = mtd_partitions.next;
	     node != &mtd_partitions;
	     node = node->next) {
		slave = list_entry(node, struct mtd_part, list);
		if (slave->master == master) {
			struct list_head *prev = node->prev;
			__list_del(prev, node->next);
			if (slave->registered)
				del_mtd_device(&slave->mtd);
			kfree(slave);
			node = prev;
		}
	}

	return 0;
}

/*
 * This function, given a master MTD object and a partition table, creates
 * and registers slave MTD objects which are bound to the master according to
 * the partition definitions.
 * (Q: should we register the master MTD object as well?)
 */
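
/*
 * Example (hypothetical board-support code, for illustration only).
 * Masking MTD_WRITEABLE makes the first partition read-only; the last
 * partition starts on the next erase block boundary and claims the
 * remainder of the device:
 *
 *	static struct mtd_partition example_parts[] = {
 *		{
 *			.name		= "bootloader",
 *			.offset		= 0,
 *			.size		= 0x40000,
 *			.mask_flags	= MTD_WRITEABLE,
 *		}, {
 *			.name		= "kernel",
 *			.offset		= MTDPART_OFS_APPEND,
 *			.size		= 0x200000,
 *		}, {
 *			.name		= "rootfs",
 *			.offset		= MTDPART_OFS_NXTBLK,
 *			.size		= MTDPART_SIZ_FULL,
 *		},
 *	};
 *
 *	add_mtd_partitions(master, example_parts, ARRAY_SIZE(example_parts));
 */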

int add_mtd_partitions(struct mtd_info *master,
		       const struct mtd_partition *parts,
		       int nbparts)
{
	struct mtd_part *slave;
	u_int32_t cur_offset = 0;
	int i;

	printk (KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);

	for (i = 0; i < nbparts; i++) {

		/* allocate the partition structure */
		slave = kmalloc (sizeof(*slave), GFP_KERNEL);
		if (!slave) {
			printk (KERN_ERR "memory allocation error while creating partitions for \"%s\"\n",
				master->name);
			del_mtd_partitions(master);
			return -ENOMEM;
		}
		memset(slave, 0, sizeof(*slave));
		list_add(&slave->list, &mtd_partitions);

		/* set up the MTD object for this partition */
		slave->mtd.type = master->type;
		slave->mtd.flags = master->flags & ~parts[i].mask_flags;
		slave->mtd.size = parts[i].size;
		slave->mtd.writesize = master->writesize;
		slave->mtd.oobsize = master->oobsize;
		slave->mtd.ecctype = master->ecctype;
		slave->mtd.eccsize = master->eccsize;

		slave->mtd.name = parts[i].name;
		slave->mtd.bank_size = master->bank_size;
		slave->mtd.owner = master->owner;

		slave->mtd.read = part_read;
		slave->mtd.write = part_write;

		if (master->point && master->unpoint) {
			slave->mtd.point = part_point;
			slave->mtd.unpoint = part_unpoint;
		}

		if (master->read_oob)
			slave->mtd.read_oob = part_read_oob;
		if (master->write_oob)
			slave->mtd.write_oob = part_write_oob;
		if (master->read_user_prot_reg)
			slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
		if (master->read_fact_prot_reg)
			slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
		if (master->write_user_prot_reg)
			slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
		if (master->lock_user_prot_reg)
			slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
		if (master->get_user_prot_info)
			slave->mtd.get_user_prot_info = part_get_user_prot_info;
		if (master->get_fact_prot_info)
			slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
		if (master->sync)
			slave->mtd.sync = part_sync;
		/* Hook suspend/resume on the first partition only, so the
		 * master is not suspended once per partition. */
		if (!i && master->suspend && master->resume) {
			slave->mtd.suspend = part_suspend;
			slave->mtd.resume = part_resume;
		}
		if (master->writev)
			slave->mtd.writev = part_writev;
		if (master->lock)
			slave->mtd.lock = part_lock;
		if (master->unlock)
			slave->mtd.unlock = part_unlock;
		if (master->block_isbad)
			slave->mtd.block_isbad = part_block_isbad;
		if (master->block_markbad)
			slave->mtd.block_markbad = part_block_markbad;
		slave->mtd.erase = part_erase;
		slave->master = master;
		slave->offset = parts[i].offset;
		slave->index = i;

		if (slave->offset == MTDPART_OFS_APPEND)
			slave->offset = cur_offset;
		if (slave->offset == MTDPART_OFS_NXTBLK) {
			slave->offset = cur_offset;
			if ((cur_offset % master->erasesize) != 0) {
				/* Round up to next erasesize */
				slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
				printk(KERN_NOTICE "Moving partition %d: "
				       "0x%08x -> 0x%08x\n", i,
				       cur_offset, slave->offset);
			}
		}
		if (slave->mtd.size == MTDPART_SIZ_FULL)
			slave->mtd.size = master->size - slave->offset;
		cur_offset = slave->offset + slave->mtd.size;

		printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
			slave->offset + slave->mtd.size, slave->mtd.name);

		/* let's do some sanity checks */
		if (slave->offset >= master->size) {
			/* let's register it anyway to preserve ordering */
			slave->offset = 0;
			slave->mtd.size = 0;
			printk (KERN_WARNING "mtd: partition \"%s\" is out of reach -- disabled\n",
				parts[i].name);
		}
		if (slave->offset + slave->mtd.size > master->size) {
			slave->mtd.size = master->size - slave->offset;
			printk (KERN_WARNING "mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
				parts[i].name, master->name, slave->mtd.size);
		}
		if (master->numeraseregions > 1) {
			/* Deal with variable erase size stuff */
			int i;
			struct mtd_erase_region_info *regions = master->eraseregions;

			/* Find the first erase region which is part of this
			 * partition. */
			for (i = 0; i < master->numeraseregions && slave->offset >= regions[i].offset; i++)
				;

			for (i--; i < master->numeraseregions && slave->offset + slave->mtd.size > regions[i].offset; i++) {
				if (slave->mtd.erasesize < regions[i].erasesize) {
					slave->mtd.erasesize = regions[i].erasesize;
				}
			}
		} else {
			/* Single erase size */
			slave->mtd.erasesize = master->erasesize;
		}

		if ((slave->mtd.flags & MTD_WRITEABLE) &&
		    (slave->offset % slave->mtd.erasesize)) {
			/* Doesn't start on a boundary of major erase size */
			/* FIXME: Let it be writable if it is on a boundary of _minor_ erase size though */
			slave->mtd.flags &= ~MTD_WRITEABLE;
			printk (KERN_WARNING "mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
				parts[i].name);
		}
		if ((slave->mtd.flags & MTD_WRITEABLE) &&
		    (slave->mtd.size % slave->mtd.erasesize)) {
			slave->mtd.flags &= ~MTD_WRITEABLE;
			printk (KERN_WARNING "mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
				parts[i].name);
		}

		slave->mtd.ecclayout = master->ecclayout;
		if (master->block_isbad) {
			uint32_t offs = 0;

			while (offs < slave->mtd.size) {
				if (master->block_isbad(master,
							offs + slave->offset))
					slave->mtd.ecc_stats.badblocks++;
				offs += slave->mtd.erasesize;
			}
		}

		if (parts[i].mtdp) {
			/* store the object pointer (caller may or may not register it) */
			*parts[i].mtdp = &slave->mtd;
			slave->registered = 0;
		} else {
			/* register our partition */
			add_mtd_device(&slave->mtd);
			slave->registered = 1;
		}
	}

	return 0;
}

EXPORT_SYMBOL(add_mtd_partitions);
EXPORT_SYMBOL(del_mtd_partitions);

static DEFINE_SPINLOCK(part_parser_lock);
static LIST_HEAD(part_parsers);

static struct mtd_part_parser *get_partition_parser(const char *name)
{
	struct list_head *this;
	struct mtd_part_parser *ret = NULL;

	spin_lock(&part_parser_lock);
	list_for_each(this, &part_parsers) {
		struct mtd_part_parser *p = list_entry(this, struct mtd_part_parser, list);

		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
			ret = p;
			break;
		}
	}
	spin_unlock(&part_parser_lock);

	return ret;
}

int register_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_add(&p->list, &part_parsers);
	spin_unlock(&part_parser_lock);

	return 0;
}

int deregister_mtd_parser(struct mtd_part_parser *p)
{
	spin_lock(&part_parser_lock);
	list_del(&p->list);
	spin_unlock(&part_parser_lock);
	return 0;
}

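/*
 * Example (hypothetical parser module, for illustration only): a parser
 * fills *pparts with a kmalloc'ed partition array and returns the number
 * of partitions found, 0 if none, or a negative error code:
 *
 *	static int example_parse_fn(struct mtd_info *master,
 *				    struct mtd_partition **pparts,
 *				    unsigned long origin)
 *	{
 *		... scan the device, allocate and fill *pparts ...
 *		return nr_parts;
 *	}
 *
 *	static struct mtd_part_parser example_parser = {
 *		.owner		= THIS_MODULE,
 *		.parse_fn	= example_parse_fn,
 *		.name		= "example",
 *	};
 *
 *	register_mtd_parser(&example_parser);
 */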
int parse_mtd_partitions(struct mtd_info *master, const char **types,
			 struct mtd_partition **pparts, unsigned long origin)
{
	struct mtd_part_parser *parser;
	int ret = 0;

	for ( ; ret <= 0 && *types; types++) {
		parser = get_partition_parser(*types);
#ifdef CONFIG_KMOD
		if (!parser && !request_module("%s", *types))
			parser = get_partition_parser(*types);
#endif
		if (!parser) {
			printk(KERN_NOTICE "%s partition parsing not available\n",
			       *types);
			continue;
		}
		ret = (*parser->parse_fn)(master, pparts, origin);
		if (ret > 0) {
			printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
			       ret, parser->name, master->name);
		}
		put_partition_parser(parser);
	}
	return ret;
}
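
/*
 * Example (hypothetical map driver, for illustration only): try the
 * command-line parser first, then RedBoot's partition table, and fall
 * back to a static table if neither finds anything:
 *
 *	static const char *probes[] = { "cmdlinepart", "RedBoot", NULL };
 *	struct mtd_partition *parsed_parts;
 *	int nr;
 *
 *	nr = parse_mtd_partitions(master, probes, &parsed_parts, 0);
 *	if (nr > 0)
 *		add_mtd_partitions(master, parsed_parts, nr);
 *	else
 *		add_mtd_partitions(master, static_parts, ARRAY_SIZE(static_parts));
 */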

EXPORT_SYMBOL_GPL(parse_mtd_partitions);
EXPORT_SYMBOL_GPL(register_mtd_parser);
EXPORT_SYMBOL_GPL(deregister_mtd_parser);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nicolas Pitre <nico@cam.org>");
MODULE_DESCRIPTION("Generic support for partitioning of MTD devices");