/*
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#ifndef __MTD_MTD_H__
#define __MTD_MTD_H__

#include <linux/types.h>
#include <linux/uio.h>
#include <linux/notifier.h>
#include <linux/device.h>

#include <mtd/mtd-abi.h>

#include <asm/div64.h>

#define MTD_ERASE_PENDING	0x01
#define MTD_ERASING		0x02
#define MTD_ERASE_SUSPEND	0x04
#define MTD_ERASE_DONE		0x08
#define MTD_ERASE_FAILED	0x10

#define MTD_FAIL_ADDR_UNKNOWN -1LL

/*
 * If the erase fails, fail_addr might indicate exactly which block failed. If
 * fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level
 * or was not specific to any particular block.
 */
struct erase_info {
	struct mtd_info *mtd;
	uint64_t addr;
	uint64_t len;
	uint64_t fail_addr;
	u_long time;
	u_long retries;
	unsigned dev;
	unsigned cell;
	void (*callback) (struct erase_info *self);
	u_long priv;
	u_char state;
	struct erase_info *next;
};

struct mtd_erase_region_info {
	uint64_t offset;		/* At which this region starts, from the beginning of the MTD */
	uint32_t erasesize;		/* For this region */
	uint32_t numblocks;		/* Number of blocks of erasesize in this region */
	unsigned long *lockmap;		/* If keeping bitmap of locks */
};

/**
 * struct mtd_oob_ops - oob operation operands
 * @mode:	operation mode
 *
 * @len:	number of data bytes to write/read
 *
 * @retlen:	number of data bytes written/read
 *
 * @ooblen:	number of oob bytes to write/read
 * @oobretlen:	number of oob bytes written/read
 * @ooboffs:	offset of oob data in the oob area (only relevant when
 *		mode = MTD_OPS_PLACE_OOB or MTD_OPS_RAW)
 * @datbuf:	data buffer - if NULL only oob data are read/written
 * @oobbuf:	oob data buffer
 *
 * Note that it is allowed to read more than one OOB area in one go, but not
 * to write. The interface assumes that an OOB write request programs only one
 * page's OOB area.
 */
struct mtd_oob_ops {
	unsigned int	mode;
	size_t		len;
	size_t		retlen;
	size_t		ooblen;
	size_t		oobretlen;
	uint32_t	ooboffs;
	uint8_t		*datbuf;
	uint8_t		*oobbuf;
};

#define MTD_MAX_OOBFREE_ENTRIES_LARGE	32
#define MTD_MAX_ECCPOS_ENTRIES_LARGE	640
/**
 * struct mtd_oob_region - oob region definition
 * @offset: region offset
 * @length: region length
 *
 * This structure describes a region of the OOB area, and is used
 * to retrieve ECC or free bytes sections.
 * Each section is defined by an offset within the OOB area and a
 * length.
 */
struct mtd_oob_region {
	u32 offset;
	u32 length;
};

/**
 * struct mtd_ooblayout_ops - NAND OOB layout operations
 * @ecc: function returning an ECC region in the OOB area.
 *	 Should return -ERANGE if %section exceeds the total number of
 *	 ECC sections.
 * @free: function returning a free region in the OOB area.
 *	  Should return -ERANGE if %section exceeds the total number of
 *	  free sections.
 */
struct mtd_ooblayout_ops {
	int (*ecc)(struct mtd_info *mtd, int section,
		   struct mtd_oob_region *oobecc);
	int (*free)(struct mtd_info *mtd, int section,
		    struct mtd_oob_region *oobfree);
};

struct module;	/* only needed for owner field in mtd_info */

struct mtd_info {
	u_char type;
	uint32_t flags;
	uint64_t size;	 // Total size of the MTD

	/* "Major" erase size for the device. Naïve users may take this
	 * to be the only erase size available, or may use the more detailed
	 * information below if they desire
	 */
	uint32_t erasesize;
	/* Minimal writable flash unit size. In case of NOR flash it is 1 (even
	 * though individual bits can be cleared), in case of NAND flash it is
	 * one NAND page (or a half or a quarter of it), in case of ECC-ed NOR
	 * it is the ECC block size, etc. It is illegal to have writesize = 0.
	 * Any driver registering a struct mtd_info must ensure a writesize of
	 * 1 or larger.
	 */
	uint32_t writesize;

	/*
	 * Size of the write buffer used by the MTD. MTD devices having a write
	 * buffer can write multiple writesize chunks at a time. E.g. while
	 * writing 4 * writesize bytes to a device with 2 * writesize bytes
	 * buffer the MTD driver can (but doesn't have to) do 2 writesize
	 * operations, but not 4. Currently, all NANDs have writebufsize
	 * equivalent to writesize (NAND page size). Some NOR flashes do have
	 * writebufsize greater than writesize.
	 */
	uint32_t writebufsize;

	uint32_t oobsize;   // Amount of OOB data per block (e.g. 16)
	uint32_t oobavail;  // Available OOB bytes per block

	/*
	 * If erasesize is a power of 2 then the shift is stored in
	 * erasesize_shift otherwise erasesize_shift is zero. Ditto writesize.
	 */
	unsigned int erasesize_shift;
	unsigned int writesize_shift;
	/* Masks based on erasesize_shift and writesize_shift */
	unsigned int erasesize_mask;
	unsigned int writesize_mask;

	/*
	 * read ops return -EUCLEAN if max number of bitflips corrected on any
	 * one region comprising an ecc step equals or exceeds this value.
	 * Settable by driver, else defaults to ecc_strength.  User can override
	 * in sysfs.  N.B. The meaning of the -EUCLEAN return code has changed;
	 * see Documentation/ABI/testing/sysfs-class-mtd for more detail.
	 */
	unsigned int bitflip_threshold;

	// Kernel-only stuff starts here.
	const char *name;
	int index;

	/* OOB layout description */
	const struct mtd_ooblayout_ops *ooblayout;

	/* the ecc step size. */
	unsigned int ecc_step_size;

	/* max number of correctable bit errors per ecc step */
	unsigned int ecc_strength;

	/* Data for variable erase regions. If numeraseregions is zero,
	 * it means that the whole device has erasesize as given above.
	 */
	int numeraseregions;
	struct mtd_erase_region_info *eraseregions;

	/*
	 * Do not call via these pointers, use corresponding mtd_*()
	 * wrappers instead.
	 */
	int (*_erase) (struct mtd_info *mtd, struct erase_info *instr);
	int (*_point) (struct mtd_info *mtd, loff_t from, size_t len,
		       size_t *retlen, void **virt, resource_size_t *phys);
	int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
	unsigned long (*_get_unmapped_area) (struct mtd_info *mtd,
					     unsigned long len,
					     unsigned long offset,
					     unsigned long flags);
	int (*_read) (struct mtd_info *mtd, loff_t from, size_t len,
		      size_t *retlen, u_char *buf);
	int (*_write) (struct mtd_info *mtd, loff_t to, size_t len,
		       size_t *retlen, const u_char *buf);
	int (*_panic_write) (struct mtd_info *mtd, loff_t to, size_t len,
			     size_t *retlen, const u_char *buf);
	int (*_read_oob) (struct mtd_info *mtd, loff_t from,
			  struct mtd_oob_ops *ops);
	int (*_write_oob) (struct mtd_info *mtd, loff_t to,
			   struct mtd_oob_ops *ops);
	int (*_get_fact_prot_info) (struct mtd_info *mtd, size_t len,
				    size_t *retlen, struct otp_info *buf);
	int (*_read_fact_prot_reg) (struct mtd_info *mtd, loff_t from,
				    size_t len, size_t *retlen, u_char *buf);
	int (*_get_user_prot_info) (struct mtd_info *mtd, size_t len,
				    size_t *retlen, struct otp_info *buf);
	int (*_read_user_prot_reg) (struct mtd_info *mtd, loff_t from,
				    size_t len, size_t *retlen, u_char *buf);
	int (*_write_user_prot_reg) (struct mtd_info *mtd, loff_t to,
				     size_t len, size_t *retlen, u_char *buf);
	int (*_lock_user_prot_reg) (struct mtd_info *mtd, loff_t from,
				    size_t len);
	int (*_writev) (struct mtd_info *mtd, const struct kvec *vecs,
			unsigned long count, loff_t to, size_t *retlen);
	void (*_sync) (struct mtd_info *mtd);
	int (*_lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*_unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*_is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len);
	int (*_block_isreserved) (struct mtd_info *mtd, loff_t ofs);
	int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs);
	int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs);
	int (*_suspend) (struct mtd_info *mtd);
	void (*_resume) (struct mtd_info *mtd);
	void (*_reboot) (struct mtd_info *mtd);
	/*
	 * If the driver is something smart, like UBI, it may need to maintain
	 * its own reference counting. The functions below are for the driver only.
	 */
	int (*_get_device) (struct mtd_info *mtd);
	void (*_put_device) (struct mtd_info *mtd);

	/* Backing device capabilities for this device
	 * - provides mmap capabilities
	 */
	struct backing_dev_info *backing_dev_info;

	struct notifier_block reboot_notifier;  /* default mode before reboot */

	/* ECC status information */
	struct mtd_ecc_stats ecc_stats;
	/* Subpage shift (NAND) */
	int subpage_sft;

	void *priv;

	struct module *owner;
	struct device dev;
	int usecount;
};

int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
		      struct mtd_oob_region *oobecc);
int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
				 int *section,
				 struct mtd_oob_region *oobregion);
int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
			       const u8 *oobbuf, int start, int nbytes);
int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
			       u8 *oobbuf, int start, int nbytes);
int mtd_ooblayout_free(struct mtd_info *mtd, int section,
		       struct mtd_oob_region *oobfree);
int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
				const u8 *oobbuf, int start, int nbytes);
int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
				u8 *oobbuf, int start, int nbytes);
int mtd_ooblayout_count_freebytes(struct mtd_info *mtd);
int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd);

static inline void mtd_set_ooblayout(struct mtd_info *mtd,
				     const struct mtd_ooblayout_ops *ooblayout)
{
	mtd->ooblayout = ooblayout;
}
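
/*
 * Illustrative sketch (not part of this header): a minimal mtd_ooblayout_ops
 * implementation for a hypothetical layout with a single 8-byte ECC section
 * at offset 8 and one free section after the bad block marker bytes. All
 * names and sizes here are made up; real drivers derive them from their
 * controller/chip geometry and install the ops with mtd_set_ooblayout().
 */
static inline int example_ooblayout_ecc(struct mtd_info *mtd, int section,
					struct mtd_oob_region *oobregion)
{
	if (section)
		return -ERANGE;		/* only one ECC section in this sketch */

	oobregion->offset = 8;
	oobregion->length = 8;
	return 0;
}

static inline int example_ooblayout_free(struct mtd_info *mtd, int section,
					 struct mtd_oob_region *oobregion)
{
	if (section)
		return -ERANGE;		/* only one free section in this sketch */

	oobregion->offset = 2;		/* skip the bad block marker bytes */
	oobregion->length = 6;
	return 0;
}

static const struct mtd_ooblayout_ops example_ooblayout_ops __maybe_unused = {
	.ecc = example_ooblayout_ecc,
	.free = example_ooblayout_free,
};
/* A driver would typically call mtd_set_ooblayout(mtd, &example_ooblayout_ops). */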

static inline void mtd_set_of_node(struct mtd_info *mtd,
				   struct device_node *np)
{
	mtd->dev.of_node = np;
}

static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
{
	return mtd->dev.of_node;
}

static inline int mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
{
	return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
}

int mtd_erase(struct mtd_info *mtd, struct erase_info *instr);
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys);
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags);
int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	     u_char *buf);
int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf);
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf);

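/*
 * Illustrative sketch (hypothetical helper, not part of this header): erase
 * the eraseblock at @ofs and then write @len bytes from @buf at the same
 * offset. This assumes the common synchronous pattern where mtd_erase()
 * returns only once the operation has finished; users relying on the
 * erase_info callback need the asynchronous pattern instead.
 */
static inline int example_erase_and_write(struct mtd_info *mtd, loff_t ofs,
					  const u_char *buf, size_t len)
{
	struct erase_info ei = {
		.mtd = mtd,
		.addr = ofs,
		.len = mtd->erasesize,
	};
	size_t retlen;
	int ret;

	ret = mtd_erase(mtd, &ei);
	if (ret) {
		/* ei.fail_addr may point at the failing block, or be
		 * MTD_FAIL_ADDR_UNKNOWN. */
		return ret;
	}

	ret = mtd_write(mtd, ofs, len, &retlen, buf);
	if (!ret && retlen != len)
		ret = -EIO;		/* short write */
	return ret;
}
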
int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops);
int mtd_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops);

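/*
 * Illustrative sketch (hypothetical helper, not part of this header): read
 * one page of data together with its available OOB bytes using
 * MTD_OPS_AUTO_OOB, so that only the free bytes described by the OOB layout
 * are returned. Buffer sizing and page alignment of @from are the caller's
 * responsibility; this only shows how struct mtd_oob_ops is filled in.
 */
static inline int example_read_page_with_oob(struct mtd_info *mtd, loff_t from,
					     u_char *databuf, u_char *oobbuf)
{
	struct mtd_oob_ops ops = {
		.mode = MTD_OPS_AUTO_OOB,
		.len = mtd->writesize,
		.ooblen = mtd->oobavail,
		.datbuf = databuf,
		.oobbuf = oobbuf,
	};
	int ret;

	ret = mtd_read_oob(mtd, from, &ops);
	/* ops.retlen and ops.oobretlen report how much was actually read. */
	return ret;
}
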
int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf);
int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf);
int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf);
int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf);
int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, u_char *buf);
int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len);

int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
	       unsigned long count, loff_t to, size_t *retlen);

static inline void mtd_sync(struct mtd_info *mtd)
{
	if (mtd->_sync)
		mtd->_sync(mtd);
}

int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs);
int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs);
int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs);

static inline int mtd_suspend(struct mtd_info *mtd)
{
	return mtd->_suspend ? mtd->_suspend(mtd) : 0;
}

static inline void mtd_resume(struct mtd_info *mtd)
{
	if (mtd->_resume)
		mtd->_resume(mtd);
}

static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->erasesize_shift)
		return sz >> mtd->erasesize_shift;
	do_div(sz, mtd->erasesize);
	return sz;
}

static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->erasesize_shift)
		return sz & mtd->erasesize_mask;
	return do_div(sz, mtd->erasesize);
}

static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->writesize_shift)
		return sz >> mtd->writesize_shift;
	do_div(sz, mtd->writesize);
	return sz;
}

static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd)
{
	if (mtd->writesize_shift)
		return sz & mtd->writesize_mask;
	return do_div(sz, mtd->writesize);
}

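/*
 * Illustrative sketch: splitting an absolute offset into an eraseblock index
 * and an offset within that eraseblock using the helpers above. The helpers
 * take the shift/mask fast path when erasesize is a power of two and fall
 * back to do_div() otherwise.
 */
static inline void example_split_offset(struct mtd_info *mtd, uint64_t ofs,
					uint32_t *ebnum, uint32_t *eboffs)
{
	*ebnum = mtd_div_by_eb(ofs, mtd);	/* which eraseblock */
	*eboffs = mtd_mod_by_eb(ofs, mtd);	/* offset inside it */
}
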
static inline int mtd_has_oob(const struct mtd_info *mtd)
{
	return mtd->_read_oob && mtd->_write_oob;
}

static inline int mtd_type_is_nand(const struct mtd_info *mtd)
{
	return mtd->type == MTD_NANDFLASH || mtd->type == MTD_MLCNANDFLASH;
}

static inline int mtd_can_have_bb(const struct mtd_info *mtd)
{
	return !!mtd->_block_isbad;
}

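/*
 * Illustrative sketch (hypothetical helper, not part of this header): count
 * the bad eraseblocks on a device, skipping the scan entirely when the driver
 * cannot report bad blocks (e.g. NOR flash).
 */
static inline int example_count_bad_blocks(struct mtd_info *mtd)
{
	uint64_t ofs;
	int bad = 0;

	if (!mtd_can_have_bb(mtd))
		return 0;

	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
		/* mtd_block_isbad() returns > 0 for bad, 0 for good,
		 * negative on error (ignored here for brevity). */
		if (mtd_block_isbad(mtd, ofs) > 0)
			bad++;
	}
	return bad;
}
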
	/* Kernel-side ioctl definitions */

struct mtd_partition;
struct mtd_part_parser_data;

extern int mtd_device_parse_register(struct mtd_info *mtd,
				     const char * const *part_probe_types,
				     struct mtd_part_parser_data *parser_data,
				     const struct mtd_partition *defparts,
				     int defnr_parts);
#define mtd_device_register(master, parts, nr_parts)	\
	mtd_device_parse_register(master, NULL, NULL, parts, nr_parts)
extern int mtd_device_unregister(struct mtd_info *master);
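
/*
 * Illustrative sketch (hypothetical driver code, not part of this header):
 * registering a fully set up struct mtd_info from a driver's probe path and
 * tearing it down again on remove. Passing NULL probe types lets the default
 * partition parsers run, and NULL/0 means there is no fallback partition
 * table; mtd_device_register() is the shorthand for exactly that case.
 */
static inline int example_register_mtd(struct mtd_info *mtd)
{
	return mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
}

static inline int example_unregister_mtd(struct mtd_info *mtd)
{
	return mtd_device_unregister(mtd);
}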
extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
extern int __get_mtd_device(struct mtd_info *mtd);
extern void __put_mtd_device(struct mtd_info *mtd);
extern struct mtd_info *get_mtd_device_nm(const char *name);
extern void put_mtd_device(struct mtd_info *mtd);

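/*
 * Illustrative sketch (hypothetical helper, not part of this header): look an
 * MTD device up by name, read a few bytes from it, and drop the reference
 * again. get_mtd_device_nm() returns an ERR_PTR() on failure, so this assumes
 * <linux/err.h> for IS_ERR()/PTR_ERR(). Name and offsets are made up.
 */
static inline int example_read_by_name(const char *name, loff_t from,
				       size_t len, u_char *buf)
{
	struct mtd_info *mtd;
	size_t retlen;
	int ret;

	mtd = get_mtd_device_nm(name);
	if (IS_ERR(mtd))
		return PTR_ERR(mtd);	/* e.g. -ENODEV if no such device */

	ret = mtd_read(mtd, from, len, &retlen, buf);
	put_mtd_device(mtd);		/* always drop the reference */
	return ret;
}
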

struct mtd_notifier {
	void (*add)(struct mtd_info *mtd);
	void (*remove)(struct mtd_info *mtd);
	struct list_head list;
};


extern void register_mtd_user (struct mtd_notifier *new);
extern int unregister_mtd_user (struct mtd_notifier *old);
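
/*
 * Illustrative sketch (hypothetical code, not part of this header): an MTD
 * user that wants to be told about every MTD device that appears or
 * disappears registers a struct mtd_notifier; the add() hook is also replayed
 * for devices that already exist at registration time.
 */
static void example_mtd_add(struct mtd_info *mtd)
{
	/* bind to the new device here, e.g. by name or type */
}

static void example_mtd_remove(struct mtd_info *mtd)
{
	/* release anything that still refers to @mtd */
}

static struct mtd_notifier example_notifier __maybe_unused = {
	.add = example_mtd_add,
	.remove = example_mtd_remove,
};
/* Registered with register_mtd_user(&example_notifier) and torn down with
 * unregister_mtd_user(&example_notifier). */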
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size);
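
/*
 * Illustrative sketch (hypothetical code, not part of this header):
 * mtd_kmalloc_up_to() tries to allocate *size bytes but may shrink *size
 * (down to the device's writesize) under memory pressure, so callers must be
 * prepared to work in smaller chunks. Assumes <linux/slab.h> for kfree().
 */
static inline int example_alloc_io_buffer(struct mtd_info *mtd)
{
	size_t size = mtd->erasesize;	/* ideally one full eraseblock */
	void *buf = mtd_kmalloc_up_to(mtd, &size);

	if (!buf)
		return -ENOMEM;
	/* ... perform I/O in chunks of at most "size" bytes ... */
	kfree(buf);
	return 0;
}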

void mtd_erase_callback(struct erase_info *instr);

static inline int mtd_is_bitflip(int err) {
	return err == -EUCLEAN;
}

static inline int mtd_is_eccerr(int err) {
	return err == -EBADMSG;
}

static inline int mtd_is_bitflip_or_eccerr(int err) {
	return mtd_is_bitflip(err) || mtd_is_eccerr(err);
}

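/*
 * Illustrative sketch (hypothetical helper, not part of this header): a read
 * that treats corrected bitflips (-EUCLEAN) as success but still reports
 * uncorrectable ECC errors (-EBADMSG) and other failures to the caller.
 */
static inline int example_read_checked(struct mtd_info *mtd, loff_t from,
				       size_t len, u_char *buf)
{
	size_t retlen;
	int ret = mtd_read(mtd, from, len, &retlen, buf);

	if (mtd_is_bitflip(ret))
		return 0;	/* data was corrected; caller may want to scrub */
	return ret;		/* 0, -EBADMSG (uncorrectable) or another error */
}
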
unsigned mtd_mmap_capabilities(struct mtd_info *mtd);

#endif /* __MTD_MTD_H__ */