xref: /linux/fs/ubifs/io.c (revision 5f4123be3cdb1dbd77fa9d6d2bb96bb9689a0a19)
1 /*
2  * This file is part of UBIFS.
3  *
4  * Copyright (C) 2006-2008 Nokia Corporation.
5  * Copyright (C) 2006, 2007 University of Szeged, Hungary
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms of the GNU General Public License version 2 as published by
9  * the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along with
17  * this program; if not, write to the Free Software Foundation, Inc., 51
18  * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19  *
20  * Authors: Artem Bityutskiy (Битюцкий Артём)
21  *          Adrian Hunter
22  *          Zoltan Sogor
23  */
24 
25 /*
26  * This file implements UBIFS I/O subsystem which provides various I/O-related
27  * helper functions (reading/writing/checking/validating nodes) and implements
28  * write-buffering support. Write buffers help to save space which otherwise
29  * would have been wasted for padding to the nearest minimal I/O unit boundary.
30  * Instead, data first goes to the write-buffer and is flushed when the
31  * buffer is full or when it is not used for some time (by timer). This is
32  * similar to the mechanism used by JFFS2.
33  *
34  * Write-buffers are defined by 'struct ubifs_wbuf' objects and protected by
35  * mutexes defined inside these objects. Since sometimes upper-level code
36  * has to lock the write-buffer (e.g. journal space reservation code), many
37  * functions related to write-buffers have "nolock" suffix which means that the
38  * caller has to lock the write-buffer before calling this function.
39  *
40  * UBIFS stores nodes at 64-bit-aligned addresses. If the node length is not
41  * aligned, UBIFS starts the next node from the aligned address, and the padded
42  * bytes may contain any rubbish. In other words, UBIFS does not put padding
43  * bytes in those small gaps. Common headers of nodes store real node lengths,
44  * not aligned lengths. Indexing nodes also store real lengths in branches.
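 * For example, a 30-byte node ending at offset 30 is followed by a node that
 * starts at the 8-byte aligned offset 32; the two bytes in between are simply
 * left as they are.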
45  *
46  * When UBIFS pads up to the next min. I/O unit boundary, it uses a padding
47  * node, or padding bytes if a padding node does not fit in the gap.
48  *
49  * All UBIFS nodes are protected by CRC checksums and UBIFS checks all nodes
50  * every time they are read from the flash media.
51  */
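/*
 * For illustration only (a simplified sketch, not the exact journal code):
 * upper layers typically use the write-buffer API roughly like this, holding
 * the write-buffer mutex around the "nolock" calls:
 *
 *	mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
 *	err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs, dtype);
 *	if (!err)
 *		err = ubifs_wbuf_write_nolock(wbuf, node, len);
 *	mutex_unlock(&wbuf->io_mutex);
 */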
52 
53 #include <linux/crc32.h>
54 #include "ubifs.h"
55 
56 /**
57  * ubifs_ro_mode - switch UBIFS to read-only mode.
58  * @c: UBIFS file-system description object
59  * @err: error code which is the reason of switching to R/O mode
60  */
61 void ubifs_ro_mode(struct ubifs_info *c, int err)
62 {
63 	if (!c->ro_media) {
64 		c->ro_media = 1;
65 		ubifs_warn("switched to read-only mode, error %d", err);
66 		dbg_dump_stack();
67 	}
68 }
69 
70 /**
71  * ubifs_check_node - check node.
72  * @c: UBIFS file-system description object
73  * @buf: node to check
74  * @lnum: logical eraseblock number
75  * @offs: offset within the logical eraseblock
76  * @quiet: print no messages
77  *
78  * This function checks node magic number and CRC checksum. This function also
79  * validates node length to prevent UBIFS from becoming crazy when an attacker
80  * feeds it a file-system image with incorrect nodes. For example, too large
81  * node length in the common header could cause UBIFS to read memory outside of
82  * allocated buffer when checking the CRC checksum.
83  *
84  * This function returns zero in case of success, %-EUCLEAN in case of a bad
85  * CRC or magic number, and %-EINVAL in case of other problems (e.g. bad length).
86  */
87 int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
88 		     int offs, int quiet)
89 {
90 	int err = -EINVAL, type, node_len;
91 	uint32_t crc, node_crc, magic;
92 	const struct ubifs_ch *ch = buf;
93 
94 	ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
95 	ubifs_assert(!(offs & 7) && offs < c->leb_size);
96 
97 	magic = le32_to_cpu(ch->magic);
98 	if (magic != UBIFS_NODE_MAGIC) {
99 		if (!quiet)
100 			ubifs_err("bad magic %#08x, expected %#08x",
101 				  magic, UBIFS_NODE_MAGIC);
102 		err = -EUCLEAN;
103 		goto out;
104 	}
105 
106 	type = ch->node_type;
107 	if (type < 0 || type >= UBIFS_NODE_TYPES_CNT) {
108 		if (!quiet)
109 			ubifs_err("bad node type %d", type);
110 		goto out;
111 	}
112 
113 	node_len = le32_to_cpu(ch->len);
114 	if (node_len + offs > c->leb_size)
115 		goto out_len;
116 
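	/*
	 * Node lengths are validated against per-type limits: either an exact
	 * length (when max_len is zero) or a [min_len, max_len] range.
	 */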
117 	if (c->ranges[type].max_len == 0) {
118 		if (node_len != c->ranges[type].len)
119 			goto out_len;
120 	} else if (node_len < c->ranges[type].min_len ||
121 		   node_len > c->ranges[type].max_len)
122 		goto out_len;
123 
124 	crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
125 	node_crc = le32_to_cpu(ch->crc);
126 	if (crc != node_crc) {
127 		if (!quiet)
128 			ubifs_err("bad CRC: calculated %#08x, read %#08x",
129 				  crc, node_crc);
130 		err = -EUCLEAN;
131 		goto out;
132 	}
133 
134 	return 0;
135 
136 out_len:
137 	if (!quiet)
138 		ubifs_err("bad node length %d", node_len);
139 out:
140 	if (!quiet) {
141 		ubifs_err("bad node at LEB %d:%d", lnum, offs);
142 		dbg_dump_node(c, buf);
143 		dbg_dump_stack();
144 	}
145 	return err;
146 }
147 
148 /**
149  * ubifs_pad - pad flash space.
150  * @c: UBIFS file-system description object
151  * @buf: buffer to put padding to
152  * @pad: how many bytes to pad
153  *
154  * The flash media obliges us to write only in chunks of %c->min_io_size and
155  * when we have to write less data, we add a padding node to the write-buffer and
156  * pad it to the next minimal I/O unit's boundary. Padding nodes help when the
157  * media is being scanned. If the amount of wasted space is not enough to fit a
158  * padding node, which takes %UBIFS_PAD_NODE_SZ bytes, we write a pattern of
159  * padding bytes (%UBIFS_PADDING_BYTE) instead.
160  *
161  * Padding nodes are also used to fill gaps when the "commit-in-gaps" method is
162  * used.
163  */
164 void ubifs_pad(const struct ubifs_info *c, void *buf, int pad)
165 {
166 	uint32_t crc;
167 
168 	ubifs_assert(pad >= 0 && !(pad & 7));
169 
170 	if (pad >= UBIFS_PAD_NODE_SZ) {
171 		struct ubifs_ch *ch = buf;
172 		struct ubifs_pad_node *pad_node = buf;
173 
174 		ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
175 		ch->node_type = UBIFS_PAD_NODE;
176 		ch->group_type = UBIFS_NO_NODE_GROUP;
177 		ch->padding[0] = ch->padding[1] = 0;
178 		ch->sqnum = 0;
179 		ch->len = cpu_to_le32(UBIFS_PAD_NODE_SZ);
180 		pad -= UBIFS_PAD_NODE_SZ;
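		/* pad_len is the number of zero bytes following the padding node */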
181 		pad_node->pad_len = cpu_to_le32(pad);
182 		crc = crc32(UBIFS_CRC32_INIT, buf + 8, UBIFS_PAD_NODE_SZ - 8);
183 		ch->crc = cpu_to_le32(crc);
184 		memset(buf + UBIFS_PAD_NODE_SZ, 0, pad);
185 	} else if (pad > 0)
186 		/* Too little space, padding node won't fit */
187 		memset(buf, UBIFS_PADDING_BYTE, pad);
188 }
189 
190 /**
191  * next_sqnum - get next sequence number.
192  * @c: UBIFS file-system description object
193  */
194 static unsigned long long next_sqnum(struct ubifs_info *c)
195 {
196 	unsigned long long sqnum;
197 
198 	spin_lock(&c->cnt_lock);
199 	sqnum = ++c->max_sqnum;
200 	spin_unlock(&c->cnt_lock);
201 
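	/*
	 * The watermarks are close to the top of the 64-bit sequence number
	 * space; reaching them means the file-system is nearing the end of
	 * its life.
	 */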
202 	if (unlikely(sqnum >= SQNUM_WARN_WATERMARK)) {
203 		if (sqnum >= SQNUM_WATERMARK) {
204 			ubifs_err("sequence number overflow %llu, end of life",
205 				  sqnum);
206 			ubifs_ro_mode(c, -EINVAL);
207 		}
208 		ubifs_warn("running out of sequence numbers, end of life soon");
209 	}
210 
211 	return sqnum;
212 }
213 
214 /**
215  * ubifs_prepare_node - prepare node to be written to flash.
216  * @c: UBIFS file-system description object
217  * @node: the node to prepare
218  * @len: node length
219  * @pad: if the buffer has to be padded
220  *
221  * This function prepares node at @node to be written to the media - it
222  * calculates node CRC, fills the common header, and adds proper padding up to
223  * the next minimum I/O unit if @pad is not zero.
224  */
225 void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad)
226 {
227 	uint32_t crc;
228 	struct ubifs_ch *ch = node;
229 	unsigned long long sqnum = next_sqnum(c);
230 
231 	ubifs_assert(len >= UBIFS_CH_SZ);
232 
233 	ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
234 	ch->len = cpu_to_le32(len);
235 	ch->group_type = UBIFS_NO_NODE_GROUP;
236 	ch->sqnum = cpu_to_le64(sqnum);
237 	ch->padding[0] = ch->padding[1] = 0;
238 	crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
239 	ch->crc = cpu_to_le32(crc);
240 
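	/*
	 * If requested, pad up to the next min. I/O unit boundary. For
	 * example, with a 2048-byte min. I/O unit, a 100-byte node is first
	 * aligned to 104 bytes and then followed by 1944 bytes of padding
	 * (illustrative numbers only).
	 */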
241 	if (pad) {
242 		len = ALIGN(len, 8);
243 		pad = ALIGN(len, c->min_io_size) - len;
244 		ubifs_pad(c, node + len, pad);
245 	}
246 }
247 
248 /**
249  * ubifs_prep_grp_node - prepare node of a group to be written to flash.
250  * @c: UBIFS file-system description object
251  * @node: the node to prepare
252  * @len: node length
253  * @last: indicates the last node of the group
254  *
255  * This function prepares node at @node to be written to the media - it
256  * calculates node CRC and fills the common header.
257  */
258 void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last)
259 {
260 	uint32_t crc;
261 	struct ubifs_ch *ch = node;
262 	unsigned long long sqnum = next_sqnum(c);
263 
264 	ubifs_assert(len >= UBIFS_CH_SZ);
265 
266 	ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC);
267 	ch->len = cpu_to_le32(len);
268 	if (last)
269 		ch->group_type = UBIFS_LAST_OF_NODE_GROUP;
270 	else
271 		ch->group_type = UBIFS_IN_NODE_GROUP;
272 	ch->sqnum = cpu_to_le64(sqnum);
273 	ch->padding[0] = ch->padding[1] = 0;
274 	crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
275 	ch->crc = cpu_to_le32(crc);
276 }
277 
278 /**
279  * wbuf_timer_callback_nolock - write-buffer timer callback function.
280  * @data: timer data (write-buffer descriptor)
281  *
282  * This function is called when the write-buffer timer expires.
283  */
284 static void wbuf_timer_callback_nolock(unsigned long data)
285 {
286 	struct ubifs_wbuf *wbuf = (struct ubifs_wbuf *)data;
287 
288 	wbuf->need_sync = 1;
289 	wbuf->c->need_wbuf_sync = 1;
290 	ubifs_wake_up_bgt(wbuf->c);
291 }
292 
293 /**
294  * new_wbuf_timer_nolock - start new write-buffer timer.
295  * @wbuf: write-buffer descriptor
296  */
297 static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
298 {
299 	ubifs_assert(!timer_pending(&wbuf->timer));
300 
301 	if (!wbuf->timeout)
302 		return;
303 
304 	wbuf->timer.expires = jiffies + wbuf->timeout;
305 	add_timer(&wbuf->timer);
306 }
307 
308 /**
309  * cancel_wbuf_timer_nolock - cancel write-buffer timer.
310  * @wbuf: write-buffer descriptor
311  */
312 static void cancel_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
313 {
314 	/*
315 	 * If the syncer is waiting for the lock (from the background thread's
316 	 * context) and another task is changing the write-buffer, then the syncing
317 	 * should be canceled.
318 	 */
319 	wbuf->need_sync = 0;
320 	del_timer(&wbuf->timer);
321 }
322 
323 /**
324  * ubifs_wbuf_sync_nolock - synchronize write-buffer.
325  * @wbuf: write-buffer to synchronize
326  *
327  * This function synchronizes write-buffer @wbuf and returns zero in case of
328  * success or a negative error code in case of failure.
329  */
330 int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
331 {
332 	struct ubifs_info *c = wbuf->c;
333 	int err, dirt;
334 
335 	cancel_wbuf_timer_nolock(wbuf);
336 	if (!wbuf->used || wbuf->lnum == -1)
337 		/* Write-buffer is empty or not seeked */
338 		return 0;
339 
340 	dbg_io("LEB %d:%d, %d bytes",
341 	       wbuf->lnum, wbuf->offs, wbuf->used);
342 	ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY));
343 	ubifs_assert(!(wbuf->avail & 7));
344 	ubifs_assert(wbuf->offs + c->min_io_size <= c->leb_size);
345 
346 	if (c->ro_media)
347 		return -EROFS;
348 
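	/* Fill the unused tail of the buffer so a full min. I/O unit goes out */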
349 	ubifs_pad(c, wbuf->buf + wbuf->used, wbuf->avail);
350 	err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs,
351 			    c->min_io_size, wbuf->dtype);
352 	if (err) {
353 		ubifs_err("cannot write %d bytes to LEB %d:%d",
354 			  c->min_io_size, wbuf->lnum, wbuf->offs);
355 		dbg_dump_stack();
356 		return err;
357 	}
358 
359 	dirt = wbuf->avail;
360 
361 	spin_lock(&wbuf->lock);
362 	wbuf->offs += c->min_io_size;
363 	wbuf->avail = c->min_io_size;
364 	wbuf->used = 0;
365 	wbuf->next_ino = 0;
366 	spin_unlock(&wbuf->lock);
367 
368 	if (wbuf->sync_callback)
369 		err = wbuf->sync_callback(c, wbuf->lnum,
370 					  c->leb_size - wbuf->offs, dirt);
371 	return err;
372 }
373 
374 /**
375  * ubifs_wbuf_seek_nolock - seek write-buffer.
376  * @wbuf: write-buffer
377  * @lnum: logical eraseblock number to seek to
378  * @offs: logical eraseblock offset to seek to
379  * @dtype: data type
380  *
381  * This function targets the write-buffer to logical eraseblock @lnum:@offs.
382  * The write-buffer is synchronized if it is not empty. Returns zero in case of
383  * success and a negative error code in case of failure.
384  */
385 int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs,
386 			   int dtype)
387 {
388 	const struct ubifs_info *c = wbuf->c;
389 
390 	dbg_io("LEB %d:%d", lnum, offs);
391 	ubifs_assert(lnum >= 0 && lnum < c->leb_cnt);
392 	ubifs_assert(offs >= 0 && offs <= c->leb_size);
393 	ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7));
394 	ubifs_assert(lnum != wbuf->lnum);
395 
396 	if (wbuf->used > 0) {
397 		int err = ubifs_wbuf_sync_nolock(wbuf);
398 
399 		if (err)
400 			return err;
401 	}
402 
403 	spin_lock(&wbuf->lock);
404 	wbuf->lnum = lnum;
405 	wbuf->offs = offs;
406 	wbuf->avail = c->min_io_size;
407 	wbuf->used = 0;
408 	spin_unlock(&wbuf->lock);
409 	wbuf->dtype = dtype;
410 
411 	return 0;
412 }
413 
414 /**
415  * ubifs_bg_wbufs_sync - synchronize write-buffers.
416  * @c: UBIFS file-system description object
417  *
418  * This function is called by background thread to synchronize write-buffers.
419  * Returns zero in case of success and a negative error code in case of
420  * failure.
421  */
422 int ubifs_bg_wbufs_sync(struct ubifs_info *c)
423 {
424 	int err, i;
425 
426 	if (!c->need_wbuf_sync)
427 		return 0;
428 	c->need_wbuf_sync = 0;
429 
430 	if (c->ro_media) {
431 		err = -EROFS;
432 		goto out_timers;
433 	}
434 
435 	dbg_io("synchronize");
436 	for (i = 0; i < c->jhead_cnt; i++) {
437 		struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;
438 
439 		cond_resched();
440 
441 		/*
442 		 * If the mutex is locked then wbuf is being changed, so
443 		 * synchronization is not necessary.
444 		 */
445 		if (mutex_is_locked(&wbuf->io_mutex))
446 			continue;
447 
448 		mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
449 		if (!wbuf->need_sync) {
450 			mutex_unlock(&wbuf->io_mutex);
451 			continue;
452 		}
453 
454 		err = ubifs_wbuf_sync_nolock(wbuf);
455 		mutex_unlock(&wbuf->io_mutex);
456 		if (err) {
457 			ubifs_err("cannot sync write-buffer, error %d", err);
458 			ubifs_ro_mode(c, err);
459 			goto out_timers;
460 		}
461 	}
462 
463 	return 0;
464 
465 out_timers:
466 	/* Cancel all timers to prevent repeated errors */
467 	for (i = 0; i < c->jhead_cnt; i++) {
468 		struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;
469 
470 		mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
471 		cancel_wbuf_timer_nolock(wbuf);
472 		mutex_unlock(&wbuf->io_mutex);
473 	}
474 	return err;
475 }
476 
477 /**
478  * ubifs_wbuf_write_nolock - write data to flash via write-buffer.
479  * @wbuf: write-buffer
480  * @buf: node to write
481  * @len: node length
482  *
483  * This function writes data to flash via write-buffer @wbuf. This means that
484  * the last piece of the node won't reach the flash media immediately if it
485  * does not fill a whole minimal I/O unit. Instead, that piece will sit in RAM
486  * until the write-buffer is synchronized (e.g., by timer).
487  *
488  * This function returns zero in case of success and a negative error code in
489  * case of failure. If the node cannot be written because there is no more
490  * space in this logical eraseblock, %-ENOSPC is returned.
491  */
492 int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
493 {
494 	struct ubifs_info *c = wbuf->c;
495 	int err, written, n, aligned_len = ALIGN(len, 8), offs;
496 
497 	dbg_io("%d bytes (%s) to wbuf at LEB %d:%d", len,
498 	       dbg_ntype(((struct ubifs_ch *)buf)->node_type), wbuf->lnum,
499 	       wbuf->offs + wbuf->used);
500 	ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt);
501 	ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0);
502 	ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
503 	ubifs_assert(wbuf->avail > 0 && wbuf->avail <= c->min_io_size);
504 	ubifs_assert(mutex_is_locked(&wbuf->io_mutex));
505 
506 	if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) {
507 		err = -ENOSPC;
508 		goto out;
509 	}
510 
511 	cancel_wbuf_timer_nolock(wbuf);
512 
513 	if (c->ro_media)
514 		return -EROFS;
515 
516 	if (aligned_len <= wbuf->avail) {
517 		/*
518 		 * The node is not very large and fits entirely within the
519 		 * write-buffer.
520 		 */
521 		memcpy(wbuf->buf + wbuf->used, buf, len);
522 
523 		if (aligned_len == wbuf->avail) {
524 			dbg_io("flush wbuf to LEB %d:%d", wbuf->lnum,
525 				wbuf->offs);
526 			err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf,
527 					    wbuf->offs, c->min_io_size,
528 					    wbuf->dtype);
529 			if (err)
530 				goto out;
531 
532 			spin_lock(&wbuf->lock);
533 			wbuf->offs += c->min_io_size;
534 			wbuf->avail = c->min_io_size;
535 			wbuf->used = 0;
536 			wbuf->next_ino = 0;
537 			spin_unlock(&wbuf->lock);
538 		} else {
539 			spin_lock(&wbuf->lock);
540 			wbuf->avail -= aligned_len;
541 			wbuf->used += aligned_len;
542 			spin_unlock(&wbuf->lock);
543 		}
544 
545 		goto exit;
546 	}
547 
548 	/*
549 	 * The node does not fit into the space left in the write-buffer (the
550 	 * current min. I/O unit); fill and flush the write-buffer and switch
551 	 * to the next min. I/O unit.
552 	 */
553 	dbg_io("flush wbuf to LEB %d:%d", wbuf->lnum, wbuf->offs);
554 	memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail);
555 	err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs,
556 			    c->min_io_size, wbuf->dtype);
557 	if (err)
558 		goto out;
559 
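	/*
	 * The first wbuf->avail bytes of the node were flushed above as the
	 * tail of the write-buffer; account for them before writing the rest.
	 */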
560 	offs = wbuf->offs + c->min_io_size;
561 	len -= wbuf->avail;
562 	aligned_len -= wbuf->avail;
563 	written = wbuf->avail;
564 
565 	/*
566 	 * The remaining data may span one or more whole min. I/O units, so write
567 	 * that part (a multiple of the min. I/O unit size) directly to the flash
568 	 * media. The node length is aligned to 8 bytes because the write-buffer
569 	 * is flushed anyway once less than 8 bytes of space are left in it.
570 	 */
571 	n = aligned_len >> c->min_io_shift;
572 	if (n) {
573 		n <<= c->min_io_shift;
574 		dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum, offs);
575 		err = ubi_leb_write(c->ubi, wbuf->lnum, buf + written, offs, n,
576 				    wbuf->dtype);
577 		if (err)
578 			goto out;
579 		offs += n;
580 		aligned_len -= n;
581 		len -= n;
582 		written += n;
583 	}
584 
585 	spin_lock(&wbuf->lock);
586 	if (aligned_len)
587 		/*
588 		 * Now copy what is left, which does not fill a whole min. I/O
589 		 * unit, into the write-buffer; it will reach the media when the
590 		 * buffer is flushed later.
591 		 */
592 		memcpy(wbuf->buf, buf + written, len);
593 
594 	wbuf->offs = offs;
595 	wbuf->used = aligned_len;
596 	wbuf->avail = c->min_io_size - aligned_len;
597 	wbuf->next_ino = 0;
598 	spin_unlock(&wbuf->lock);
599 
600 exit:
601 	if (wbuf->sync_callback) {
602 		int free = c->leb_size - wbuf->offs - wbuf->used;
603 
604 		err = wbuf->sync_callback(c, wbuf->lnum, free, 0);
605 		if (err)
606 			goto out;
607 	}
608 
609 	if (wbuf->used)
610 		new_wbuf_timer_nolock(wbuf);
611 
612 	return 0;
613 
614 out:
615 	ubifs_err("cannot write %d bytes to LEB %d:%d, error %d",
616 		  len, wbuf->lnum, wbuf->offs, err);
617 	dbg_dump_node(c, buf);
618 	dbg_dump_stack();
619 	dbg_dump_leb(c, wbuf->lnum);
620 	return err;
621 }
622 
623 /**
624  * ubifs_write_node - write node to the media.
625  * @c: UBIFS file-system description object
626  * @buf: the node to write
627  * @len: node length
628  * @lnum: logical eraseblock number
629  * @offs: offset within the logical eraseblock
630  * @dtype: node life-time hint (%UBI_LONGTERM, %UBI_SHORTTERM, %UBI_UNKNOWN)
631  *
632  * This function automatically fills node magic number, assigns sequence
633  * number, and calculates node CRC checksum. The length of the @buf buffer has
634  * to be aligned to the minimal I/O unit size. This function automatically
635  * appends padding node and padding bytes if needed. Returns zero in case of
636  * success and a negative error code in case of failure.
637  */
638 int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
639 		     int offs, int dtype)
640 {
641 	int err, buf_len = ALIGN(len, c->min_io_size);
642 
643 	dbg_io("LEB %d:%d, %s, length %d (aligned %d)",
644 	       lnum, offs, dbg_ntype(((struct ubifs_ch *)buf)->node_type), len,
645 	       buf_len);
646 	ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
647 	ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size);
648 
649 	if (c->ro_media)
650 		return -EROFS;
651 
652 	ubifs_prepare_node(c, buf, len, 1);
653 	err = ubi_leb_write(c->ubi, lnum, buf, offs, buf_len, dtype);
654 	if (err) {
655 		ubifs_err("cannot write %d bytes to LEB %d:%d, error %d",
656 			  buf_len, lnum, offs, err);
657 		dbg_dump_node(c, buf);
658 		dbg_dump_stack();
659 	}
660 
661 	return err;
662 }
663 
664 /**
665  * ubifs_read_node_wbuf - read node from the media or write-buffer.
666  * @wbuf: wbuf to check for un-written data
667  * @buf: buffer to read to
668  * @type: node type
669  * @len: node length
670  * @lnum: logical eraseblock number
671  * @offs: offset within the logical eraseblock
672  *
673  * This function reads a node of known type and length, checks it, and stores
674  * it in @buf. If the node partially or fully sits in the write-buffer, this
675  * function takes data from the buffer, otherwise it reads the flash media.
676  * Returns zero in case of success, %-EUCLEAN if CRC mismatched and a negative
677  * error code in case of failure.
678  */
679 int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
680 			 int lnum, int offs)
681 {
682 	const struct ubifs_info *c = wbuf->c;
683 	int err, rlen, overlap;
684 	struct ubifs_ch *ch = buf;
685 
686 	dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
687 	ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
688 	ubifs_assert(!(offs & 7) && offs < c->leb_size);
689 	ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);
690 
691 	spin_lock(&wbuf->lock);
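	/*
	 * The node overlaps the write-buffer if it is in the same LEB and
	 * ends beyond the offset at which the write-buffer starts.
	 */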
692 	overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
693 	if (!overlap) {
694 		/* We may safely unlock the write-buffer and read the data */
695 		spin_unlock(&wbuf->lock);
696 		return ubifs_read_node(c, buf, type, len, lnum, offs);
697 	}
698 
699 	/* Don't read under wbuf */
700 	rlen = wbuf->offs - offs;
701 	if (rlen < 0)
702 		rlen = 0;
703 
704 	/* Copy the rest from the write-buffer */
705 	memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
706 	spin_unlock(&wbuf->lock);
707 
708 	if (rlen > 0) {
709 		/* Read everything that goes before write-buffer */
710 		err = ubi_read(c->ubi, lnum, buf, offs, rlen);
711 		if (err && err != -EBADMSG) {
712 			ubifs_err("failed to read node %d from LEB %d:%d, "
713 				  "error %d", type, lnum, offs, err);
714 			dbg_dump_stack();
715 			return err;
716 		}
717 	}
718 
719 	if (type != ch->node_type) {
720 		ubifs_err("bad node type (%d but expected %d)",
721 			  ch->node_type, type);
722 		goto out;
723 	}
724 
725 	err = ubifs_check_node(c, buf, lnum, offs, 0);
726 	if (err) {
727 		ubifs_err("expected node type %d", type);
728 		return err;
729 	}
730 
731 	rlen = le32_to_cpu(ch->len);
732 	if (rlen != len) {
733 		ubifs_err("bad node length %d, expected %d", rlen, len);
734 		goto out;
735 	}
736 
737 	return 0;
738 
739 out:
740 	ubifs_err("bad node at LEB %d:%d", lnum, offs);
741 	dbg_dump_node(c, buf);
742 	dbg_dump_stack();
743 	return -EINVAL;
744 }
745 
746 /**
747  * ubifs_read_node - read node.
748  * @c: UBIFS file-system description object
749  * @buf: buffer to read to
750  * @type: node type
751  * @len: node length (not aligned)
752  * @lnum: logical eraseblock number
753  * @offs: offset within the logical eraseblock
754  *
755  * This function reads a node of known type and length, checks it, and stores
756  * it in @buf. Returns zero in case of success, %-EUCLEAN if CRC mismatched,
757  * and a negative error code in case of failure.
758  */
759 int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len,
760 		    int lnum, int offs)
761 {
762 	int err, l;
763 	struct ubifs_ch *ch = buf;
764 
765 	dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
766 	ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
767 	ubifs_assert(len >= UBIFS_CH_SZ && offs + len <= c->leb_size);
768 	ubifs_assert(!(offs & 7) && offs < c->leb_size);
769 	ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);
770 
771 	err = ubi_read(c->ubi, lnum, buf, offs, len);
772 	if (err && err != -EBADMSG) {
773 		ubifs_err("cannot read node %d from LEB %d:%d, error %d",
774 			  type, lnum, offs, err);
775 		return err;
776 	}
777 
778 	if (type != ch->node_type) {
779 		ubifs_err("bad node type (%d but expected %d)",
780 			  ch->node_type, type);
781 		goto out;
782 	}
783 
784 	err = ubifs_check_node(c, buf, lnum, offs, 0);
785 	if (err) {
786 		ubifs_err("expected node type %d", type);
787 		return err;
788 	}
789 
790 	l = le32_to_cpu(ch->len);
791 	if (l != len) {
792 		ubifs_err("bad node length %d, expected %d", l, len);
793 		goto out;
794 	}
795 
796 	return 0;
797 
798 out:
799 	ubifs_err("bad node at LEB %d:%d", lnum, offs);
800 	dbg_dump_node(c, buf);
801 	dbg_dump_stack();
802 	return -EINVAL;
803 }
804 
805 /**
806  * ubifs_wbuf_init - initialize write-buffer.
807  * @c: UBIFS file-system description object
808  * @wbuf: write-buffer to initialize
809  *
810  * This function initializes the write-buffer. Returns zero in case of success
811  * and %-ENOMEM in case of failure.
812  */
813 int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf)
814 {
815 	size_t size;
816 
817 	wbuf->buf = kmalloc(c->min_io_size, GFP_KERNEL);
818 	if (!wbuf->buf)
819 		return -ENOMEM;
820 
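	/*
	 * Worst case for the inode array: one entry for every possible node
	 * in the buffer, assuming nodes no smaller than the common header.
	 */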
821 	size = (c->min_io_size / UBIFS_CH_SZ + 1) * sizeof(ino_t);
822 	wbuf->inodes = kmalloc(size, GFP_KERNEL);
823 	if (!wbuf->inodes) {
824 		kfree(wbuf->buf);
825 		wbuf->buf = NULL;
826 		return -ENOMEM;
827 	}
828 
829 	wbuf->used = 0;
830 	wbuf->lnum = wbuf->offs = -1;
831 	wbuf->avail = c->min_io_size;
832 	wbuf->dtype = UBI_UNKNOWN;
833 	wbuf->sync_callback = NULL;
834 	mutex_init(&wbuf->io_mutex);
835 	spin_lock_init(&wbuf->lock);
836 
837 	wbuf->c = c;
838 	init_timer(&wbuf->timer);
839 	wbuf->timer.function = wbuf_timer_callback_nolock;
840 	wbuf->timer.data = (unsigned long)wbuf;
841 	wbuf->timeout = DEFAULT_WBUF_TIMEOUT;
842 	wbuf->next_ino = 0;
843 
844 	return 0;
845 }
846 
847 /**
848  * ubifs_wbuf_add_ino_nolock - add an inode number into the wbuf inode array.
849  * @wbuf: the write-buffer to add to
850  * @inum: the inode number
851  *
852  * This function adds an inode number to the inode array of the write-buffer.
853  */
854 void ubifs_wbuf_add_ino_nolock(struct ubifs_wbuf *wbuf, ino_t inum)
855 {
856 	if (!wbuf->buf)
857 		/* NOR flash or something similar */
858 		return;
859 
860 	spin_lock(&wbuf->lock);
861 	if (wbuf->used)
862 		wbuf->inodes[wbuf->next_ino++] = inum;
863 	spin_unlock(&wbuf->lock);
864 }
865 
866 /**
867  * wbuf_has_ino - check if the wbuf contains data from the inode.
868  * @wbuf: the write-buffer
869  * @inum: the inode number
870  *
871  * This function returns %1 if the write-buffer contains some data from the
872  * given inode, otherwise it returns %0.
873  */
874 static int wbuf_has_ino(struct ubifs_wbuf *wbuf, ino_t inum)
875 {
876 	int i, ret = 0;
877 
878 	spin_lock(&wbuf->lock);
879 	for (i = 0; i < wbuf->next_ino; i++)
880 		if (inum == wbuf->inodes[i]) {
881 			ret = 1;
882 			break;
883 		}
884 	spin_unlock(&wbuf->lock);
885 
886 	return ret;
887 }
888 
889 /**
890  * ubifs_sync_wbufs_by_inode - synchronize write-buffers for an inode.
891  * @c: UBIFS file-system description object
892  * @inode: inode to synchronize
893  *
894  * This function synchronizes write-buffers which contain nodes belonging to
895  * @inode. Returns zero in case of success and a negative error code in case of
896  * failure.
897  */
898 int ubifs_sync_wbufs_by_inode(struct ubifs_info *c, struct inode *inode)
899 {
900 	int i, err = 0;
901 
902 	for (i = 0; i < c->jhead_cnt; i++) {
903 		struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf;
904 
905 		if (i == GCHD)
906 			/*
907 			 * GC head is special, do not look at it. Even if the
908 			 * head contains something related to this inode, it is
909 			 * a _copy_ of corresponding on-flash node which sits
910 			 * somewhere else.
911 			 */
912 			continue;
913 
914 		if (!wbuf_has_ino(wbuf, inode->i_ino))
915 			continue;
916 
917 		mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
918 		if (wbuf_has_ino(wbuf, inode->i_ino))
919 			err = ubifs_wbuf_sync_nolock(wbuf);
920 		mutex_unlock(&wbuf->io_mutex);
921 
922 		if (err) {
923 			ubifs_ro_mode(c, err);
924 			return err;
925 		}
926 	}
927 	return 0;
928 }
929