/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/crc32.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include "nodelist.h"

/* Context handed to the asynchronous MTD erase callback.  It is
 * allocated in the same kmalloc() as the struct erase_info itself,
 * immediately after it (see jffs2_erase_block below). */
struct erase_priv_struct {
	struct jffs2_eraseblock *jeb;
	struct jffs2_sb_info *c;
};

#ifndef __ECOS
static void jffs2_erase_callback(struct erase_info *);
#endif
static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset);
static void jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);

/*
 * Start erasing a single eraseblock.
 *
 * On eCos, jffs2_flash_erase() is synchronous.  On Linux the MTD erase
 * is asynchronous: completion is reported via jffs2_erase_callback(),
 * which frees the erase_info and routes to jffs2_erase_succeeded() or
 * jffs2_erase_failed().  Immediate failures are handled here: transient
 * ones (-ENOMEM/-EAGAIN) refile the block onto erase_pending_list for a
 * later retry; anything else goes to jffs2_erase_failed().
 *
 * Lock ordering throughout this file: erase_free_sem (mutex) is taken
 * outside erase_completion_lock (spinlock).
 */
static void jffs2_erase_block(struct jffs2_sb_info *c,
			      struct jffs2_eraseblock *jeb)
{
	int ret;
	uint32_t bad_offset;
#ifdef __ECOS
	ret = jffs2_flash_erase(c, jeb);
	if (!ret) {
		jffs2_erase_succeeded(c, jeb);
		return;
	}
	bad_offset = jeb->offset;
#else /* Linux */
	struct erase_info *instr;

	D1(printk(KERN_DEBUG "jffs2_erase_block(): erase block %#08x (range %#08x-%#08x)\n",
		  jeb->offset, jeb->offset, jeb->offset + c->sector_size));
	/* One allocation for both the erase_info and our private context */
	instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL);
	if (!instr) {
		printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
		mutex_lock(&c->erase_free_sem);
		spin_lock(&c->erase_completion_lock);
		list_move(&jeb->list, &c->erase_pending_list);
		/* Undo the accounting done when the block was moved to
		 * erasing_list: it counts as dirty again until erased. */
		c->erasing_size -= c->sector_size;
		c->dirty_size += c->sector_size;
		jeb->dirty_size = c->sector_size;
		spin_unlock(&c->erase_completion_lock);
		mutex_unlock(&c->erase_free_sem);
		return;
	}

	memset(instr, 0, sizeof(*instr));

	instr->mtd = c->mtd;
	instr->addr = jeb->offset;
	instr->len = c->sector_size;
	instr->callback = jffs2_erase_callback;
	/* Private context lives right after the erase_info in the same
	 * allocation; the callback recovers it from instr->priv. */
	instr->priv = (unsigned long)(&instr[1]);
	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;

	((struct erase_priv_struct *)instr->priv)->jeb = jeb;
	((struct erase_priv_struct *)instr->priv)->c = c;

	ret = c->mtd->erase(c->mtd, instr);
	if (!ret)
		return;	/* Erase started; the callback owns instr now */

	bad_offset = instr->fail_addr;
	kfree(instr);
#endif /* __ECOS */

	if (ret == -ENOMEM || ret == -EAGAIN) {
		/* Erase failed immediately. Refile it on the list */
		D1(printk(KERN_DEBUG "Erase at 0x%08x failed: %d. Refiling on erase_pending_list\n", jeb->offset, ret));
		mutex_lock(&c->erase_free_sem);
		spin_lock(&c->erase_completion_lock);
		list_move(&jeb->list, &c->erase_pending_list);
		c->erasing_size -= c->sector_size;
		c->dirty_size += c->sector_size;
		jeb->dirty_size = c->sector_size;
		spin_unlock(&c->erase_completion_lock);
		mutex_unlock(&c->erase_free_sem);
		return;
	}

	if (ret == -EROFS)
		printk(KERN_WARNING "Erase at 0x%08x failed immediately: -EROFS. Is the sector locked?\n", jeb->offset);
	else
		printk(KERN_WARNING "Erase at 0x%08x failed immediately: errno %d\n", jeb->offset, ret);

	jffs2_erase_failed(c, jeb, bad_offset);
}

/*
 * Service the erase lists: verify and mark up to 'count' blocks from
 * erase_complete_list, and start erases for everything on
 * erase_pending_list.  Both locks are dropped around the actual
 * (potentially slow) erase/verify work and retaken for each list
 * operation; yield() between iterations keeps us preemption-friendly.
 */
void jffs2_erase_pending_blocks(struct jffs2_sb_info *c, int count)
{
	struct jffs2_eraseblock *jeb;

	mutex_lock(&c->erase_free_sem);

	spin_lock(&c->erase_completion_lock);

	while (!list_empty(&c->erase_complete_list) ||
	       !list_empty(&c->erase_pending_list)) {

		if (!list_empty(&c->erase_complete_list)) {
			/* Completed erase: verify it and write a cleanmarker.
			 * Parked on erase_checking_list while unlocked. */
			jeb = list_entry(c->erase_complete_list.next, struct jffs2_eraseblock, list);
			list_move(&jeb->list, &c->erase_checking_list);
			spin_unlock(&c->erase_completion_lock);
			mutex_unlock(&c->erase_free_sem);
			jffs2_mark_erased_block(c, jeb);

			if (!--count) {
				D1(printk(KERN_DEBUG "Count reached. jffs2_erase_pending_blocks leaving\n"));
				goto done;
			}

		} else if (!list_empty(&c->erase_pending_list)) {
			jeb = list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list);
			D1(printk(KERN_DEBUG "Starting erase of pending block 0x%08x\n", jeb->offset));
			list_del(&jeb->list);
			/* Everything in the block is about to be erased:
			 * fold its per-block accounting into erasing_size. */
			c->erasing_size += c->sector_size;
			c->wasted_size -= jeb->wasted_size;
			c->free_size -= jeb->free_size;
			c->used_size -= jeb->used_size;
			c->dirty_size -= jeb->dirty_size;
			jeb->wasted_size = jeb->used_size = jeb->dirty_size = jeb->free_size = 0;
			jffs2_free_jeb_node_refs(c, jeb);
			list_add(&jeb->list, &c->erasing_list);
			spin_unlock(&c->erase_completion_lock);
			mutex_unlock(&c->erase_free_sem);

			jffs2_erase_block(c, jeb);

		} else {
			BUG();	/* Loop condition guaranteed one list non-empty */
		}

		/* Be nice */
		yield();
		mutex_lock(&c->erase_free_sem);
		spin_lock(&c->erase_completion_lock);
	}

	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->erase_free_sem);
 done:
	D1(printk(KERN_DEBUG "jffs2_erase_pending_blocks completed\n"));
}

/* An erase completed OK: queue the block for verification/marking by
 * jffs2_erase_pending_blocks().  May run in MTD callback context. */
static void
jffs2_erase_succeeded(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	D1(printk(KERN_DEBUG "Erase completed successfully at 0x%08x\n", jeb->offset));
	mutex_lock(&c->erase_free_sem);
	spin_lock(&c->erase_completion_lock);
	list_move_tail(&jeb->list, &c->erase_complete_list);
	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->erase_free_sem);
	/* Ensure that kupdated calls us again to mark them clean */
	jffs2_erase_pending_trigger(c);
}

/*
 * Handle a failed erase.  For NAND with a device-reported failure
 * address, give the block another chance via the bad-block handling;
 * otherwise (or once jffs2_write_nand_badblock() says to give up)
 * retire the block to bad_list.
 */
static void jffs2_erase_failed(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset)
{
	/* For NAND, if the failure did not occur at the device level for a
	   specific physical page, don't bother updating the bad block table. */
	if (jffs2_cleanmarker_oob(c) && (bad_offset != (uint32_t)MTD_FAIL_ADDR_UNKNOWN)) {
		/* We had a device-level failure to erase.  Let's see if we've
		   failed too many times. */
		if (!jffs2_write_nand_badblock(c, jeb, bad_offset)) {
			/* We'd like to give this block another try. */
			mutex_lock(&c->erase_free_sem);
			spin_lock(&c->erase_completion_lock);
			list_move(&jeb->list, &c->erase_pending_list);
			c->erasing_size -= c->sector_size;
			c->dirty_size += c->sector_size;
			jeb->dirty_size = c->sector_size;
			spin_unlock(&c->erase_completion_lock);
			mutex_unlock(&c->erase_free_sem);
			return;
		}
	}

	/* Give up on this block: account it as bad and wake any waiters. */
	mutex_lock(&c->erase_free_sem);
	spin_lock(&c->erase_completion_lock);
	c->erasing_size -= c->sector_size;
	c->bad_size += c->sector_size;
	list_move(&jeb->list, &c->bad_list);
	c->nr_erasing_blocks--;
	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->erase_free_sem);
	wake_up(&c->erase_wait);
}

#ifndef __ECOS
/* MTD completion callback for the erase started in jffs2_erase_block().
 * Frees the erase_info allocated there. */
static void jffs2_erase_callback(struct erase_info *instr)
{
	struct erase_priv_struct *priv = (void *)instr->priv;

	if (instr->state != MTD_ERASE_DONE) {
		printk(KERN_WARNING "Erase at 0x%08llx finished, but state != MTD_ERASE_DONE. State is 0x%x instead.\n",
		       (unsigned long long)instr->addr, instr->state);
		jffs2_erase_failed(priv->c, priv->jeb, instr->fail_addr);
	} else {
		jffs2_erase_succeeded(priv->c, priv->jeb);
	}
	kfree(instr);
}
#endif /* !__ECOS */

/* Hmmm. Maybe we should accept the extra space it takes and make
   this a standard doubly-linked list? */
/*
 * Unlink every raw node ref belonging to the eraseblock 'jeb' from the
 * per-inode next_in_ino chain that 'ref' is on.  The chain is circular
 * through the owning jffs2_inode_cache (or xattr datum/ref, which share
 * the same 'class' header): the entry whose next_in_ino is NULL is that
 * cache structure, which we stash in 'ic' and then continue the walk
 * from ic->nodes.  Terminates when 'ref' itself has been unlinked.
 */
static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
							struct jffs2_raw_node_ref *ref, struct jffs2_eraseblock *jeb)
{
	struct jffs2_inode_cache *ic = NULL;
	struct jffs2_raw_node_ref **prev;

	prev = &ref->next_in_ino;

	/* Walk the inode's list once, removing any nodes from this eraseblock */
	while (1) {
		if (!(*prev)->next_in_ino) {
			/* We're looking at the jffs2_inode_cache, which is
			   at the end of the linked list. Stash it and continue
			   from the beginning of the list */
			ic = (struct jffs2_inode_cache *)(*prev);
			prev = &ic->nodes;
			continue;
		}

		if (SECTOR_ADDR((*prev)->flash_offset) == jeb->offset) {
			/* It's in the block we're erasing */
			struct jffs2_raw_node_ref *this;

			this = *prev;
			*prev = this->next_in_ino;
			this->next_in_ino = NULL;

			if (this == ref)
				break;

			continue;
		}
		/* Not to be deleted. Skip */
		prev = &((*prev)->next_in_ino);
	}

	/* PARANOIA */
	if (!ic) {
		JFFS2_WARNING("inode_cache/xattr_datum/xattr_ref"
			      " not found in remove_node_refs()!!\n");
		return;
	}

	D1(printk(KERN_DEBUG "Removed nodes in range 0x%08x-0x%08x from ino #%u\n",
		  jeb->offset, jeb->offset + c->sector_size, ic->ino));

	D2({
		/* Debug dump of the surviving chain, five refs per line */
		int i=0;
		struct jffs2_raw_node_ref *this;
		printk(KERN_DEBUG "After remove_node_refs_from_ino_list: \n" KERN_DEBUG);

		this = ic->nodes;

		while(this) {
			printk( "0x%08x(%d)->", ref_offset(this), ref_flags(this));
			if (++i == 5) {
				printk("\n" KERN_DEBUG);
				i=0;
			}
			this = this->next_in_ino;
		}
		printk("\n");
	});

	/* Release the owner if it no longer has any nodes */
	switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
	case RAWNODE_CLASS_XATTR_DATUM:
		jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
		break;
	case RAWNODE_CLASS_XATTR_REF:
		jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
		break;
#endif
	default:
		/* ic->nodes pointing back at ic means the chain is empty */
		if (ic->nodes == (void *)ic && ic->pino_nlink == 0)
			jffs2_del_ino_cache(c, ic);
	}
}

/*
 * Free all raw node refs for an eraseblock.  Refs are stored in arrays
 * ("refblocks") chained by REF_LINK_NODE sentinel entries, hence the
 * mix of ref++ within a block and next_in_ino hops between blocks.
 */
void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	struct jffs2_raw_node_ref *block, *ref;
	D1(printk(KERN_DEBUG "Freeing all node refs for eraseblock offset 0x%08x\n", jeb->offset));

	block = ref = jeb->first_node;

	while (ref) {
		if (ref->flash_offset == REF_LINK_NODE) {
			/* End of this refblock: hop to the next one and
			   free the one we just finished walking. */
			ref = ref->next_in_ino;
			jffs2_free_refblock(block);
			block = ref;
			continue;
		}
		if (ref->flash_offset != REF_EMPTY_NODE && ref->next_in_ino)
			jffs2_remove_node_refs_from_ino_list(c, ref, jeb);
		/* else it was a non-inode node or already removed, so don't bother */

		ref++;
	}
	jeb->first_node = jeb->last_node = NULL;
}

/*
 * Verify that a freshly-erased block reads back as all 0xFF.
 *
 * Uses mtd->point() to scan the sector in place when the whole sector
 * can be mapped; otherwise falls back to reading it page by page.
 * Returns 0 if the block is clean, -EIO (with *bad_offset set on the
 * read path) if a non-0xFF word was found or a read failed, and
 * -EAGAIN if the verify buffer could not be allocated.
 */
static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *bad_offset)
{
	void *ebuf;
	uint32_t ofs;
	size_t retlen;
	int ret = -EIO;

	if (c->mtd->point) {
		unsigned long *wordebuf;

		ret = c->mtd->point(c->mtd, jeb->offset, c->sector_size,
				    &retlen, &ebuf, NULL);
		if (ret) {
			D1(printk(KERN_DEBUG "MTD point failed %d\n", ret));
			goto do_flash_read;
		}
		if (retlen < c->sector_size) {
			/* Don't muck about if it won't let us point to the whole erase sector */
			D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", retlen));
			c->mtd->unpoint(c->mtd, jeb->offset, retlen);
			goto do_flash_read;
		}
		/* Start one word *before* the buffer so the pre-increment
		   in the loop below begins the scan at ebuf itself. */
		wordebuf = ebuf-sizeof(*wordebuf);
		retlen /= sizeof(*wordebuf);
		do {
			if (*++wordebuf != ~0)
				break;	/* found a word that isn't all 0xFF */
		} while(--retlen);
		c->mtd->unpoint(c->mtd, jeb->offset, c->sector_size);
		if (retlen) {
			printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08tx\n",
			       *wordebuf, jeb->offset + c->sector_size-retlen*sizeof(*wordebuf));
			return -EIO;
		}
		return 0;
	}
 do_flash_read:
	ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!ebuf) {
		printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Refiling\n", jeb->offset);
		return -EAGAIN;
	}

	D1(printk(KERN_DEBUG "Verifying erase at 0x%08x\n", jeb->offset));

	for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) {
		uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
		int i;

		*bad_offset = ofs;

		ret = c->mtd->read(c->mtd, ofs, readlen, &retlen, ebuf);
		if (ret) {
			printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret);
			ret = -EIO;
			goto fail;
		}
		if (retlen != readlen) {
			printk(KERN_WARNING "Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", ofs, readlen, retlen);
			ret = -EIO;
			goto fail;
		}
		for (i=0; i<readlen; i += sizeof(unsigned long)) {
			/* It's OK. We know it's properly aligned */
			unsigned long *datum = ebuf + i;
			/* *datum + 1 is nonzero unless *datum is all-ones */
			if (*datum + 1) {
				*bad_offset += i;
				printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", *datum, *bad_offset);
				ret = -EIO;
				goto fail;
			}
		}
		ofs += readlen;
		cond_resched();
	}
	ret = 0;
 fail:
	kfree(ebuf);
	return ret;
}

/*
 * Final stage of erasing: verify the block is blank, write the clean
 * marker (in OOB for NAND, as an in-band node otherwise, or not at all
 * if cleanmarker_size is 0), fix up the size accounting and move the
 * block to free_list.  On verify/write failure the block is routed to
 * jffs2_erase_failed(); on transient failure (-EAGAIN) it is put back
 * on erase_complete_list to be retried later.
 */
static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{
	size_t retlen;
	int ret;
	uint32_t uninitialized_var(bad_offset);

	switch (jffs2_block_check_erase(c, jeb, &bad_offset)) {
	case -EAGAIN:	goto refile;
	case -EIO:	goto filebad;
	}

	/* Write the erase complete marker */
	D1(printk(KERN_DEBUG "Writing erased marker to block at 0x%08x\n", jeb->offset));
	bad_offset = jeb->offset;

	/* Cleanmarker in oob area or no cleanmarker at all ? */
	if (jffs2_cleanmarker_oob(c) || c->cleanmarker_size == 0) {

		if (jffs2_cleanmarker_oob(c)) {
			if (jffs2_write_nand_cleanmarker(c, jeb))
				goto filebad;
		}
	} else {

		struct kvec vecs[1];
		struct jffs2_unknown_node marker = {
			.magic =	cpu_to_je16(JFFS2_MAGIC_BITMASK),
			.nodetype =	cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
			.totlen =	cpu_to_je32(c->cleanmarker_size)
		};

		jffs2_prealloc_raw_node_refs(c, jeb, 1);

		/* CRC covers the node header minus the hdr_crc field itself */
		marker.hdr_crc = cpu_to_je32(crc32(0, &marker, sizeof(struct jffs2_unknown_node)-4));

		vecs[0].iov_base = (unsigned char *) &marker;
		vecs[0].iov_len = sizeof(marker);
		ret = jffs2_flash_direct_writev(c, vecs, 1, jeb->offset, &retlen);

		if (ret || retlen != sizeof(marker)) {
			if (ret)
				printk(KERN_WARNING "Write clean marker to block at 0x%08x failed: %d\n",
				       jeb->offset, ret);
			else
				printk(KERN_WARNING "Short write to newly-erased block at 0x%08x: Wanted %zd, got %zd\n",
				       jeb->offset, sizeof(marker), retlen);

			goto filebad;
		}
	}
	/* Everything else got zeroed before the erase */
	jeb->free_size = c->sector_size;

	mutex_lock(&c->erase_free_sem);
	spin_lock(&c->erase_completion_lock);

	c->erasing_size -= c->sector_size;
	c->free_size += c->sector_size;

	/* Account for cleanmarker now, if it's in-band */
	if (c->cleanmarker_size && !jffs2_cleanmarker_oob(c))
		jffs2_link_node_ref(c, jeb, jeb->offset | REF_NORMAL, c->cleanmarker_size, NULL);

	list_move_tail(&jeb->list, &c->free_list);
	c->nr_erasing_blocks--;
	c->nr_free_blocks++;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->erase_free_sem);
	wake_up(&c->erase_wait);
	return;

 filebad:
	mutex_lock(&c->erase_free_sem);
	spin_lock(&c->erase_completion_lock);
	/* Stick it on a list (any list) so erase_failed can take it
	   right off again.  Silly, but shouldn't happen often. */
	list_move(&jeb->list, &c->erasing_list);
	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->erase_free_sem);
	jffs2_erase_failed(c, jeb, bad_offset);
	return;

 refile:
	/* Stick it back on the list from whence it came and come back later */
	jffs2_erase_pending_trigger(c);
	mutex_lock(&c->erase_free_sem);
	spin_lock(&c->erase_completion_lock);
	list_move(&jeb->list, &c->erase_complete_list);
	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->erase_free_sem);
	return;
}