journal.c: 112d6212c80a1c560757520bd822a41c0ad15c2c (old) vs. 464170647b5648bb81f3615567485fcb9a685bed (new)
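In short, this diff converts the journal_head state locking in the metadata write-out path (jbd2_journal_write_metadata_buffer()): the old side serializes through the buffer_head with jbd_lock_bh_state(bh_in)/jbd_unlock_bh_state(bh_in) (historically a bit spinlock in the buffer state word), while the new side takes a regular spinlock, b_state_lock, embedded in the journal_head and reached through jh_in; the lock is initialized when the journal_head is allocated (new lines 2413-2414 at the end of this diff). A minimal kernel-style sketch of the new shape; only b_state_lock, b_frozen_data and the spin_lock()/spin_unlock() calls come from the diff, the struct and function names are illustrative:

#include <linux/spinlock.h>

/* Illustrative stand-in for the relevant part of struct journal_head;
 * only the b_state_lock and b_frozen_data field names are taken from
 * the diff. */
struct demo_head {
	spinlock_t	 b_state_lock;	/* protects the state fields below */
	char		*b_frozen_data;
};

/* New-style critical section: the lock lives in the same object as the
 * state it protects, so no buffer_head is needed just for locking. */
static void demo_set_frozen(struct demo_head *jh, char *frozen)
{
	spin_lock(&jh->b_state_lock);
	jh->b_frozen_data = frozen;
	spin_unlock(&jh->b_state_lock);
}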
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * linux/fs/jbd2/journal.c
4 *
5 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
6 *
7 * Copyright 1998 Red Hat corp --- All Rights Reserved
8 *

--- 349 unchanged lines hidden ---

358 */
359 J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in));
360
361 new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL);
362
363 /* keep subsequent assertions sane */
364 atomic_set(&new_bh->b_count, 1);
365
-366 jbd_lock_bh_state(bh_in);
+366 spin_lock(&jh_in->b_state_lock);
367 repeat:
368 /*
369 * If a new transaction has already done a buffer copy-out, then
370 * we use that version of the data for the commit.
371 */
372 if (jh_in->b_frozen_data) {
373 done_copy_out = 1;
374 new_page = virt_to_page(jh_in->b_frozen_data);

--- 25 unchanged lines hidden ---

400 kunmap_atomic(mapped_data);
401
402 /*
403 * Do we need to do a data copy?
404 */
405 if (need_copy_out && !done_copy_out) {
406 char *tmp;
407
-408 jbd_unlock_bh_state(bh_in);
+408 spin_unlock(&jh_in->b_state_lock);
409 tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS);
410 if (!tmp) {
411 brelse(new_bh);
412 return -ENOMEM;
413 }
-414 jbd_lock_bh_state(bh_in);
+414 spin_lock(&jh_in->b_state_lock);
415 if (jh_in->b_frozen_data) {
416 jbd2_free(tmp, bh_in->b_size);
417 goto repeat;
418 }
419
420 jh_in->b_frozen_data = tmp;
421 mapped_data = kmap_atomic(new_page);
422 memcpy(tmp, mapped_data + new_offset, bh_in->b_size);
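The hunk above (lines 405-422) is the usual unlock-allocate-relock-recheck dance: the state lock cannot be held across jbd2_alloc(..., GFP_NOFS), which may sleep, so the lock is dropped, tmp is allocated, the lock is retaken and b_frozen_data is rechecked; if another context installed a frozen copy while the lock was released, the now-unneeded buffer is freed and the logic restarts at repeat:. A self-contained sketch of that pattern under assumed names (frozen_slot, ensure_frozen and plain kmalloc()/kfree() stand in for the jbd2 structures and allocators):

#include <linux/slab.h>
#include <linux/spinlock.h>

struct frozen_slot {
	spinlock_t	 lock;		/* protects @frozen */
	void		*frozen;	/* copy-out buffer, set at most once */
	size_t		 size;
};

/* Called and returns with slot->lock held; 0 on success, -ENOMEM if the
 * allocation failed. */
static int ensure_frozen(struct frozen_slot *slot)
{
	void *tmp;

repeat:
	if (slot->frozen)
		return 0;		/* someone else already did the copy-out */

	spin_unlock(&slot->lock);	/* the allocation may sleep, so drop the lock */
	tmp = kmalloc(slot->size, GFP_NOFS);
	spin_lock(&slot->lock);
	if (!tmp)
		return -ENOMEM;
	if (slot->frozen) {		/* lost the race while unlocked */
		kfree(tmp);
		goto repeat;
	}
	slot->frozen = tmp;
	return 0;
}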

--- 36 unchanged lines hidden ---

459 * and the original buffer whose contents we are shadowing or
460 * copying is moved to the transaction's shadow queue.
461 */
462 JBUFFER_TRACE(jh_in, "file as BJ_Shadow");
463 spin_lock(&journal->j_list_lock);
464 __jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow);
465 spin_unlock(&journal->j_list_lock);
466 set_buffer_shadow(bh_in);
-467 jbd_unlock_bh_state(bh_in);
+467 spin_unlock(&jh_in->b_state_lock);
468
469 return do_escape | (done_copy_out << 1);
470 }
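Note the packed return value at line 469: negative values are errnos (the visible -ENOMEM path), while a non-negative result carries do_escape in bit 0 and done_copy_out in bit 1, i.e. whether the buffer's leading bytes had to be escaped so they cannot be mistaken for a journal block header, and whether the frozen copy rather than the live buffer is what gets written. A hypothetical decode helper, purely to make the bit layout explicit; the name and signature are invented, only the bit positions come from the return statement:

#include <linux/types.h>

/* Invented helper: unpacks the result of the metadata write-out step.
 * Only the bit layout (do_escape in bit 0, done_copy_out in bit 1) is
 * taken from the return statement above. */
static void demo_decode_wmb_result(int res, bool *do_escape,
				   bool *done_copy_out)
{
	/* Negative values are errnos (e.g. -ENOMEM); the bits only carry
	 * meaning for res >= 0. */
	*do_escape     = res >= 0 && (res & 1);
	*done_copy_out = res >= 0 && (res & 2);
}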
471
472 /*
473 * Allocation code for the journal file. Manage the space left in the
474 * journal, so that we can begin checkpointing when appropriate.
475 */

--- 1929 unchanged lines hidden ---

2405 #endif
2406 ret = kmem_cache_zalloc(jbd2_journal_head_cache, GFP_NOFS);
2407 if (!ret) {
2408 jbd_debug(1, "out of memory for journal_head\n");
2409 pr_notice_ratelimited("ENOMEM in %s, retrying.\n", __func__);
2410 ret = kmem_cache_zalloc(jbd2_journal_head_cache,
2411 GFP_NOFS | __GFP_NOFAIL);
2412 }
+2413 if (ret)
+2414 	spin_lock_init(&ret->b_state_lock);
2415 return ret;
2416 }
2417
2418 static void journal_free_journal_head(struct journal_head *jh)
2419 {
2420 #ifdef CONFIG_JBD2_DEBUG
2421 atomic_dec(&nr_journal_heads);
2422 memset(jh, JBD2_POISON_FREE, sizeof(*jh));

--- 311 unchanged lines hidden ---
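The last hunk is the journal_head allocator: the existing strategy of a first kmem_cache_zalloc(jbd2_journal_head_cache, GFP_NOFS) attempt, followed on failure by a ratelimited notice and a __GFP_NOFAIL retry that the allocator must satisfy, is unchanged; the new lines 2413-2414 then run spin_lock_init() on the freshly added b_state_lock so every journal_head leaves the allocator with a usable lock. A condensed sketch of that allocate-then-initialize shape; the struct and function names are stand-ins, only the GFP flags, the ratelimited message and the b_state_lock initialization mirror the diff:

#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_journal_head {
	spinlock_t b_state_lock;
	/* ... transaction filing state, frozen data pointer, ... */
};

static struct demo_journal_head *demo_alloc_journal_head(struct kmem_cache *cache)
{
	struct demo_journal_head *ret;

	/* Opportunistic attempt that may fail under memory pressure... */
	ret = kmem_cache_zalloc(cache, GFP_NOFS);
	if (!ret) {
		pr_notice_ratelimited("ENOMEM in %s, retrying.\n", __func__);
		/* ...then a __GFP_NOFAIL retry, which is not allowed to
		 * return NULL and blocks until it can be satisfied. */
		ret = kmem_cache_zalloc(cache, GFP_NOFS | __GFP_NOFAIL);
	}
	if (ret)
		spin_lock_init(&ret->b_state_lock);
	return ret;
}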