xfs_aops.c (8c57a5e7b2820f349c95b8c8393fec1e0f4070d2) vs. xfs_aops.c (fa8d972d055c723cc427e14d4d7919640f418730)
1/*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *

--- 70 unchanged lines hidden ---

79
80 if (XFS_IS_REALTIME_INODE(ip))
81 return mp->m_rtdev_targp->bt_bdev;
82 else
83 return mp->m_ddev_targp->bt_bdev;
84}
85
86/*
87 * We're now finished for good with this ioend structure.
88 * Update the page state via the associated buffer_heads,
89 * release holds on the inode and bio, and finally free
90 * up memory. Do not use the ioend after this.
87 * We're now finished for good with this page. Update the page state via the
88 * associated buffer_heads, paying attention to the start and end offsets that
89 * we need to process on the page.
91 */
90 */
91static void
92xfs_finish_page_writeback(
93 struct inode *inode,
94 struct bio_vec *bvec,
95 int error)
96{
97 unsigned int end = bvec->bv_offset + bvec->bv_len - 1;
98 struct buffer_head *head, *bh;
99 unsigned int off = 0;
100
101 ASSERT(bvec->bv_offset < PAGE_SIZE);
102 ASSERT((bvec->bv_offset & ((1 << inode->i_blkbits) - 1)) == 0);
103 ASSERT(end < PAGE_SIZE);
104 ASSERT((bvec->bv_len & ((1 << inode->i_blkbits) - 1)) == 0);
105
106 bh = head = page_buffers(bvec->bv_page);
107
108 do {
109 if (off < bvec->bv_offset)
110 goto next_bh;
111 if (off > end)
112 break;
113 bh->b_end_io(bh, !error);
114next_bh:
115 off += bh->b_size;
116 } while ((bh = bh->b_this_page) != head);
117}
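
The offset arithmetic in xfs_finish_page_writeback() above is the subtle part: only the block-sized slots of the page that fall inside [bv_offset, bv_offset + bv_len) have their b_end_io called. A minimal userspace sketch of that selection, assuming a hypothetical 4096-byte page and 512-byte block size (illustration only, not kernel code):

#include <stdio.h>

/* Hypothetical geometry: 4096-byte page, 512-byte blocks. */
#define DEMO_PAGE_SIZE  4096u
#define DEMO_BLOCK_SIZE 512u

/* Print which block-sized slots of a page a completed bio_vec covers. */
static void demo_finish_page_range(unsigned int bv_offset, unsigned int bv_len)
{
        unsigned int end = bv_offset + bv_len - 1;      /* inclusive, as above */
        unsigned int off;

        for (off = 0; off < DEMO_PAGE_SIZE; off += DEMO_BLOCK_SIZE) {
                if (off < bv_offset)
                        continue;       /* block starts before the written range */
                if (off > end)
                        break;          /* block starts past the written range */
                printf("block at page offset %u is done\n", off);
        }
}

int main(void)
{
        /* A 1024-byte write that starts 2048 bytes into the page. */
        demo_finish_page_range(2048, 1024);
        return 0;
}
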
118
119/*
120 * We're now finished for good with this ioend structure. Update the page
121 * state, release holds on bios, and finally free up memory. Do not use the
122 * ioend after this.
123 */
92STATIC void
93xfs_destroy_ioend(
124STATIC void
125xfs_destroy_ioend(
94 xfs_ioend_t *ioend)
126 struct xfs_ioend *ioend,
127 int error)
95{
128{
96 struct buffer_head *bh, *next;
129 struct inode *inode = ioend->io_inode;
130 struct bio *last = ioend->io_bio;
131 struct bio *bio, *next;
97
132
98 for (bh = ioend->io_buffer_head; bh; bh = next) {
99 next = bh->b_private;
100 bh->b_end_io(bh, !ioend->io_error);
101 }
133 for (bio = &ioend->io_inline_bio; bio; bio = next) {
134 struct bio_vec *bvec;
135 int i;
102
136
103 mempool_free(ioend, xfs_ioend_pool);
137 /*
138 * For the last bio, bi_private points to the ioend, so we
139 * need to explicitly end the iteration here.
140 */
141 if (bio == last)
142 next = NULL;
143 else
144 next = bio->bi_private;
145
146 /* walk each page on bio, ending page IO on them */
147 bio_for_each_segment_all(bvec, bio, i)
148 xfs_finish_page_writeback(inode, bvec, error);
149
150 bio_put(bio);
151 }
104}
152}
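
The loop in the new xfs_destroy_ioend() above relies on a simple singly linked shape: every bio except the last points at the next one through bi_private (xfs_chain_bio() below sets that up), while the last bio's bi_private is pointed back at the ioend by xfs_submit_ioend(), which is why the iteration has to stop explicitly at io_bio. A rough userspace model of that list shape and traversal, with made-up struct names standing in for struct bio and struct xfs_ioend:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for struct bio / struct xfs_ioend (illustrative only). */
struct demo_bio {
        void *bi_private;
};

struct demo_ioend {
        struct demo_bio  io_inline_bio; /* first bio, embedded in the ioend */
        struct demo_bio *io_bio;        /* last bio built for this ioend */
};

/* Chain a freshly allocated bio behind the current last one (no error handling). */
static struct demo_bio *demo_chain_bio(struct demo_ioend *ioend)
{
        struct demo_bio *new = calloc(1, sizeof(*new));

        ioend->io_bio->bi_private = new;        /* old last bio now points at the new one */
        ioend->io_bio = new;
        return new;
}

/* Walk the chain the way xfs_destroy_ioend() does. */
static void demo_destroy_ioend(struct demo_ioend *ioend)
{
        struct demo_bio *last = ioend->io_bio;
        struct demo_bio *bio, *next;

        for (bio = &ioend->io_inline_bio; bio; bio = next) {
                /* the last bio's bi_private is the ioend, so stop there */
                next = (bio == last) ? NULL : bio->bi_private;
                printf("finishing bio %p\n", (void *)bio);
                if (bio != &ioend->io_inline_bio)
                        free(bio);
        }
}

int main(void)
{
        struct demo_ioend ioend;

        ioend.io_inline_bio.bi_private = NULL;
        ioend.io_bio = &ioend.io_inline_bio;    /* one bio so far: the embedded one */

        demo_chain_bio(&ioend);                 /* a second bio was chained while building the I/O */
        ioend.io_bio->bi_private = &ioend;      /* submit time: last bio points back at the ioend */

        demo_destroy_ioend(&ioend);
        return 0;
}
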
153
154/*
155 * Fast and loose check if this write could update the on-disk inode size.
156 */
157static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
158{
159 return ioend->io_offset + ioend->io_size >
160 XFS_I(ioend->io_inode)->i_d.di_size;
161}
162
163STATIC int
164xfs_setfilesize_trans_alloc(
165 struct xfs_ioend *ioend)
166{
167 struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
168 struct xfs_trans *tp;
169 int error;
170
123 tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
124
125 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
126 if (error) {
127 xfs_trans_cancel(tp);
171 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
172 if (error)
128 return error;
173 return error;
129 }
174
175 ioend->io_append_trans = tp;
176
177 /*
178 * We may pass freeze protection with a transaction. So tell lockdep
179 * we released it.
180 */
181 __sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);

--- 31 unchanged lines hidden ---

213 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
214 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
215
216 return xfs_trans_commit(tp);
217}
218
219STATIC int
220xfs_setfilesize_ioend(
177 struct xfs_ioend *ioend)
221 struct xfs_ioend *ioend,
222 int error)
223{
224 struct xfs_inode *ip = XFS_I(ioend->io_inode);
225 struct xfs_trans *tp = ioend->io_append_trans;
226
227 /*
228 * The transaction may have been allocated in the I/O submission thread,
229 * thus we need to mark ourselves as being in a transaction manually.
230 * Similarly for freeze protection.
231 */
232 current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
233 __sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);
234
235 /* we abort the update if there was an IO error */
191 if (ioend->io_error) {
236 if (error) {
192 xfs_trans_cancel(tp);
237 xfs_trans_cancel(tp);
193 return ioend->io_error;
238 return error;
194 }
195
196 return xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
197}
198
199/*
200 * Schedule IO completion handling on the final put of an ioend.
201 *
202 * If there is no work to do we might as well call it a day and free the
203 * ioend right now.
204 */
205STATIC void
206xfs_finish_ioend(
207 struct xfs_ioend *ioend)
208{
209 if (atomic_dec_and_test(&ioend->io_remaining)) {
210 struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
211
212 if (ioend->io_type == XFS_IO_UNWRITTEN)
213 queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
214 else if (ioend->io_append_trans)
215 queue_work(mp->m_data_workqueue, &ioend->io_work);
216 else
217 xfs_destroy_ioend(ioend);
218 }
219}
220
221/*
222 * IO write completion.
223 */
224STATIC void
225xfs_end_io(
226 struct work_struct *work)
227{
228 xfs_ioend_t *ioend = container_of(work, xfs_ioend_t, io_work);
229 struct xfs_inode *ip = XFS_I(ioend->io_inode);
230 int error = 0;
251 struct xfs_ioend *ioend =
252 container_of(work, struct xfs_ioend, io_work);
253 struct xfs_inode *ip = XFS_I(ioend->io_inode);
254 int error = ioend->io_bio->bi_error;
231
232 /*
233 * Set an error if the mount has shut down and proceed with end I/O
234 * processing so it can perform whatever cleanups are necessary.
235 */
236 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
237 ioend->io_error = -EIO;
261 error = -EIO;
238
239 /*
240 * For unwritten extents we need to issue transactions to convert a
241 * range to normal written extents after the data I/O has finished.
242 * Detecting and handling completion IO errors is done individually
243 * for each case as different cleanup operations need to be performed
244 * on error.
245 */
246 if (ioend->io_type == XFS_IO_UNWRITTEN) {
247 if (ioend->io_error)
271 if (error)
248 goto done;
249 error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
250 ioend->io_size);
251 } else if (ioend->io_append_trans) {
252 error = xfs_setfilesize_ioend(ioend);
276 error = xfs_setfilesize_ioend(ioend, error);
253 } else {
254 ASSERT(!xfs_ioend_is_append(ioend));
255 }
256
257done:
258 if (error)
259 ioend->io_error = error;
260 xfs_destroy_ioend(ioend);
282 xfs_destroy_ioend(ioend, error);
261}
262
263/*
264 * Allocate and initialise an IO completion structure.
265 * We need to track unwritten extent write completion here initially.
266 * We'll need to extend this for updating the ondisk inode size later
267 * (vs. incore size).
268 */
269STATIC xfs_ioend_t *
270xfs_alloc_ioend(
271 struct inode *inode,
272 unsigned int type)
285STATIC void
286xfs_end_bio(
287 struct bio *bio)
273{
288{
274 xfs_ioend_t *ioend;
289 struct xfs_ioend *ioend = bio->bi_private;
290 struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
275
291
276 ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);
277
278 /*
279 * Set the count to 1 initially, which will prevent an I/O
280 * completion callback from happening before we have started
281 * all the I/O from calling the completion routine too early.
282 */
283 atomic_set(&ioend->io_remaining, 1);
284 ioend->io_error = 0;
285 INIT_LIST_HEAD(&ioend->io_list);
286 ioend->io_type = type;
287 ioend->io_inode = inode;
288 ioend->io_buffer_head = NULL;
289 ioend->io_buffer_tail = NULL;
290 ioend->io_offset = 0;
291 ioend->io_size = 0;
292 ioend->io_append_trans = NULL;
293
294 INIT_WORK(&ioend->io_work, xfs_end_io);
295 return ioend;
292 if (ioend->io_type == XFS_IO_UNWRITTEN)
293 queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
294 else if (ioend->io_append_trans)
295 queue_work(mp->m_data_workqueue, &ioend->io_work);
296 else
297 xfs_destroy_ioend(ioend, bio->bi_error);
296}
297
298STATIC int
299xfs_map_blocks(
300 struct inode *inode,
301 loff_t offset,
302 struct xfs_bmbt_irec *imap,
303 int type)

--- 55 unchanged lines hidden ---

359 xfs_off_t offset)
360{
361 offset >>= inode->i_blkbits;
362
363 return offset >= imap->br_startoff &&
364 offset < imap->br_startoff + imap->br_blockcount;
365}
366
367/*
368 * BIO completion handler for buffered IO.
369 */
370STATIC void
371xfs_end_bio(
372 struct bio *bio)
373{
374 xfs_ioend_t *ioend = bio->bi_private;
375
376 if (!ioend->io_error)
377 ioend->io_error = bio->bi_error;
378
379 /* Toss bio and pass work off to an xfsdatad thread */
380 bio->bi_private = NULL;
381 bio->bi_end_io = NULL;
382 bio_put(bio);
383
384 xfs_finish_ioend(ioend);
385}
386
387STATIC void
388xfs_submit_ioend_bio(
389 struct writeback_control *wbc,
390 xfs_ioend_t *ioend,
391 struct bio *bio)
392{
393 atomic_inc(&ioend->io_remaining);
394 bio->bi_private = ioend;
395 bio->bi_end_io = xfs_end_bio;
396 submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
397}
398
399STATIC struct bio *
400xfs_alloc_ioend_bio(
401 struct buffer_head *bh)
402{
403 struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
404
405 ASSERT(bio->bi_private == NULL);
406 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
407 bio->bi_bdev = bh->b_bdev;
408 return bio;
409}
410
411STATIC void
412xfs_start_buffer_writeback(
413 struct buffer_head *bh)
414{
415 ASSERT(buffer_mapped(bh));
416 ASSERT(buffer_locked(bh));
417 ASSERT(!buffer_delay(bh));
418 ASSERT(!buffer_unwritten(bh));
419

--- 27 unchanged lines hidden ---

447}
448
449static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
450{
451 return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
452}
453
454/*
412/*
455 * Submit all of the bios for an ioend. We are only passed a single ioend at a
456 * time; the caller is responsible for chaining prior to submission.
413 * Submit the bio for an ioend. We are passed an ioend with a bio attached to
414 * it, and we submit that bio. The ioend may be used for multiple bio
415 * submissions, so we only want to allocate an append transaction for the ioend
416 * once. In the case of multiple bio submission, each bio will take an IO
417 * reference to the ioend to ensure that the ioend completion is only done once
418 * all bios have been submitted and the ioend is really done.
457 *
458 * If @fail is non-zero, it means that we have a situation where some part of
459 * the submission process has failed after we have marked pages for writeback
419 *
420 * If @fail is non-zero, it means that we have a situation where some part of
421 * the submission process has failed after we have marked pages for writeback
460 * and unlocked them. In this situation, we need to fail the ioend chain rather
461 * than submit it to IO. This typically only happens on a filesystem shutdown.
422 * and unlocked them. In this situation, we need to fail the bio and ioend
423 * rather than submit it to IO. This typically only happens on a filesystem
424 * shutdown.
462 */
463STATIC int
464xfs_submit_ioend(
465 struct writeback_control *wbc,
425 */
426STATIC int
427xfs_submit_ioend(
428 struct writeback_control *wbc,
466 xfs_ioend_t *ioend,
429 struct xfs_ioend *ioend,
467 int status)
468{
430 int status)
431{
469 struct buffer_head *bh;
470 struct bio *bio;
471 sector_t lastblock = 0;
472
473 /* Reserve log space if we might write beyond the on-disk inode size. */
474 if (!status &&
432 /* Reserve log space if we might write beyond the on-disk inode size. */
433 if (!status &&
475 ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
434 ioend->io_type != XFS_IO_UNWRITTEN &&
435 xfs_ioend_is_append(ioend) &&
436 !ioend->io_append_trans)
476 status = xfs_setfilesize_trans_alloc(ioend);
437 status = xfs_setfilesize_trans_alloc(ioend);
438
439 ioend->io_bio->bi_private = ioend;
440 ioend->io_bio->bi_end_io = xfs_end_bio;
441
477 /*
478 * If we are failing the IO now, just mark the ioend with an
479 * error and finish it. This will run IO completion immediately
480 * as there is only one reference to the ioend at this point in
481 * time.
482 */
483 if (status) {
484 ioend->io_error = status;
485 xfs_finish_ioend(ioend);
449 ioend->io_bio->bi_error = status;
450 bio_endio(ioend->io_bio);
486 return status;
487 }
488
451 return status;
452 }
453
489 bio = NULL;
490 for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
454 submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE,
455 ioend->io_bio);
456 return 0;
457}
491
458
492 if (!bio) {
493retry:
494 bio = xfs_alloc_ioend_bio(bh);
495 } else if (bh->b_blocknr != lastblock + 1) {
496 xfs_submit_ioend_bio(wbc, ioend, bio);
497 goto retry;
498 }
459static void
460xfs_init_bio_from_bh(
461 struct bio *bio,
462 struct buffer_head *bh)
463{
464 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
465 bio->bi_bdev = bh->b_bdev;
466}
499
467
500 if (xfs_bio_add_buffer(bio, bh) != bh->b_size) {
501 xfs_submit_ioend_bio(wbc, ioend, bio);
502 goto retry;
503 }
468static struct xfs_ioend *
469xfs_alloc_ioend(
470 struct inode *inode,
471 unsigned int type,
472 xfs_off_t offset,
473 struct buffer_head *bh)
474{
475 struct xfs_ioend *ioend;
476 struct bio *bio;
504
477
505 lastblock = bh->b_blocknr;
506 }
507 if (bio)
508 xfs_submit_ioend_bio(wbc, ioend, bio);
509 xfs_finish_ioend(ioend);
510 return 0;
478 bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, xfs_ioend_bioset);
479 xfs_init_bio_from_bh(bio, bh);
480
481 ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
482 INIT_LIST_HEAD(&ioend->io_list);
483 ioend->io_type = type;
484 ioend->io_inode = inode;
485 ioend->io_size = 0;
486 ioend->io_offset = offset;
487 INIT_WORK(&ioend->io_work, xfs_end_io);
488 ioend->io_append_trans = NULL;
489 ioend->io_bio = bio;
490 return ioend;
511}
512
513/*
491}
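
With this change the ioend is no longer allocated from a mempool; it is embedded around its first bio and recovered from that bio with container_of(), as the function above does. The real code gets that layout from xfs_ioend_bioset; the sketch below only illustrates the container_of() recovery step, with hypothetical structs in place of struct bio and struct xfs_ioend:

#include <stdio.h>
#include <stddef.h>
#include <stdlib.h>

/* container_of(), expressed with offsetof() as the kernel macro effectively is. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_bio {
        int bi_error;
};

struct demo_ioend {
        unsigned int    io_type;
        struct demo_bio io_inline_bio;  /* bio embedded inside the ioend */
};

int main(void)
{
        /* One allocation covers both the ioend and its first bio. */
        struct demo_ioend *ioend = calloc(1, sizeof(*ioend));
        struct demo_bio *bio = &ioend->io_inline_bio;

        /* Later, given only the bio, the owning ioend can be recovered. */
        struct demo_ioend *recovered =
                container_of(bio, struct demo_ioend, io_inline_bio);

        printf("recovered == ioend? %s\n", recovered == ioend ? "yes" : "no");
        free(ioend);
        return 0;
}
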
492
493/*
494 * Allocate a new bio, and chain the old bio to the new one.
495 *
496 * Note that we have to perform the chaining in this unintuitive order
497 * so that the bi_private linkage is set up in the right direction for the
498 * traversal in xfs_destroy_ioend().
499 */
500static void
501xfs_chain_bio(
502 struct xfs_ioend *ioend,
503 struct writeback_control *wbc,
504 struct buffer_head *bh)
505{
506 struct bio *new;
507
508 new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
509 xfs_init_bio_from_bh(new, bh);
510
511 bio_chain(ioend->io_bio, new);
512 bio_get(ioend->io_bio); /* for xfs_destroy_ioend */
513 submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE,
514 ioend->io_bio);
515 ioend->io_bio = new;
516}
517
518/*
514 * Test to see if we've been building up a completion structure for
515 * earlier buffers -- if so, we try to append to this ioend if we
516 * can, otherwise we finish off any current ioend and start another.
517 * Return the ioend we finished off so that the caller can submit it
518 * once it has finished processing the dirty page.
519 */
520STATIC void
521xfs_add_to_ioend(
522 struct inode *inode,
523 struct buffer_head *bh,
524 xfs_off_t offset,
525 struct xfs_writepage_ctx *wpc,
519 * Test to see if we've been building up a completion structure for
520 * earlier buffers -- if so, we try to append to this ioend if we
521 * can, otherwise we finish off any current ioend and start another.
522 * Return the ioend we finished off so that the caller can submit it
523 * once it has finished processing the dirty page.
524 */
525STATIC void
526xfs_add_to_ioend(
527 struct inode *inode,
528 struct buffer_head *bh,
529 xfs_off_t offset,
530 struct xfs_writepage_ctx *wpc,
531 struct writeback_control *wbc,
526 struct list_head *iolist)
527{
528 if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
529 bh->b_blocknr != wpc->last_block + 1 ||
530 offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
532 struct list_head *iolist)
533{
534 if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
535 bh->b_blocknr != wpc->last_block + 1 ||
536 offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
531 struct xfs_ioend *new;
532
533 if (wpc->ioend)
534 list_add(&wpc->ioend->io_list, iolist);
537 if (wpc->ioend)
538 list_add(&wpc->ioend->io_list, iolist);
535
536 new = xfs_alloc_ioend(inode, wpc->io_type);
537 new->io_offset = offset;
538 new->io_buffer_head = bh;
539 new->io_buffer_tail = bh;
540 wpc->ioend = new;
541 } else {
542 wpc->ioend->io_buffer_tail->b_private = bh;
543 wpc->ioend->io_buffer_tail = bh;
539 wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset, bh);
544 }
545
540 }
541
546 bh->b_private = NULL;
542 /*
543 * If the buffer doesn't fit into the bio we need to allocate a new
544 * one. This shouldn't happen more than once for a given buffer.
545 */
546 while (xfs_bio_add_buffer(wpc->ioend->io_bio, bh) != bh->b_size)
547 xfs_chain_bio(wpc->ioend, wbc, bh);
548
549 wpc->ioend->io_size += bh->b_size;
550 wpc->last_block = bh->b_blocknr;
551 xfs_start_buffer_writeback(bh);
552}
553
554STATIC void
555xfs_map_buffer(
556 struct inode *inode,

--- 243 unchanged lines hidden ---

800 goto out;
801 wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
802 offset);
803 }
804 if (wpc->imap_valid) {
805 lock_buffer(bh);
806 if (wpc->io_type != XFS_IO_OVERWRITE)
807 xfs_map_at_offset(inode, bh, &wpc->imap, offset);
806 xfs_add_to_ioend(inode, bh, offset, wpc, &submit_list);
808 xfs_add_to_ioend(inode, bh, offset, wpc, wbc, &submit_list);
809 count++;
810 }
811
812 } while (offset += len, ((bh = bh->b_this_page) != head));
813
814 if (uptodate && bh == head)
815 SetPageUptodate(page);
816

--- 514 unchanged lines hidden ---

1331 * xfs_map_direct passes us some flags in the private data to tell us what to
1332 * do. If no flags are set, then the write IO is an overwrite wholly within
1333 * the existing allocated file size and so there is nothing for us to do.
1334 *
1335 * Note that in this case the completion can be called in interrupt context,
1336 * whereas if we have flags set we will always be called in task context
1337 * (i.e. from a workqueue).
1338 */
1337STATIC int
1339int
1340xfs_end_io_direct_write(
1341 struct kiocb *iocb,
1342 loff_t offset,
1343 ssize_t size,
1344 void *private)
1345{
1346 struct inode *inode = file_inode(iocb->ki_filp);
1347 struct xfs_inode *ip = XFS_I(inode);

--- 40 unchanged lines hidden ---

1388 trace_xfs_end_io_direct_write_unwritten(ip, offset, size);
1389
1390 error = xfs_iomap_write_unwritten(ip, offset, size);
1391 } else if (flags & XFS_DIO_FLAG_APPEND) {
1392 struct xfs_trans *tp;
1393
1394 trace_xfs_end_io_direct_write_append(ip, offset, size);
1395
1394 tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
1395 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
1396 if (error) {
1397 xfs_trans_cancel(tp);
1398 return error;
1399 }
1400 error = xfs_setfilesize(ip, tp, offset, size);
1396 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0,
1397 &tp);
1398 if (!error)
1399 error = xfs_setfilesize(ip, tp, offset, size);
1400 }
1401
1402 return error;
1403}
1404
1405STATIC ssize_t
1406xfs_vm_direct_IO(
1407 struct kiocb *iocb,
1409 struct iov_iter *iter,
1410 loff_t offset)
1408 struct iov_iter *iter)
1411{
1409{
1412 struct inode *inode = iocb->ki_filp->f_mapping->host;
1413 dio_iodone_t *endio = NULL;
1414 int flags = 0;
1415 struct block_device *bdev;
1416
1417 if (iov_iter_rw(iter) == WRITE) {
1418 endio = xfs_end_io_direct_write;
1419 flags = DIO_ASYNC_EXTEND;
1420 }
1421
1422 if (IS_DAX(inode)) {
1423 return dax_do_io(iocb, inode, iter, offset,
1424 xfs_get_blocks_direct, endio, 0);
1425 }
1426
1427 bdev = xfs_find_bdev_for_inode(inode);
1428 return __blockdev_direct_IO(iocb, inode, bdev, iter, offset,
1429 xfs_get_blocks_direct, endio, NULL, flags);
1410 /*
1411 * We just need the method present so that open/fcntl allow direct I/O.
1412 */
1413 return -EINVAL;
1414}
1415
1416/*
1417 * Punch out the delalloc blocks we have already allocated.
1418 *
1419 * Don't bother with xfs_setattr given that nothing can have made it to disk yet
1420 * as the page is still locked at this point.
1421 */

--- 322 unchanged lines hidden ---