xfs_aops.c
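/*
 * Writeback helpers from the 2.6-era Linux XFS address-space code
 * (apparently fs/xfs/linux-2.6/xfs_aops.c of pagebuf/linvfs vintage,
 * going by the pagebuf_* and LINVFS_* names).  The listing is a fragment:
 * it opens partway through the parameter list of xfs_map_unwritten() and
 * breaks off inside xfs_page_state_convert().
 */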
	int			block_bits,
	xfs_iomap_t		*iomapp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh = curr;
	xfs_iomap_t		*tmp;
	xfs_buf_t		*pb;
	loff_t			offset, size;
	unsigned long		nblocks = 0;

	offset = start_page->index;
	offset <<= PAGE_CACHE_SHIFT;
	offset += p_offset;

	/* get an "empty" pagebuf to manage IO completion
	 * Proper values will be set before returning */
	pb = pagebuf_lookup(iomapp->iomap_target, 0, 0, 0);
	if (!pb)
		return -EAGAIN;

	/* Take a reference to the inode to prevent it from
	 * being reclaimed while we have outstanding unwritten
	 * extent IO on it.
	 */
	if ((igrab(inode)) != inode) {
		pagebuf_free(pb);
		return -EAGAIN;
	}

	/* Set the count to 1 initially, this will stop an I/O
	 * completion callout which happens before we have started
	 * all the I/O from calling pagebuf_iodone too early.
	 */
	atomic_set(&pb->pb_io_remaining, 1);

	/* First map forwards in the page consecutive buffers
	 * covering this unwritten extent
	 */
	do {
		if (!buffer_unwritten(bh))
			break;
		tmp = xfs_offset_to_map(start_page, iomapp, p_offset);
		if (!tmp)
			break;
		xfs_map_at_offset(start_page, bh, p_offset, block_bits, iomapp);
		set_buffer_unwritten_io(bh);
		bh->b_private = pb;
		p_offset += bh->b_size;
		nblocks++;
	} while ((bh = bh->b_this_page) != head);

	atomic_add(nblocks, &pb->pb_io_remaining);

	/* If we reached the end of the page, map forwards in any
	 * following pages which are also covered by this extent.
	 */
	if (bh == head) {
		struct address_space	*mapping = inode->i_mapping;
		pgoff_t			tindex, tloff, tlast;
		unsigned long		bs;
		unsigned int		pg_offset, bbits = inode->i_blkbits;
		struct page		*page;

		tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
		tloff = (iomapp->iomap_offset + iomapp->iomap_bsize) >> PAGE_CACHE_SHIFT;
		tloff = min(tlast, tloff);
		for (tindex = start_page->index + 1; tindex < tloff; tindex++) {
			page = xfs_probe_unwritten_page(mapping,
						tindex, iomapp, pb,
						PAGE_CACHE_SIZE, &bs, bbits);
			if (!page)
				break;
			nblocks += bs;
			atomic_add(bs, &pb->pb_io_remaining);
			xfs_convert_page(inode, page, iomapp, wbc, pb,
							startio, all_bh);
			/* stop if converting the next page might add
			 * enough blocks that the corresponding byte
			 * count won't fit in our ulong page buf length */
			if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
				goto enough;
		}

		if (tindex == tlast &&
		    (pg_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)))) {
			page = xfs_probe_unwritten_page(mapping,
							tindex, iomapp, pb,
							pg_offset, &bs, bbits);
			if (page) {
				nblocks += bs;
				atomic_add(bs, &pb->pb_io_remaining);
				xfs_convert_page(inode, page, iomapp, wbc, pb,
							startio, all_bh);
				if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
					goto enough;
			}
		}
	}

enough:
	size = nblocks;		/* NB: using 64bit number here */
	size <<= block_bits;	/* convert fsb's to byte range */

	XFS_BUF_DATAIO(pb);
	XFS_BUF_ASYNC(pb);
	XFS_BUF_SET_SIZE(pb, size);
	XFS_BUF_SET_COUNT(pb, size);
	XFS_BUF_SET_OFFSET(pb, offset);
	XFS_BUF_SET_FSPRIVATE(pb, LINVFS_GET_VP(inode));
	XFS_BUF_SET_IODONE_FUNC(pb, linvfs_unwritten_convert);

	if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
		pagebuf_iodone(pb, 1, 1);
	}

	return 0;
}

STATIC void
xfs_submit_page(
	struct page		*page,
	struct buffer_head	*bh_arr[],
	int			cnt)
{
	struct buffer_head	*bh;
	int			i;

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	clear_page_dirty(page);
	unlock_page(page);

	if (cnt) {
		for (i = 0; i < cnt; i++) {
			bh = bh_arr[i];
			mark_buffer_async_write(bh);
			if (buffer_unwritten(bh))
				set_buffer_unwritten_io(bh);
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
		}

		for (i = 0; i < cnt; i++)
			submit_bh(WRITE, bh_arr[i]);
	} else
		end_page_writeback(page);
}
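/*
 * A minimal standalone sketch (not part of xfs_aops.c) of the completion-
 * counting idiom used in xfs_map_unwritten() above: pb_io_remaining starts
 * at 1 so that a sub-I/O finishing early cannot trigger the "all done" path
 * before every sub-I/O has been submitted; the submitter drops that bias
 * reference last.  All names here (struct demo_ioend, demo_*) are
 * hypothetical, illustrative stand-ins, written in plain C11 atomics rather
 * than kernel atomic_t.
 */
#include <stdatomic.h>
#include <stdio.h>

struct demo_ioend {
	atomic_int	remaining;	/* outstanding sub-I/Os, plus 1 bias */
};

/* stands in for pagebuf_iodone(): must run exactly once, at the very end */
static void demo_iodone(struct demo_ioend *io)
{
	printf("all I/O complete\n");
}

/* called as each sub-I/O finishes (and once to drop the bias) */
static void demo_io_finish(struct demo_ioend *io)
{
	if (atomic_fetch_sub(&io->remaining, 1) == 1)
		demo_iodone(io);	/* we dropped the last reference */
}

static void demo_submit_all(struct demo_ioend *io, int nios)
{
	int	i;

	atomic_init(&io->remaining, 1);		/* bias: hold our own ref */
	for (i = 0; i < nios; i++) {
		atomic_fetch_add(&io->remaining, 1);
		demo_io_finish(io);	/* stands in for an async completion */
	}
	demo_io_finish(io);	/* drop the bias; may fire demo_iodone() */
}

int main(void)
{
	struct demo_ioend	io;

	demo_submit_all(&io, 3);
	return 0;
}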
/*
 * Allocate & map buffers for a page given the extent map, then write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC void
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	xfs_iomap_t		*iomapp,
	struct writeback_control *wbc,
	void			*private,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
	xfs_iomap_t		*mp = iomapp, *tmp;
	unsigned long		end, offset;
	pgoff_t			end_index;
	int			i = 0, index = 0;
	int			bbits = inode->i_blkbits;

	end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	if (page->index < end_index) {
		end = PAGE_CACHE_SIZE;
	} else {
		end = i_size_read(inode) & (PAGE_CACHE_SIZE-1);
	}
	bh = head = page_buffers(page);
	do {
		offset = i << bbits;
		if (!(PageUptodate(page) || buffer_uptodate(bh)))
			continue;
		if (buffer_mapped(bh) && all_bh &&
		    !buffer_unwritten(bh) && !buffer_delay(bh)) {
			if (startio && (offset < end)) {
				lock_buffer(bh);
				bh_arr[index++] = bh;
			}
			continue;
		}
		tmp = xfs_offset_to_map(page, mp, offset);
		if (!tmp)
			continue;
		ASSERT(!(tmp->iomap_flags & IOMAP_HOLE));
		ASSERT(!(tmp->iomap_flags & IOMAP_DELAY));

		/* If this is a new unwritten extent buffer (i.e. one
		 * that we haven't passed in private data for), we must
		 * now map this buffer too.
		 */
		if (buffer_unwritten(bh) && !bh->b_end_io) {
			ASSERT(tmp->iomap_flags & IOMAP_UNWRITTEN);
			xfs_map_unwritten(inode, page, head, bh,
					offset, bbits, tmp, wbc, startio, all_bh);
		} else if (!(buffer_unwritten(bh) && buffer_locked(bh))) {
			xfs_map_at_offset(page, bh, offset, bbits, tmp);
			if (buffer_unwritten(bh)) {
				set_buffer_unwritten_io(bh);
				bh->b_private = private;
				ASSERT(private);
			}
		}
		if (startio && (offset < end)) {
			bh_arr[index++] = bh;
		} else {
			set_buffer_dirty(bh);
			unlock_buffer(bh);
			mark_buffer_dirty(bh);
		}
	} while (i++, (bh = bh->b_this_page) != head);

	if (startio) {
		wbc->nr_to_write--;
		xfs_submit_page(page, bh_arr, index);
	} else {
		unlock_page(page);
	}
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	xfs_iomap_t		*iomapp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh,
	pgoff_t			tlast)
{
	struct page		*page;

	for (; tindex <= tlast; tindex++) {
		page = xfs_probe_delalloc_page(inode, tindex);
		if (!page)
			break;
		xfs_convert_page(inode, page, iomapp, wbc, NULL,
				startio, all_bh);
	}
}
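/*
 * A minimal standalone sketch (not part of xfs_aops.c) of the clustering
 * idiom behind xfs_cluster_write(): starting from the page after the one
 * just written, keep converting consecutive pages while each one still
 * qualifies, and stop at the first page that does not.  probe() and
 * convert() are hypothetical stand-ins for xfs_probe_delalloc_page() and
 * xfs_convert_page(), and demo_mapped[] fakes which pages qualify.
 */
#include <stdio.h>

#define DEMO_NPAGES	8

static int demo_mapped[DEMO_NPAGES] = { 1, 1, 1, 0, 1, 1, 0, 0 };

/* stands in for xfs_probe_delalloc_page(): 0 means "stop clustering" */
static int probe(unsigned long index)
{
	return index < DEMO_NPAGES && demo_mapped[index];
}

/* stands in for xfs_convert_page() */
static void convert(unsigned long index)
{
	printf("writing page %lu as part of the cluster\n", index);
}

static void cluster_write(unsigned long tindex, unsigned long tlast)
{
	for (; tindex <= tlast; tindex++) {
		if (!probe(tindex))
			break;	/* first non-qualifying page ends the run */
		convert(tindex);
	}
}

int main(void)
{
	/* pages 1..2 get clustered; page 3 does not qualify and stops the walk */
	cluster_write(1, DEMO_NPAGES - 1);
	return 0;
}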
/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set, we are
 * coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state flags cannot tell whether any of the blocks (or which
 * block, for that matter) are dirty due to mmap writes, and therefore
 * buffer uptodate state is only valid if the page itself isn't completely
 * uptodate.  Some layers may clear the page dirty flag prior to calling
 * writepage, under the assumption the entire page will be written out; by
 * not writing out the whole page, the page can be reused before all valid
 * dirty data is written out.  Note: in the case of a page that has been
 * dirtied by an mmap write but only partially set up by
 * block_prepare_write(), the bh->b_state flags will not agree, and only
 * the ones set up by BPW/BCW will have valid state; thus the whole page
 * must be written out.
 */
STATIC int
xfs_page_state_convert(
	struct inode	*inode,
	struct page	*page,
	struct writeback_control *wbc,
	int		startio,
	int		unmapped) /* also implies page uptodate */
{
	struct buffer_head	*bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
	xfs_iomap_t		*iomp, iomap;
	loff_t			offset;
	unsigned long		p_offset = 0;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index, tlast;
	int			len, err, i, cnt = 0, uptodate = 1;
	int			flags = startio ? 0 : BMAPI_TRYLOCK;
	int			page_dirty = 1;
	int			delalloc = 0;

	/* Are we off the end of the file ? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			err = -EIO;
			goto error;
		}
	}

	offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	end_offset = min_t(unsigned long long,
			offset + PAGE_CACHE_SIZE, i_size_read(inode));

	bh = head = page_buffers(page);
	iomp = NULL;

	len = bh->b_size;
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio)
			continue;

		if (iomp) {
			iomp = xfs_offset_to_map(page, &iomap, p_offset);
		}

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 */
		if (buffer_unwritten(bh)) {
			if (!startio)
				continue;
			if (!iomp) {
				err = xfs_map_blocks(inode, offset, len, &iomap,
						BMAPI_READ|BMAPI_IGNSTATE);
				if (err) {
					goto error;
				}
				iomp = xfs_offset_to_map(page, &iomap,
								p_offset);
			}
			if (iomp) {
				if (!bh->b_end_io) {
					err = xfs_map_unwritten(inode, page,
							head, bh, p_offset,
							inode->i_blkbits, iomp,
							wbc, startio, unmapped);
					if (err) {
						goto error;
					}
				} else {
					set_bit(BH_Lock, &bh->b_state);
				}
				BUG_ON(!buffer_locked(bh));
				bh_arr[cnt++] = bh;
				page_dirty = 0;
			}
		/*
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 */
		} else if (buffer_delay(bh)) {
			if (!iomp) {
				delalloc = 1;
				err = xfs_map_blocks(inode, offset, len, &iomap,
						BMAPI_ALLOCATE | flags);
				if (err) {
					goto error;
				}
				iomp = xfs_offset_to_map(page, &iomap,
								p_offset);
			}
			if (iomp) {
				xfs_map_at_offset(page, bh, p_offset,
						inode->i_blkbits, iomp);
				if (startio) {
					bh_arr[cnt++] = bh;
				} else {
					set_buffer_dirty(bh);
					unlock_buffer(bh);
					mark_buffer_dirty(bh);
				}
				page_dirty = 0;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {

			if (!buffer_mapped(bh)) {
				int	size;

				/*
				 * Getting here implies an unmapped buffer
				 * was found, and we are in a path where we
				 * need to write the whole page out.
				 */
				if (!iomp) {
					size = xfs_probe_unmapped_cluster(
							inode, page, bh, head);
					err = xfs_map_blocks(inode, offset,
							size, &iomap,
							BMAPI_WRITE|BMAPI_MMAP);
					if (err) {
						goto error;
					}
					iomp = xfs_offset_to_map(page, &iomap,
								p_offset);
				}
				if (iomp) {
					xfs_map_at_offset(page,
							bh, p_offset,
							inode->i_blkbits, iomp);
					if (startio) {
						bh_arr[cnt++] = bh;
					} else {
						set_buffer_dirty(bh);
						unlock_buffer(bh);
						mark_buffer_dirty(bh);
					}
					page_dirty = 0;
				}
			} else if (startio) {
				if (buffer_uptodate(bh) &&
				    !test_and_set_bit(BH_Lock, &bh->b_state)) {
					bh_arr[cnt++] = bh;
					page_dirty = 0;
				}
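/*
 * A minimal standalone sketch (not part of xfs_aops.c) of the off-the-end
 * check at the top of xfs_page_state_convert(): a page is writable only if
 * it lies entirely below i_size, or if it is exactly the page containing
 * i_size and i_size is not page-aligned (a partial last page).
 * PAGE_DEMO_SHIFT/PAGE_DEMO_SIZE are local stand-ins for
 * PAGE_CACHE_SHIFT/PAGE_CACHE_SIZE.
 */
#include <stdio.h>

#define PAGE_DEMO_SHIFT	12
#define PAGE_DEMO_SIZE	(1UL << PAGE_DEMO_SHIFT)

static int page_beyond_eof(unsigned long index, unsigned long long isize)
{
	unsigned long long	end_index = isize >> PAGE_DEMO_SHIFT;

	if (index < end_index)
		return 0;		/* fully inside the file */
	/* past the page containing i_size, or i_size page-aligned:
	 * either way there is no valid data on this page */
	return (index > end_index) || !(isize & (PAGE_DEMO_SIZE - 1));
}

int main(void)
{
	/* i_size = 2.5 pages: pages 0,1 full, page 2 partial, page 3 beyond */
	unsigned long long	isize = 2 * PAGE_DEMO_SIZE + PAGE_DEMO_SIZE / 2;
	unsigned long		i;

	for (i = 0; i < 4; i++)
		printf("page %lu: %s\n", i,
		       page_beyond_eof(i, isize) ? "beyond EOF" : "writable");
	return 0;
}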