xfs_aops.c
/*
 * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like. Any license provided herein, whether implied or
 * otherwise, applies only to this software file. Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */
#include "xfs.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_sb.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include <linux/mpage.h>
#include <linux/writeback.h>

STATIC void xfs_count_page_state(struct page *, int *, int *, int *);
STATIC void xfs_convert_page(struct inode *, struct page *, xfs_iomap_t *,
		struct writeback_control *wbc, void *, int, int);

#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
	int		tag,
	struct inode	*inode,
	struct page	*page,
	int		mask)
{
	xfs_inode_t	*ip;
	bhv_desc_t	*bdp;
	vnode_t		*vp = LINVFS_GET_VP(inode);
	loff_t		isize = i_size_read(inode);
	loff_t		offset = page->index << PAGE_CACHE_SHIFT;
	int		delalloc = -1, unmapped = -1, unwritten = -1;

	if (page_has_buffers(page))
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

	bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
	ip = XFS_BHVTOI(bdp);
	if (!ip->i_rwtrace)
		return;

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)ip,
		(void *)inode,
		(void *)page,
		(void *)((unsigned long)mask),
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
		(void *)((unsigned long)(isize & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)delalloc),
		(void *)((unsigned long)unmapped),
		(void *)((unsigned long)unwritten),
		(void *)NULL,
		(void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, mask)
#endif

void
linvfs_unwritten_done(
	struct buffer_head	*bh,
	int			uptodate)
{
	xfs_buf_t		*pb = (xfs_buf_t *)bh->b_private;

	ASSERT(buffer_unwritten(bh));
	bh->b_end_io = NULL;
	clear_buffer_unwritten(bh);
	if (!uptodate)
		pagebuf_ioerror(pb, EIO);
	if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
		pagebuf_iodone(pb, 1, 1);
	}
	end_buffer_async_write(bh, uptodate);
}

/*
 * Issue transactions to convert a buffer range from unwritten
 * to written extents (buffered IO).
 */
STATIC void
linvfs_unwritten_convert(
	xfs_buf_t	*bp)
{
	vnode_t		*vp = XFS_BUF_FSPRIVATE(bp, vnode_t *);
	int		error;

	BUG_ON(atomic_read(&bp->pb_hold) < 1);
	VOP_BMAP(vp, XFS_BUF_OFFSET(bp), XFS_BUF_SIZE(bp),
			BMAPI_UNWRITTEN, NULL, NULL, error);
	XFS_BUF_SET_FSPRIVATE(bp, NULL);
	XFS_BUF_CLR_IODONE_FUNC(bp);
	XFS_BUF_UNDATAIO(bp);
	iput(LINVFS_GET_IP(vp));
	pagebuf_iodone(bp, 0, 0);
}

/*
 * Issue transactions to convert a buffer range from unwritten
 * to written extents (direct IO).
 */
STATIC void
linvfs_unwritten_convert_direct(
	struct inode	*inode,
	loff_t		offset,
	ssize_t		size,
	void		*private)
{
	ASSERT(!private || inode == (struct inode *)private);

	/* private indicates an unwritten extent lay beneath this IO,
	 * see linvfs_get_block_core.
	 */
	if (private && size > 0) {
		vnode_t	*vp = LINVFS_GET_VP(inode);
		int	error;

		VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
	}
}

STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	ssize_t			count,
	xfs_iomap_t		*mapp,
	int			flags)
{
	vnode_t			*vp = LINVFS_GET_VP(inode);
	int			error, nmaps = 1;

	VOP_BMAP(vp, offset, count, flags, mapp, &nmaps, error);
	if (!error && (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)))
		VMODIFY(vp);
	return -error;
}

/*
 * Finds the corresponding mapping in block @map array of the
 * given @offset within a @page.
 */
STATIC xfs_iomap_t *
xfs_offset_to_map(
	struct page		*page,
	xfs_iomap_t		*iomapp,
	unsigned long		offset)
{
	loff_t			full_offset;	/* offset from start of file */

	ASSERT(offset < PAGE_CACHE_SIZE);

	full_offset = page->index;		/* NB: using 64bit number */
	full_offset <<= PAGE_CACHE_SHIFT;	/* offset from file start */
	full_offset += offset;			/* offset from page start */

	if (full_offset < iomapp->iomap_offset)
		return NULL;
	if (iomapp->iomap_offset + (iomapp->iomap_bsize -1) >= full_offset)
		return iomapp;
	return NULL;
}

STATIC void
xfs_map_at_offset(
	struct page		*page,
	struct buffer_head	*bh,
	unsigned long		offset,
	int			block_bits,
	xfs_iomap_t		*iomapp)
{
	xfs_daddr_t		bn;
	loff_t			delta;
	int			sector_shift;

	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
	ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);

	delta = page->index;
	delta <<= PAGE_CACHE_SHIFT;
	delta += offset;
	delta -= iomapp->iomap_offset;
	delta >>= block_bits;

	sector_shift = block_bits - BBSHIFT;
	bn = iomapp->iomap_bn >> sector_shift;
	bn += delta;
	ASSERT((bn << sector_shift) >= iomapp->iomap_bn);

	lock_buffer(bh);
	bh->b_blocknr = bn;
	bh->b_bdev = iomapp->iomap_target->pbr_bdev;
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
}

/*
 * Look for a page at index which is unlocked and contains our
 * unwritten extent flagged buffers at its head.  Returns page
 * locked and with an extra reference count, and length of the
 * unwritten extent component on this page that we can write,
 * in units of filesystem blocks.
 */
STATIC struct page *
xfs_probe_unwritten_page(
	struct address_space	*mapping,
	pgoff_t			index,
	xfs_iomap_t		*iomapp,
	xfs_buf_t		*pb,
	unsigned long		max_offset,
	unsigned long		*fsbs,
	unsigned int		bbits)
{
	struct page		*page;

	page = find_trylock_page(mapping, index);
	if (!page)
		return 0;
	if (PageWriteback(page))
		goto out;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		unsigned long		p_offset = 0;

		*fsbs = 0;
		bh = head = page_buffers(page);
		do {
			if (!buffer_unwritten(bh) || !buffer_uptodate(bh))
				break;
			if (!xfs_offset_to_map(page, iomapp, p_offset))
				break;
			if (p_offset >= max_offset)
				break;
			xfs_map_at_offset(page, bh, p_offset, bbits, iomapp);
			set_buffer_unwritten_io(bh);
			bh->b_private = pb;
			p_offset += bh->b_size;
			(*fsbs)++;
		} while ((bh = bh->b_this_page) != head);

		if (p_offset)
			return page;
	}

out:
	unlock_page(page);
	return NULL;
}

/*
 * Look for a page at index which is unlocked and not mapped
 * yet - clustering for mmap write case.
 */
STATIC unsigned int
xfs_probe_unmapped_page(
	struct address_space	*mapping,
	pgoff_t			index,
	unsigned int		pg_offset)
{
	struct page		*page;
	int			ret = 0;

	page = find_trylock_page(mapping, index);
	if (!page)
		return 0;
	if (PageWriteback(page))
		goto out;

	if (page->mapping && PageDirty(page)) {
		if (page_has_buffers(page)) {
			struct buffer_head	*bh, *head;

			bh = head = page_buffers(page);
			do {
				if (buffer_mapped(bh) || !buffer_uptodate(bh))
					break;
				ret += bh->b_size;
				if (ret >= pg_offset)
					break;
			} while ((bh = bh->b_this_page) != head);
		} else
			ret = PAGE_CACHE_SIZE;
	}

out:
	unlock_page(page);
	return ret;
}

STATIC unsigned int
xfs_probe_unmapped_cluster(
	struct inode		*inode,
	struct page		*startpage,
	struct buffer_head	*bh,
	struct buffer_head	*head)
{
	pgoff_t			tindex, tlast, tloff;
	unsigned int		pg_offset, len, total = 0;
	struct address_space	*mapping = inode->i_mapping;

	/* First sum forwards in this page */
	do {
		if (buffer_mapped(bh))
			break;
		total += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	/* If we reached the end of the page, sum forwards in
	 * following pages.
	 */
	if (bh == head) {
		tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
		/* Prune this back to avoid pathological behavior */
		tloff = min(tlast, startpage->index + 64);
		for (tindex = startpage->index + 1; tindex < tloff; tindex++) {
			len = xfs_probe_unmapped_page(mapping, tindex,
							PAGE_CACHE_SIZE);
			if (!len)
				return total;
			total += len;
		}
		if (tindex == tlast &&
		    (pg_offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			total += xfs_probe_unmapped_page(mapping,
							tindex, pg_offset);
		}
	}
	return total;
}

/*
 * Probe for a given page (index) in the inode and test if it is delayed
 * and without unwritten buffers.  Returns page locked and with an extra
 * reference count.
 */
STATIC struct page *
xfs_probe_delalloc_page(
	struct inode		*inode,
	pgoff_t			index)
{
	struct page		*page;

	page = find_trylock_page(inode->i_mapping, index);
	if (!page)
		return NULL;
	if (PageWriteback(page))
		goto out;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh)) {
				acceptable = 0;
				break;
			} else if (buffer_delay(bh)) {
				acceptable = 1;
			}
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return page;
	}

out:
	unlock_page(page);
	return NULL;
}

STATIC int
xfs_map_unwritten(
	struct inode		*inode,
	struct page		*start_page,
	struct buffer_head	*head,
	struct buffer_head	*curr,
	unsigned long		p_offset,