/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/wait.h>
#include <asm/atomic.h>

enum bh_state_bits {
        BH_Uptodate,    /* Contains valid data */
        BH_Dirty,       /* Is dirty */
        BH_Lock,        /* Is locked */
        BH_Req,         /* Has been submitted for I/O */

        BH_Mapped,      /* Has a disk mapping */
        BH_New,         /* Disk mapping was newly created by get_block */
        BH_Async_Read,  /* Is under end_buffer_async_read I/O */
        BH_Async_Write, /* Is under end_buffer_async_write I/O */
        BH_Delay,       /* Buffer is not yet allocated on disk */
        BH_Boundary,    /* Block is followed by a discontiguity */
        BH_Write_EIO,   /* I/O error on write */

        BH_PrivateStart, /* not a state bit, but the first bit available
                          * for private allocation by other entities
                          */
};

#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Keep related fields in common cachelines.  The most commonly accessed
 * field (b_state) goes at the start so the compiler does not generate
 * indexed addressing for it.
 */

struct buffer_head {
        /* First cache line: */
        unsigned long b_state;          /* buffer state bitmap (see above) */
        atomic_t b_count;               /* users using this block */
        struct buffer_head *b_this_page;/* circular list of page's buffers */
        struct page *b_page;            /* the page this bh is mapped to */

        sector_t b_blocknr;             /* block number */
        u32 b_size;                     /* block size */
        char *b_data;                   /* pointer to data block */

        struct block_device *b_bdev;
        bh_end_io_t *b_end_io;          /* I/O completion */
        void *b_private;                /* reserved for b_end_io */
        struct list_head b_assoc_buffers; /* associated with another mapping */
};

/*
 * Debug
 */

void __buffer_error(char *file, int line);
#define buffer_error() __buffer_error(__FILE__, __LINE__)

/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 */

#define BUFFER_FNS(bit, name)                                           \
static inline void set_buffer_##name(struct buffer_head *bh)            \
{                                                                       \
        set_bit(BH_##bit, &(bh)->b_state);                              \
}                                                                       \
static inline void clear_buffer_##name(struct buffer_head *bh)          \
{                                                                       \
        clear_bit(BH_##bit, &(bh)->b_state);                            \
}                                                                       \
static inline int buffer_##name(struct buffer_head *bh)                 \
{                                                                       \
        return test_bit(BH_##bit, &(bh)->b_state);                      \
}
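
/*
 * For illustration, BUFFER_FNS(Uptodate, uptodate) expands to three
 * inlines which manipulate the BH_Uptodate bit in bh->b_state:
 *
 *      static inline void set_buffer_uptodate(struct buffer_head *bh)
 *      {
 *              set_bit(BH_Uptodate, &(bh)->b_state);
 *      }
 *
 * plus the analogous clear_buffer_uptodate() and buffer_uptodate().
 */
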
/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */

#define TAS_BUFFER_FNS(bit, name)                                       \
static inline int test_set_buffer_##name(struct buffer_head *bh)        \
{                                                                       \
        return test_and_set_bit(BH_##bit, &(bh)->b_state);              \
}                                                                       \
static inline int test_clear_buffer_##name(struct buffer_head *bh)      \
{                                                                       \
        return test_and_clear_bit(BH_##bit, &(bh)->b_state);            \
}

/*
 * Emit the buffer bitops functions.  Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */

BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
TAS_BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)

#define bh_offset(bh)           ((unsigned long)(bh)->b_data & ~PAGE_MASK)
#define touch_buffer(bh)        mark_page_accessed((bh)->b_page)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)                                      \
        ({                                                      \
                if (!PagePrivate(page))                         \
                        BUG();                                  \
                ((struct buffer_head *)(page)->private);        \
        })

#define page_has_buffers(page)  PagePrivate(page)
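
/*
 * A minimal usage sketch: callers are expected to check
 * page_has_buffers() before calling page_buffers(), then walk the
 * circular b_this_page ring:
 *
 *      if (page_has_buffers(page)) {
 *              struct buffer_head *head = page_buffers(page);
 *              struct buffer_head *bh = head;
 *
 *              do {
 *                      (operate on bh here)
 *                      bh = bh->b_this_page;
 *              } while (bh != head);
 *      }
 */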

/*
 * Declarations
 */

void FASTCALL(mark_buffer_dirty(struct buffer_head *bh));
void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
void set_bh_page(struct buffer_head *bh,
                struct page *page, unsigned long offset);
int try_to_free_buffers(struct page *);
void create_empty_buffers(struct page *, unsigned long,
                        unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void buffer_insert_list(spinlock_t *lock,
                        struct buffer_head *, struct list_head *);
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int fsync_buffers_list(spinlock_t *lock, struct list_head *);
int sync_mapping_buffers(struct address_space *mapping);
void unmap_underlying_metadata(struct block_device *bdev, sector_t block);

void mark_buffer_async_read(struct buffer_head *bh);
void mark_buffer_async_write(struct buffer_head *bh);
void invalidate_bdev(struct block_device *, int);
int sync_blockdev(struct block_device *bdev);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
void wake_up_buffer(struct buffer_head *bh);
int fsync_bdev(struct block_device *);
int fsync_super(struct super_block *);
int fsync_no_super(struct block_device *);
struct buffer_head *__find_get_block(struct block_device *, sector_t, int);
struct buffer_head *__getblk(struct block_device *, sector_t, int);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, int size);
struct buffer_head *__bread(struct block_device *, sector_t block, int size);
struct buffer_head *alloc_buffer_head(int gfp_flags);
void free_buffer_head(struct buffer_head *bh);
void FASTCALL(unlock_buffer(struct buffer_head *bh));
void ll_rw_block(int, int, struct buffer_head *bh[]);
void sync_dirty_buffer(struct buffer_head *bh);
int submit_bh(int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
                        sector_t bblock, unsigned blocksize);

extern int buffer_heads_over_limit;

/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */

int try_to_release_page(struct page *page, int gfp_mask);
int block_invalidatepage(struct page *page, unsigned long offset);
int block_write_full_page(struct page *page, get_block_t *get_block,
                                struct writeback_control *wbc);
int block_read_full_page(struct page*, get_block_t*);
int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*);
int cont_prepare_write(struct page*, unsigned, unsigned, get_block_t*,
                                loff_t *);
int generic_cont_expand(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_sync_page(struct page *);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int generic_commit_write(struct file *, struct page *, unsigned, unsigned);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int file_fsync(struct file *, struct dentry *, int);
int nobh_prepare_write(struct page*, unsigned, unsigned, get_block_t*);
int nobh_commit_write(struct file *, struct page *, unsigned, unsigned);
int nobh_truncate_page(struct address_space *, loff_t);
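
/*
 * A hypothetical filesystem could wrap these helpers directly in its
 * address_space_operations methods; "myfs_get_block" below is an
 * assumed get_block_t supplied by the filesystem (a sketch, not a
 * complete implementation):
 *
 *      static int myfs_readpage(struct file *file, struct page *page)
 *      {
 *              return block_read_full_page(page, myfs_get_block);
 *      }
 *
 *      static int myfs_writepage(struct page *page,
 *                                struct writeback_control *wbc)
 *      {
 *              return block_write_full_page(page, myfs_get_block, wbc);
 *      }
 */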

#define OSYNC_METADATA  (1<<0)
#define OSYNC_DATA      (1<<1)
#define OSYNC_INODE     (1<<2)
int generic_osync_inode(struct inode *, int);
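
/*
 * Typical use, e.g. in an fsync or O_SYNC write path, is to flush both
 * data and metadata for an inode in one call (a hedged sketch):
 *
 *      err = generic_osync_inode(inode, OSYNC_METADATA | OSYNC_DATA);
 */
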
/*
 * inline definitions
 */

static inline void get_bh(struct buffer_head *bh)
{
        atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
        smp_mb__before_atomic_dec();
        atomic_dec(&bh->b_count);
}

static inline void brelse(struct buffer_head *bh)
{
        if (bh)
                __brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
        if (bh)
                __bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
        return __bread(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
        __breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
        return __getblk(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
        return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}
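
/*
 * The classic metadata read pattern built on these helpers; "buf" is a
 * hypothetical destination and error handling is trimmed (sketch only):
 *
 *      struct buffer_head *bh = sb_bread(sb, block);
 *
 *      if (!bh)
 *              return -EIO;
 *      memcpy(buf, bh->b_data, sb->s_blocksize);
 *      brelse(bh);
 */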

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
        set_buffer_mapped(bh);
        bh->b_bdev = sb->s_bdev;
        bh->b_blocknr = block;
}
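
/*
 * map_bh() is typically called from a filesystem's get_block_t once a
 * logical block has been resolved; "myfs_get_block" and "myfs_lookup"
 * are hypothetical names (sketch only):
 *
 *      static int myfs_get_block(struct inode *inode, sector_t iblock,
 *                                struct buffer_head *bh, int create)
 *      {
 *              sector_t phys = myfs_lookup(inode, iblock);
 *
 *              if (phys)
 *                      map_bh(bh, inode->i_sb, phys);
 *              return 0;
 *      }
 */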

/*
 * Calling wait_on_buffer() for a zero-ref buffer is illegal, so we call into
 * __wait_on_buffer() just to trip a debug check there: debug code in inline
 * functions would bloat every call site.
 */

static inline void wait_on_buffer(struct buffer_head *bh)
{
        if (buffer_locked(bh) || atomic_read(&bh->b_count) == 0)
                __wait_on_buffer(bh);
}

static inline void lock_buffer(struct buffer_head *bh)
{
        while (test_set_buffer_locked(bh))
                __wait_on_buffer(bh);
}
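
/*
 * These primitives combine into the usual synchronous write pattern,
 * roughly what sync_dirty_buffer() does (a sketch; note that
 * end_buffer_write_sync() drops the reference taken by get_bh()):
 *
 *      lock_buffer(bh);
 *      if (test_clear_buffer_dirty(bh)) {
 *              get_bh(bh);
 *              bh->b_end_io = end_buffer_write_sync;
 *              submit_bh(WRITE, bh);
 *              wait_on_buffer(bh);
 *      } else {
 *              unlock_buffer(bh);
 *      }
 */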

#endif /* _LINUX_BUFFER_HEAD_H */