/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/wait.h>
#include <asm/atomic.h>

enum bh_state_bits {
        BH_Uptodate,    /* Contains valid data */
        BH_Dirty,       /* Is dirty */
        BH_Lock,        /* Is locked */
        BH_Req,         /* Has been submitted for I/O */

        BH_Mapped,      /* Has a disk mapping */
        BH_New,         /* Disk mapping was newly created by get_block */
        BH_Async_Read,  /* Is under end_buffer_async_read I/O */
        BH_Async_Write, /* Is under end_buffer_async_write I/O */
        BH_Delay,       /* Buffer is not yet allocated on disk */
        BH_Boundary,    /* Block is followed by a discontiguity */
        BH_Write_EIO,   /* I/O error on write */

        BH_PrivateStart,/* not a state bit, but the first bit available
                         * for private allocation by other entities
                         */
};

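/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * an outside entity such as a journaling layer claims its private state
 * bits starting at BH_PrivateStart.  The enum and names below are
 * hypothetical.
 *
 *      enum example_private_bits {
 *              BH_Example_Journaled = BH_PrivateStart,
 *              BH_Example_Revoked,
 *      };
 */
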
#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)

struct page;
struct buffer_head;
struct address_space;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Keep related fields in common cachelines.  The most commonly accessed
 * field (b_state) goes at the start so the compiler does not generate
 * indexed addressing for it.
 */
struct buffer_head {
        /* First cache line: */
        unsigned long b_state;          /* buffer state bitmap (see above) */
        atomic_t b_count;               /* users using this block */
        struct buffer_head *b_this_page;/* circular list of page's buffers */
        struct page *b_page;            /* the page this bh is mapped to */

        sector_t b_blocknr;             /* block number */
        u32 b_size;                     /* block size */
        char *b_data;                   /* pointer to data block */

        struct block_device *b_bdev;
        bh_end_io_t *b_end_io;          /* I/O completion */
        void *b_private;                /* reserved for b_end_io */
        struct list_head b_assoc_buffers; /* associated with another mapping */
};

/*
 * Debug
 */

void __buffer_error(char *file, int line);
#define buffer_error() __buffer_error(__FILE__, __LINE__)

/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 */
#define BUFFER_FNS(bit, name)                                           \
static inline void set_buffer_##name(struct buffer_head *bh)           \
{                                                                       \
        set_bit(BH_##bit, &(bh)->b_state);                              \
}                                                                       \
static inline void clear_buffer_##name(struct buffer_head *bh)         \
{                                                                       \
        clear_bit(BH_##bit, &(bh)->b_state);                            \
}                                                                       \
static inline int buffer_##name(struct buffer_head *bh)                \
{                                                                       \
        return test_bit(BH_##bit, &(bh)->b_state);                      \
}
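
/*
 * For illustration (editor's sketch): BUFFER_FNS(Uptodate, uptodate),
 * emitted below, expands mechanically to these three helpers:
 *
 *      static inline void set_buffer_uptodate(struct buffer_head *bh)
 *      {
 *              set_bit(BH_Uptodate, &(bh)->b_state);
 *      }
 *      static inline void clear_buffer_uptodate(struct buffer_head *bh)
 *      {
 *              clear_bit(BH_Uptodate, &(bh)->b_state);
 *      }
 *      static inline int buffer_uptodate(struct buffer_head *bh)
 *      {
 *              return test_bit(BH_Uptodate, &(bh)->b_state);
 *      }
 */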

/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)                                       \
static inline int test_set_buffer_##name(struct buffer_head *bh)       \
{                                                                       \
        return test_and_set_bit(BH_##bit, &(bh)->b_state);              \
}                                                                       \
static inline int test_clear_buffer_##name(struct buffer_head *bh)     \
{                                                                       \
        return test_and_clear_bit(BH_##bit, &(bh)->b_state);            \
}

/*
 * Emit the buffer bitops functions.  Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
TAS_BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)

#define bh_offset(bh)           ((unsigned long)(bh)->b_data & ~PAGE_MASK)
#define touch_buffer(bh)        mark_page_accessed(bh->b_page)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)                                      \
        ({                                                      \
                if (!PagePrivate(page))                         \
                        BUG();                                  \
                ((struct buffer_head *)(page)->private);        \
        })
#define page_has_buffers(page)  PagePrivate(page)

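/*
 * Illustrative usage (editor's sketch): walk a page's circular list of
 * buffers via b_this_page.  page_buffers() BUGs on pages without
 * buffers, so guard the call with page_has_buffers().
 *
 *      if (page_has_buffers(page)) {
 *              struct buffer_head *head = page_buffers(page);
 *              struct buffer_head *bh = head;
 *
 *              do {
 *                      if (buffer_dirty(bh))
 *                              ... act on the dirty buffer ...
 *                      bh = bh->b_this_page;
 *              } while (bh != head);
 *      }
 */
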
/*
 * Declarations
 */

void FASTCALL(mark_buffer_dirty(struct buffer_head *bh));
void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
void set_bh_page(struct buffer_head *bh,
                struct page *page, unsigned long offset);
int try_to_free_buffers(struct page *);
void create_empty_buffers(struct page *, unsigned long,
                        unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void buffer_insert_list(spinlock_t *lock,
                struct buffer_head *, struct list_head *);
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int fsync_buffers_list(spinlock_t *lock, struct list_head *);
int sync_mapping_buffers(struct address_space *mapping);
void unmap_underlying_metadata(struct block_device *bdev, sector_t block);

void mark_buffer_async_read(struct buffer_head *bh);
void mark_buffer_async_write(struct buffer_head *bh);
void invalidate_bdev(struct block_device *, int);
int sync_blockdev(struct block_device *bdev);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
void wake_up_buffer(struct buffer_head *bh);
int fsync_bdev(struct block_device *);
int fsync_super(struct super_block *);
int fsync_no_super(struct block_device *);
struct buffer_head *__find_get_block(struct block_device *, sector_t, int);
struct buffer_head *__getblk(struct block_device *, sector_t, int);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, int size);
struct buffer_head *__bread(struct block_device *, sector_t block, int size);
struct buffer_head *alloc_buffer_head(int gfp_flags);
void free_buffer_head(struct buffer_head *bh);
void FASTCALL(unlock_buffer(struct buffer_head *bh));
void ll_rw_block(int, int, struct buffer_head *bh[]);
void sync_dirty_buffer(struct buffer_head *bh);
int submit_bh(int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
                        sector_t bblock, unsigned blocksize);

extern int buffer_heads_over_limit;

/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
int try_to_release_page(struct page *page, int gfp_mask);
int block_invalidatepage(struct page *page, unsigned long offset);
int block_write_full_page(struct page *page, get_block_t *get_block,
                        struct writeback_control *wbc);
int block_read_full_page(struct page *, get_block_t *);
int block_prepare_write(struct page *, unsigned, unsigned, get_block_t *);
int cont_prepare_write(struct page *, unsigned, unsigned, get_block_t *,
                        loff_t *);
int generic_cont_expand(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_sync_page(struct page *);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int generic_commit_write(struct file *, struct page *, unsigned, unsigned);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int file_fsync(struct file *, struct dentry *, int);
int nobh_prepare_write(struct page *, unsigned, unsigned, get_block_t *);
int nobh_commit_write(struct file *, struct page *, unsigned, unsigned);
int nobh_truncate_page(struct address_space *, loff_t);

#define OSYNC_METADATA  (1<<0)
#define OSYNC_DATA      (1<<1)
#define OSYNC_INODE     (1<<2)
int generic_osync_inode(struct inode *, int);


/*
 * inline definitions
 */

static inline void get_bh(struct buffer_head *bh)
{
        atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
        smp_mb__before_atomic_dec();
        atomic_dec(&bh->b_count);
}

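/*
 * Illustrative refcount pairing (editor's sketch): a caller pins a buffer
 * with get_bh() before starting async I/O, and the completion handler
 * drops the reference, roughly as sync_dirty_buffer() does:
 *
 *      get_bh(bh);
 *      bh->b_end_io = end_buffer_write_sync;
 *      submit_bh(WRITE, bh);
 *      wait_on_buffer(bh);     (end_buffer_write_sync drops b_count)
 */
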
static inline void brelse(struct buffer_head *bh)
{
        if (bh)
                __brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
        if (bh)
                __bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
        return __bread(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
        __breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
        return __getblk(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
        return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

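/*
 * Typical calling pattern (editor's sketch): sb_bread() returns the
 * buffer with an elevated b_count, or NULL if the read failed, so each
 * successful call is paired with brelse():
 *
 *      struct buffer_head *bh = sb_bread(sb, block);
 *
 *      if (!bh)
 *              return -EIO;
 *      ... examine bh->b_data ...
 *      brelse(bh);
 */
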
static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
        set_buffer_mapped(bh);
        bh->b_bdev = sb->s_bdev;
        bh->b_blocknr = block;
}

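/*
 * Illustrative use (editor's sketch): a filesystem's get_block_t callback
 * maps the result buffer with map_bh() and flags a freshly allocated
 * block with set_buffer_new().  The names below are hypothetical.
 *
 *      static int example_get_block(struct inode *inode, sector_t iblock,
 *                                   struct buffer_head *bh_result, int create)
 *      {
 *              sector_t phys;
 *
 *              ... look up (or, if create, allocate) the block into phys ...
 *              map_bh(bh_result, inode->i_sb, phys);
 *              if (... block was newly allocated ...)
 *                      set_buffer_new(bh_result);
 *              return 0;
 *      }
 */
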
/*
 * Calling wait_on_buffer() for a zero-ref buffer is illegal, so we call into
 * __wait_on_buffer() just to trip a debug check there, because debug code in
 * inline functions is bloaty.
 */
static inline void wait_on_buffer(struct buffer_head *bh)
{
        if (buffer_locked(bh) || atomic_read(&bh->b_count) == 0)
                __wait_on_buffer(bh);
}

static inline void lock_buffer(struct buffer_head *bh)
{
        while (test_set_buffer_locked(bh))
                __wait_on_buffer(bh);
}

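/*
 * Illustrative locking pattern (editor's sketch): lock_buffer() loops,
 * sleeping in __wait_on_buffer() until it wins the BH_Lock bit; the
 * owner releases it with unlock_buffer(), declared above:
 *
 *      lock_buffer(bh);
 *      ... modify the buffer contents ...
 *      mark_buffer_dirty(bh);
 *      unlock_buffer(bh);
 */
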
#endif /* _LINUX_BUFFER_HEAD_H */