/*
 * 2.5 block I/O model
 *
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>

/* Platforms may set this to teach the BIO layer about IOMMU hardware. */
#include <asm/io.h>
#ifndef BIO_VMERGE_BOUNDARY
#define BIO_VMERGE_BOUNDARY     0
#endif

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON      BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES           (256)
#define BIO_MAX_SIZE            (BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
#define BIO_MAX_SECTORS         (BIO_MAX_SIZE >> 9)

/*
 * was unsigned short, but we might as well be ready for > 64kB I/O pages
 */
struct bio_vec {
        struct page     *bv_page;
        unsigned int    bv_len;
        unsigned int    bv_offset;
};

struct bio;
typedef int (bio_end_io_t) (struct bio *, unsigned int, int);
typedef void (bio_destructor_t) (struct bio *);

/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
        sector_t                bi_sector;
        struct bio              *bi_next;       /* request queue link */
        struct block_device     *bi_bdev;
        unsigned long           bi_flags;       /* status, command, etc */
        unsigned long           bi_rw;          /* bottom bits READ/WRITE,
                                                 * top bits priority
                                                 */

        unsigned short          bi_vcnt;        /* how many bio_vec's */
        unsigned short          bi_idx;         /* current index into bvl_vec */

        /* Number of segments in this BIO after
         * physical address coalescing is performed.
         */
        unsigned short          bi_phys_segments;

        /* Number of segments after physical and DMA remapping
         * hardware coalescing is performed.
         */
        unsigned short          bi_hw_segments;

        unsigned int            bi_size;        /* residual I/O count */
        unsigned int            bi_max_vecs;    /* max bvl_vecs we can hold */

        struct bio_vec          *bi_io_vec;     /* the actual vec list */

        bio_end_io_t            *bi_end_io;
        atomic_t                bi_cnt;         /* pin count */

        void                    *bi_private;

        bio_destructor_t        *bi_destructor; /* destructor */
};

/*
 * bio flags
 */
#define BIO_UPTODATE    0       /* ok after I/O completion */
#define BIO_RW_BLOCK    1       /* RW_AHEAD set, and read/write would block */
#define BIO_EOF         2       /* out-of-bounds error */
#define BIO_SEG_VALID   3       /* nr_hw_seg valid */
#define BIO_CLONED      4       /* doesn't own data */
#define BIO_BOUNCED     5       /* bio is a bounce bio */
#define bio_flagged(bio, flag)  ((bio)->bi_flags & (1 << (flag)))
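
/*
 * Illustrative sketch (hypothetical helper, not part of this header's
 * API): completion code typically tests BIO_UPTODATE with bio_flagged()
 * to tell success from failure.
 */
static inline int bio_example_io_ok(struct bio *bio)
{
        return bio_flagged(bio, BIO_UPTODATE);  /* nonzero on success */
}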

/*
 * top 4 bits of bio flags indicate the pool this bio came from
 */
#define BIO_POOL_BITS           (4)
#define BIO_POOL_OFFSET         (BITS_PER_LONG - BIO_POOL_BITS)
#define BIO_POOL_MASK           (1UL << BIO_POOL_OFFSET)
#define BIO_POOL_IDX(bio)       ((bio)->bi_flags >> BIO_POOL_OFFSET)
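
/*
 * Illustrative sketch (hypothetical helper): storing a pool index in the
 * top bits of bi_flags and reading it back, assuming the pool bits start
 * out clear. With BITS_PER_LONG == 32 and BIO_POOL_BITS == 4, the index
 * occupies bits 28..31.
 */
static inline unsigned long bio_example_pool_roundtrip(struct bio *bio,
                                                       unsigned long idx)
{
        bio->bi_flags |= idx << BIO_POOL_OFFSET;        /* encode index */
        return BIO_POOL_IDX(bio);                       /* recovers idx */
}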

/*
 * bio bi_rw flags
 *
 * bit 0 -- read (not set) or write (set)
 * bit 1 -- rw-ahead when set
 * bit 2 -- barrier
 * bit 3 -- fail fast, don't want low level driver retries
 */
#define BIO_RW          0
#define BIO_RW_AHEAD    1
#define BIO_RW_BARRIER  2
#define BIO_RW_FAILFAST 3
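
/*
 * Illustrative sketch (hypothetical helper): composing bi_rw for a write
 * that must act as an I/O barrier, per the bit layout above.
 */
static inline void bio_example_mark_barrier_write(struct bio *bio)
{
        bio->bi_rw |= (1 << BIO_RW) | (1 << BIO_RW_BARRIER);
}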

/*
 * various member access, note that bio_data should of course not be used
 * on highmem page vectors
 */
#define bio_iovec_idx(bio, idx) (&((bio)->bi_io_vec[(idx)]))
#define bio_iovec(bio)          bio_iovec_idx((bio), (bio)->bi_idx)
#define bio_page(bio)           bio_iovec((bio))->bv_page
#define bio_offset(bio)         bio_iovec((bio))->bv_offset
#define bio_segments(bio)       ((bio)->bi_vcnt - (bio)->bi_idx)
#define bio_sectors(bio)        ((bio)->bi_size >> 9)
#define bio_cur_sectors(bio)    (bio_iovec(bio)->bv_len >> 9)
#define bio_data(bio)           (page_address(bio_page((bio))) + bio_offset((bio)))
#define bio_barrier(bio)        ((bio)->bi_rw & (1 << BIO_RW_BARRIER))
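
/*
 * Illustrative sketch (hypothetical helper): the accessors above in use.
 * bio_data() is only valid for lowmem pages, as noted.
 */
static inline void *bio_example_current_data(struct bio *bio,
                                             unsigned int *sectors_left)
{
        *sectors_left = bio_sectors(bio);       /* 512-byte sectors remaining */
        return bio_data(bio);                   /* kernel address of current vec */
}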

/*
 * will die
 */
#define bio_to_phys(bio)        (page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)        (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * permanent PIO fall back, user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, idx, kmtype)                             \
        (kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page, kmtype) +    \
                bio_iovec_idx((bio), (idx))->bv_offset)

#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr, kmtype)
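
/*
 * Illustrative sketch (hypothetical helper; KM_USER0 is just an example
 * kmap slot, and memcpy is assumed available via <linux/string.h>): a
 * PIO-style copy out of one bio segment using the atomic helpers above.
 */
static inline void bio_example_pio_copy_out(struct bio *bio, int idx, void *dst)
{
        char *src = __bio_kmap_atomic(bio, idx, KM_USER0);

        memcpy(dst, src, bio_iovec_idx(bio, idx)->bv_len);
        __bio_kunmap_atomic(src, KM_USER0);
}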

/*
 * merge helpers etc
 */

#define __BVEC_END(bio)         bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
#define __BVEC_START(bio)       bio_iovec_idx((bio), 0)
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)       \
        ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
#define BIOVEC_VIRT_MERGEABLE(vec1, vec2)       \
        ((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (BIO_VMERGE_BOUNDARY - 1)) == 0)
#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
        (((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
        __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask)
#define BIO_SEG_BOUNDARY(q, b1, b2) \
        BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
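
/*
 * Illustrative sketch (hypothetical helper): the check a merge path would
 * make before gluing two bios into one request, namely whether the last
 * segment of the first and the first segment of the second are physically
 * contiguous.
 */
static inline int bio_example_phys_mergeable(struct bio *prev, struct bio *next)
{
        return BIOVEC_PHYS_MERGEABLE(__BVEC_END(prev), __BVEC_START(next));
}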

#define bio_io_error(bio, bytes) bio_endio((bio), (bytes), -EIO)

/*
 * drivers should not use the __ version unless they _really_ want to
 * run through the entire bio and not just pending pieces
 */
#define __bio_for_each_segment(bvl, bio, i, start_idx)                  \
        for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);  \
             i < (bio)->bi_vcnt;                                        \
             bvl++, i++)

#define bio_for_each_segment(bvl, bio, i)                               \
        __bio_for_each_segment(bvl, bio, i, (bio)->bi_idx)
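
/*
 * Illustrative sketch (hypothetical helper): summing the bytes still
 * pending in a bio by walking its unprocessed segments.
 */
static inline unsigned int bio_example_pending_bytes(struct bio *bio)
{
        struct bio_vec *bvl;
        unsigned int bytes = 0;
        int i;

        bio_for_each_segment(bvl, bio, i)
                bytes += bvl->bv_len;

        return bytes;
}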

/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *      do_something
 * bio_put(bio);
 *
 * without the bio_get(), the I/O could potentially complete before
 * submit_bio returns, and the bio would already be freed by the time the
 * if (bio->bi_flags ...) test runs
 */
#define bio_get(bio)    atomic_inc(&(bio)->bi_cnt)
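
/*
 * Illustrative sketch of the pattern above, made compilable. The helper
 * name is hypothetical; submit_bio() lives in ll_rw_blk and bio_put() is
 * declared further down this header, and the local extern prototypes
 * (including submit_bio()'s int return) are assumptions for this kernel.
 */
static inline int bio_example_submit_pinned(int rw, struct bio *bio)
{
        extern int submit_bio(int, struct bio *);
        extern void bio_put(struct bio *);
        int uptodate;

        bio_get(bio);                   /* pin: completion can't free it */
        submit_bio(rw, bio);
        uptodate = bio_flagged(bio, BIO_UPTODATE);      /* bio still valid */
        bio_put(bio);                   /* drop our reference */
        return uptodate;
}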


/*
 * A bio_pair is used when we need to split a bio.
 * This can only happen for a bio that refers to just one
 * page of data, and in the unusual situation when the
 * page crosses a chunk/device boundary
 *
 * The address of the master bio is stored in bio1.bi_private
 * The address of the pool the pair was allocated from is stored
 *   in bio2.bi_private
 */
struct bio_pair {
        struct bio      bio1, bio2;
        struct bio_vec  bv1, bv2;
        atomic_t        cnt;
        int             error;
};
extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool,
                                  int first_sectors);
extern mempool_t *bio_split_pool;
extern void bio_pair_release(struct bio_pair *dbio);
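
/*
 * Illustrative sketch (hypothetical helper): splitting a single-page bio
 * that straddles a boundary, as a striping driver would, then dropping
 * the pair's initial reference once both halves are on their way.
 */
static inline void bio_example_split(struct bio *bio, int first_sectors)
{
        struct bio_pair *bp = bio_split(bio, bio_split_pool, first_sectors);

        /* ... submit bp->bio1 (first_sectors) and bp->bio2 (the rest) ... */

        bio_pair_release(bp);
}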

extern struct bio *bio_alloc(int, int);
extern void bio_put(struct bio *);

extern void bio_endio(struct bio *, unsigned int, int);
struct request_queue;
extern inline int bio_phys_segments(struct request_queue *, struct bio *);
extern inline int bio_hw_segments(struct request_queue *, struct bio *);

extern inline void __bio_clone(struct bio *, struct bio *);
extern struct bio *bio_clone(struct bio *, int);

extern inline void bio_init(struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
extern struct bio *bio_map_user(struct block_device *, unsigned long,
                                unsigned int, int);
extern void bio_unmap_user(struct bio *, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
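
/*
 * Illustrative sketch (hypothetical helper; GFP_NOIO, the gfp-first
 * argument order of bio_alloc() and the (len, offset) order of
 * bio_add_page() are assumptions): building a one-page bio with the
 * allocation helpers above.
 */
static inline struct bio *bio_example_build(struct block_device *bdev,
                                            struct page *page, sector_t sector)
{
        struct bio *bio = bio_alloc(GFP_NOIO, 1);       /* one vec's worth */

        if (!bio)
                return NULL;

        bio->bi_bdev = bdev;
        bio->bi_sector = sector;
        if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {   /* 0 => didn't fit */
                bio_put(bio);
                return NULL;
        }
        return bio;
}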

#ifdef CONFIG_HIGHMEM
/*
 * remember to add offset! and never ever reenable interrupts between a
 * bvec_kmap_irq and bvec_kunmap_irq!!
 *
 * This function MUST be inlined - it plays with the CPU interrupt flags.
 * Hence the `extern inline'.
 */
extern inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
        unsigned long addr;

        /*
         * might not be a highmem page, but the preempt/irq count
         * balancing is a lot nicer this way
         */
        local_irq_save(*flags);
        addr = (unsigned long) kmap_atomic(bvec->bv_page, KM_BIO_SRC_IRQ);

        if (addr & ~PAGE_MASK)
                BUG();

        return (char *) addr + bvec->bv_offset;
}

extern inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
        unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

        kunmap_atomic((void *) ptr, KM_BIO_SRC_IRQ);
        local_irq_restore(*flags);
}

#else
#define bvec_kmap_irq(bvec, flags)      (page_address((bvec)->bv_page) + (bvec)->bv_offset)
#define bvec_kunmap_irq(buf, flags)     do { *(flags) = 0; } while (0)
#endif

extern inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
                                   unsigned long *flags)
{
        return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
}
#define __bio_kunmap_irq(buf, flags)    bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
        __bio_kmap_irq((bio), (bio)->bi_idx, (flags))
#define bio_kunmap_irq(buf,flags)       __bio_kunmap_irq(buf, flags)
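
/*
 * Illustrative sketch (hypothetical helper; memcpy assumed available via
 * <linux/string.h>): copying out of the current segment with the IRQ-safe
 * helpers. Interrupts stay off between map and unmap, per the warning
 * above bvec_kmap_irq().
 */
static inline void bio_example_irq_copy_out(struct bio *bio, void *dst)
{
        unsigned long flags;
        char *buf = bio_kmap_irq(bio, &flags);

        memcpy(dst, buf, bio_iovec(bio)->bv_len);
        bio_kunmap_irq(buf, &flags);
}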

#endif /* __LINUX_BIO_H */