#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
#define AS_EIO		(__GFP_BITS_SHIFT + 0)	/* IO error on async write */
#define AS_ENOSPC	(__GFP_BITS_SHIFT + 1)	/* ENOSPC on async write */

static inline int mapping_gfp_mask(struct address_space * mapping)
{
	return mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, int mask)
{
	m->flags = (m->flags & ~__GFP_BITS_MASK) | mask;
}
/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

/*
 * Allocate a page-cache page using the mapping's allocation flags.  The
 * _cold variant hints that the new page is expected to be cache-cold.
 */
static inline struct page *page_cache_alloc(struct address_space *x)
{
	return alloc_pages(mapping_gfp_mask(x), 0);
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
}

typedef int filler_t(void *, struct page *);

extern struct page * find_get_page(struct address_space *mapping,
				unsigned long index);
extern struct page * find_lock_page(struct address_space *mapping,
				unsigned long index);
extern struct page * find_trylock_page(struct address_space *mapping,
				unsigned long index);
extern struct page * find_or_create_page(struct address_space *mapping,
				unsigned long index, unsigned int gfp_mask);
extern unsigned int find_get_pages(struct address_space *mapping,
				pgoff_t start, unsigned int nr_pages,
				struct page **pages);
/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping, unsigned long index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
extern struct page * grab_cache_page_nowait(struct address_space *mapping,
				unsigned long index);
extern struct page * read_cache_page(struct address_space *mapping,
				unsigned long index, filler_t *filler,
				void *data);
extern int read_cache_pages(struct address_space *mapping,
				struct list_head *pages, filler_t *filler, void *data);
int add_to_page_cache(struct page *page, struct address_space *mapping,
				unsigned long index, int gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				unsigned long index, int gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);
extern atomic_t nr_pagecache;

#ifdef CONFIG_SMP

#define PAGECACHE_ACCT_THRESHOLD	max(16, NR_CPUS * 2)
DECLARE_PER_CPU(long, nr_pagecache_local);

/*
 * pagecache_acct implements approximate accounting for pagecache.
 * vm_enough_memory() does not need high accuracy.  Writers will keep
 * an offset in their per-cpu arena and will spill that into the
 * global count whenever the absolute value of the local count
 * exceeds the counter's threshold.
 *
 * MUST be protected from preemption.
 * Current protection is mapping->page_lock.
 */
static inline void pagecache_acct(int count)
{
	long *local;

	local = &__get_cpu_var(nr_pagecache_local);
	*local += count;
	if (*local > PAGECACHE_ACCT_THRESHOLD || *local < -PAGECACHE_ACCT_THRESHOLD) {
		atomic_add(*local, &nr_pagecache);
		*local = 0;
	}
}

#else

static inline void pagecache_acct(int count)
{
	atomic_add(count, &nr_pagecache);
}
#endif
static inline unsigned long get_page_cache_size(void)
{
	return atomic_read(&nr_pagecache);
}

/*
 * Low-level insert: link the page onto the mapping's clean list, set up
 * its back-pointers and account it.  The caller holds mapping->page_lock.
 */
static inline void ___add_to_page_cache(struct page *page,
		struct address_space *mapping, unsigned long index)
{
	list_add(&page->list, &mapping->clean_pages);
	page->mapping = mapping;
	page->index = index;

	mapping->nrpages++;
	pagecache_acct(1);
}

extern void FASTCALL(__lock_page(struct page *page));
extern void FASTCALL(unlock_page(struct page *page));

/*
 * Acquire the page lock: the fast path is an atomic test-and-set, the slow
 * path waits in __lock_page() until the current holder unlocks.
 */
static inline void lock_page(struct page *page)
{
	if (TestSetPageLocked(page))
		__lock_page(page);
}
/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void FASTCALL(wait_on_page_bit(struct page *page, int bit_nr));

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline void fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	ret = __get_user(c, (char *)uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			__get_user(c, (char *)end);
	}
}
#endif /* _LINUX_PAGEMAP_H */