#include <linuxcomp.h>

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>

/*
 * Pool allocator ... wraps the pci_alloc_consistent page allocator, so
 * small blocks are easily used by drivers for bus mastering controllers.
 * This should probably be sharing the guts of the slab allocator.
 */

struct pci_pool {		/* the pool */
	struct list_head	page_list;
	spinlock_t		lock;
	size_t			blocks_per_page;
	size_t			size;
	struct pci_dev		*dev;
	size_t			allocation;
	char			name [32];
	wait_queue_head_t	waitq;
	struct list_head	pools;
};

struct pci_page {		/* cacheable header for 'allocation' bytes */
	struct list_head	page_list;
	void			*vaddr;
	dma_addr_t		dma;
	unsigned		in_use;
	unsigned long		bitmap [0];
};

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)
#define	POOL_POISON_FREED	0xa7	/* !inuse */
#define	POOL_POISON_ALLOCATED	0xa9	/* !initted */

static DECLARE_MUTEX (pools_lock);

/* sysfs "pools" attribute: one summary line per pci_pool on this device */
static ssize_t
show_pools (struct device *dev, char *buf)
{
	struct pci_dev		*pdev;
	unsigned		temp, size;
	char			*next;
	struct list_head	*i, *j;

	pdev = container_of (dev, struct pci_dev, dev);
	next = buf;
	size = PAGE_SIZE;

	temp = snprintf26(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	down (&pools_lock);
	list_for_each (i, &pdev->pools) {
		struct pci_pool	*pool;
		unsigned	pages = 0, blocks = 0;

		pool = list_entry (i, struct pci_pool, pools);

		list_for_each (j, &pool->page_list) {
			struct pci_page	*page;

			page = list_entry (j, struct pci_page, page_list);
			pages++;
			blocks += page->in_use;
		}

		/* per-pool info, no real statistics yet;
		 * "%u" matches the (unsigned int) casts on the size_t values */
		temp = snprintf26(next, size, "%-16s %4u %4u %4u %2u\n",
				pool->name,
				blocks, (unsigned int)(pages * pool->blocks_per_page),
				(unsigned int)(pool->size), pages);
		size -= temp;
		next += temp;
	}
	up (&pools_lock);

	return PAGE_SIZE - size;
}
static DEVICE_ATTR (pools, S_IRUGO, show_pools, NULL);

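/*
 * Illustrative sample of the sysfs output produced above (the pool name
 * and all counts are made-up values), one line per pool in the order
 * name, blocks in use, total blocks, block size, pages:
 *
 *	poolinfo - 0.1
 *	buffer-2048        12   16 2048  8
 */
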
/**
 * pci_pool_create - Creates a pool of pci consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @pdev: pci device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 * Context: !in_interrupt()
 *
 * Returns a pci allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, pci_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If allocation is nonzero, objects returned from pci_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct pci_pool *
pci_pool_create (const char *name, struct pci_dev *pdev,
	size_t size, size_t align, size_t allocation)
{
	struct pci_pool		*retval;

	if (align == 0)
		align = 1;
	if (size == 0)
		return 0;
	else if (size < align)
		size = align;
	else if ((size % align) != 0) {
		/* round up to the next multiple of align (a power of two) */
		size += align - 1;
		size &= ~(align - 1);
	}

	if (allocation == 0) {
		if (PAGE_SIZE < size)
			allocation = size;
		else
			allocation = PAGE_SIZE;
		// FIXME: round up for less fragmentation
	} else if (allocation < size)
		return 0;

	if (!(retval = kmalloc (sizeof *retval, SLAB_KERNEL)))
		return retval;

	strlcpy (retval->name, name, sizeof retval->name);

	retval->dev = pdev;

	INIT_LIST_HEAD (&retval->page_list);
	spin_lock_init (&retval->lock);
	retval->size = size;
	retval->allocation = allocation;
	retval->blocks_per_page = allocation / size;
	init_waitqueue_head (&retval->waitq);

	if (pdev) {
		down (&pools_lock);
		if (list_empty (&pdev->pools))
			device_create_file (&pdev->dev, &dev_attr_pools);
		/* note: not currently insisting "name" be unique */
		list_add (&retval->pools, &pdev->pools);
		up (&pools_lock);
	} else
		INIT_LIST_HEAD (&retval->pools);

	return retval;
}
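
/*
 * Usage sketch (illustrative only, not part of this file): a hypothetical
 * driver pairing pci_pool_create() with pci_pool_alloc()/pci_pool_free().
 * "my_driver_setup", "my_dev", "MY_DESC_SIZE", "desc" and "desc_dma" are
 * invented names for the example.
 */
#if 0
static int my_driver_setup (struct pci_dev *my_dev)
{
	struct pci_pool	*pool;
	void		*desc;
	dma_addr_t	desc_dma;

	/* 16-byte aligned blocks that never cross a 4 KByte boundary */
	pool = pci_pool_create ("my_desc", my_dev, MY_DESC_SIZE, 16, 4096);
	if (!pool)
		return -ENOMEM;

	desc = pci_pool_alloc (pool, SLAB_KERNEL, &desc_dma);
	if (!desc) {
		pci_pool_destroy (pool);
		return -ENOMEM;
	}

	/* ... program desc_dma into the controller, touch desc from the CPU ... */

	pci_pool_free (pool, desc, desc_dma);
	pci_pool_destroy (pool);
	return 0;
}
#endif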


/* allocate one 'allocation'-sized consistent chunk plus its bookkeeping header */
static struct pci_page *
pool_alloc_page (struct pci_pool *pool, int mem_flags)
{
	struct pci_page	*page;
	int		mapsize;

	mapsize = pool->blocks_per_page;
	mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
	mapsize *= sizeof (long);

	page = (struct pci_page *) kmalloc (mapsize + sizeof *page, mem_flags);
	if (!page)
		return 0;
	page->vaddr = pci_alloc_consistent (pool->dev,
					    pool->allocation,
					    &page->dma);
	if (page->vaddr) {
		memset (page->bitmap, 0xff, mapsize);	// bit set == free
#ifdef	CONFIG_DEBUG_SLAB
		memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		list_add (&page->page_list, &pool->page_list);
		page->in_use = 0;
	} else {
		kfree (page);
		page = 0;
	}
	return page;
}


/* a page is busy while any of its blocks is allocated (a cleared bit) */
static inline int
is_page_busy (int blocks, unsigned long *bitmap)
{
	while (blocks > 0) {
		if (*bitmap++ != ~0UL)
			return 1;
		blocks -= BITS_PER_LONG;
	}
	return 0;
}
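
/*
 * Example (values invented): with BITS_PER_LONG == 32 and
 * blocks_per_page == 40, the bitmap spans two longs.  The padding bits
 * past bit 39 were initialized to 1 by pool_alloc_page() and are never
 * cleared, so comparing each word against ~0UL is safe: the page is
 * idle exactly when every real block's bit is still set (set == free).
 */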

/* return one chunk (and its bookkeeping header) back to the system */
static void
pool_free_page (struct pci_pool *pool, struct pci_page *page)
{
	dma_addr_t	dma = page->dma;

#ifdef	CONFIG_DEBUG_SLAB
	memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	pci_free_consistent (pool->dev, pool->allocation, page->vaddr, dma);
	list_del (&page->page_list);
	kfree (page);
}


/**
 * pci_pool_destroy - destroys a pool of pci memory blocks.
 * @pool: pci pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void
pci_pool_destroy (struct pci_pool *pool)
{
	down (&pools_lock);
	list_del (&pool->pools);
	if (pool->dev && list_empty (&pool->dev->pools))
		device_remove_file (&pool->dev->dev, &dev_attr_pools);
	up (&pools_lock);

	while (!list_empty (&pool->page_list)) {
		struct pci_page		*page;
		page = list_entry (pool->page_list.next,
				struct pci_page, page_list);
		if (is_page_busy (pool->blocks_per_page, page->bitmap)) {
			printk (KERN_ERR "pci_pool_destroy %s/%s, %p busy\n",
				pool->dev ? pci_name(pool->dev) : NULL,
				pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del (&page->page_list);
			kfree (page);
		} else
			pool_free_page (pool, page);
	}

	kfree (pool);
}


/**
 * pci_pool_alloc - get a block of consistent memory
 * @pool: pci pool that will produce the block
 * @mem_flags: SLAB_KERNEL or SLAB_ATOMIC
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, null is returned.
 */
void *
pci_pool_alloc (struct pci_pool *pool, int mem_flags, dma_addr_t *handle)
{
	unsigned long		flags;
	struct list_head	*entry;
	struct pci_page		*page;
	int			map, block;
	size_t			offset;
	void			*retval;

restart:
	spin_lock_irqsave (&pool->lock, flags);
	list_for_each (entry, &pool->page_list) {
		int		i;
		page = list_entry (entry, struct pci_page, page_list);
		/* only cacheable accesses here ... */
		for (map = 0, i = 0;
				i < pool->blocks_per_page;
				i += BITS_PER_LONG, map++) {
			if (page->bitmap [map] == 0)
				continue;
			block = ffz (~ page->bitmap [map]);
			if ((i + block) < pool->blocks_per_page) {
				clear_bit (block, &page->bitmap [map]);
				offset = (BITS_PER_LONG * map) + block;
				offset *= pool->size;
				goto ready;
			}
		}
	}
	if (!(page = pool_alloc_page (pool, SLAB_ATOMIC))) {
		if (mem_flags == SLAB_KERNEL) {
			DECLARE_WAITQUEUE (wait, current);

			//current->state = TASK_INTERRUPTIBLE;
			add_wait_queue (&pool->waitq, &wait);
			spin_unlock_irqrestore (&pool->lock, flags);

			schedule_timeout (POOL_TIMEOUT_JIFFIES);

			remove_wait_queue (&pool->waitq, &wait);
			goto restart;
		}
		retval = 0;
		goto done;
	}

	clear_bit (0, &page->bitmap [0]);
	offset = 0;
ready:
	page->in_use++;
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	CONFIG_DEBUG_SLAB
	memset (retval, POOL_POISON_ALLOCATED, pool->size);
#endif
done:
	spin_unlock_irqrestore (&pool->lock, flags);
	return retval;
}
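
/*
 * Worked example of the offset arithmetic above (values invented): with
 * BITS_PER_LONG == 32 and pool->size == 64, a free block found at
 * map == 1, block == 2 is block number 32 * 1 + 2 == 34, so it lives at
 * byte offset 34 * 64 == 2176 within that page's 'allocation' chunk.
 */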


/* map a dma address back onto the pci_page that carved it out */
static struct pci_page *
pool_find_page (struct pci_pool *pool, dma_addr_t dma)
{
	unsigned long		flags;
	struct list_head	*entry;
	struct pci_page		*page;

	spin_lock_irqsave (&pool->lock, flags);
	list_for_each (entry, &pool->page_list) {
		page = list_entry (entry, struct pci_page, page_list);
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = 0;
done:
	spin_unlock_irqrestore (&pool->lock, flags);
	return page;
}


/**
 * pci_pool_free - put block back into pci pool
 * @pool: the pci pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void
pci_pool_free (struct pci_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct pci_page		*page;
	unsigned long		flags;
	int			map, block;

	if ((page = pool_find_page (pool, dma)) == 0) {
		printk (KERN_ERR "pci_pool_free %s/%s, %p/%lx (bad dma)\n",
			pool->dev ? pci_name(pool->dev) : NULL,
			pool->name, vaddr, (unsigned long) dma);
		return;
	}

	block = dma - page->dma;
	block /= pool->size;
	map = block / BITS_PER_LONG;
	block %= BITS_PER_LONG;

#ifdef	CONFIG_DEBUG_SLAB
	if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
		printk (KERN_ERR "pci_pool_free %s/%s, %p (bad vaddr)/%Lx\n",
			pool->dev ? pci_name(pool->dev) : NULL,
			pool->name, vaddr, (unsigned long long) dma);
		return;
	}
	if (page->bitmap [map] & (1UL << block)) {
		printk (KERN_ERR "pci_pool_free %s/%s, dma %Lx already free\n",
			pool->dev ? pci_name(pool->dev) : NULL,
			pool->name, (unsigned long long) dma);
		return;
	}
	memset (vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave (&pool->lock, flags);
	page->in_use--;
	set_bit (block, &page->bitmap [map]);
	if (waitqueue_active (&pool->waitq))
		wake_up (&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
	 * it is not interrupt safe.  Better have empty pages hang around.
	 */
	spin_unlock_irqrestore (&pool->lock, flags);
}


EXPORT_SYMBOL (pci_pool_create);
EXPORT_SYMBOL (pci_pool_destroy);
EXPORT_SYMBOL (pci_pool_alloc);
EXPORT_SYMBOL (pci_pool_free);