#include <linuxcomp.h>

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>

/*
 * Pool allocator ... wraps the pci_alloc_consistent page allocator, so
 * small blocks are easily used by drivers for bus mastering controllers.
 * This should probably be sharing the guts of the slab allocator.
 */
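
/*
 * Overview (illustrative summary of the code below): each pool hands out
 * fixed-size blocks carved from pci_alloc_consistent() chunks of
 * 'allocation' bytes; a struct pci_page tracks one such chunk with a
 * bitmap in which a set bit means "block is free".  For example, a pool
 * with size 32 and the default allocation of PAGE_SIZE (4096 on x86)
 * holds 4096 / 32 = 128 blocks per page.
 */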

struct pci_pool {		/* the pool */
	struct list_head	page_list;
	spinlock_t		lock;
	size_t			blocks_per_page;
	size_t			size;
	struct pci_dev		*dev;
	size_t			allocation;
	char			name [32];
	wait_queue_head_t	waitq;
	struct list_head	pools;
};

struct pci_page {		/* cacheable header for 'allocation' bytes */
	struct list_head	page_list;
	void			*vaddr;
	dma_addr_t		dma;
	unsigned		in_use;
	unsigned long		bitmap [0];
};

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)
#define	POOL_POISON_FREED	0xa7	/* !inuse */
#define	POOL_POISON_ALLOCATED	0xa9	/* !initted */

static DECLARE_MUTEX (pools_lock);

static ssize_t
show_pools (struct device *dev, char *buf)
{
	struct pci_dev		*pdev;
	unsigned		temp, size;
	char			*next;
	struct list_head	*i, *j;

	pdev = container_of (dev, struct pci_dev, dev);
	next = buf;
	size = PAGE_SIZE;

	temp = snprintf26(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	down (&pools_lock);
	list_for_each (i, &pdev->pools) {
		struct pci_pool	*pool;
		unsigned	pages = 0, blocks = 0;

		pool = list_entry (i, struct pci_pool, pools);

		list_for_each (j, &pool->page_list) {
			struct pci_page	*page;

			page = list_entry (j, struct pci_page, page_list);
			pages++;
			blocks += page->in_use;
		}

		/* per-pool info, no real statistics yet */
		temp = snprintf26(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				pool->name,
				blocks, (unsigned int)(pages * pool->blocks_per_page),
				(unsigned int)(pool->size), pages);
		size -= temp;
		next += temp;
	}
	up (&pools_lock);

	return PAGE_SIZE - size;
}
static DEVICE_ATTR (pools, S_IRUGO, show_pools, NULL);
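
/*
 * Reading the "pools" attribute yields one header line plus one line per
 * pool, per the format string in show_pools().  A hypothetical read might
 * look like this (pool names and counts invented for illustration):
 *
 *	poolinfo - 0.1
 *	buffer-2048         5    8 2048  4
 *	uhci_td            10   64   64  1
 *
 * Columns: pool name, blocks in use, total blocks, block size, pages.
 */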

/**
 * pci_pool_create - Creates a pool of pci consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @pdev: pci device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 * Context: !in_interrupt()
 *
 * Returns a pci allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, pci_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If allocation is nonzero, objects returned from pci_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct pci_pool *
pci_pool_create (const char *name, struct pci_dev *pdev,
	size_t size, size_t align, size_t allocation)
{
	struct pci_pool		*retval;

	if (align == 0)
		align = 1;
	if (size == 0)
		return 0;
	else if (size < align)
		size = align;
	else if ((size % align) != 0) {
		size += align + 1;
		size &= ~(align - 1);
	}
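	/*
	 * Worked example of the rounding above (illustrative): size = 5,
	 * align = 4 gives (5 + 5) & ~3 = 8, the next multiple of the
	 * alignment.  The "+ align + 1" may overshoot (size = 7 yields 12
	 * rather than 8), which merely wastes space; blocks stay aligned.
	 */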

	if (allocation == 0) {
		if (PAGE_SIZE < size)
			allocation = size;
		else
			allocation = PAGE_SIZE;
		// FIXME: round up for less fragmentation
	} else if (allocation < size)
		return 0;

	if (!(retval = kmalloc (sizeof *retval, SLAB_KERNEL)))
		return retval;

	strlcpy (retval->name, name, sizeof retval->name);

	retval->dev = pdev;

	INIT_LIST_HEAD (&retval->page_list);
	spin_lock_init (&retval->lock);
	retval->size = size;
	retval->allocation = allocation;
	retval->blocks_per_page = allocation / size;
	init_waitqueue_head (&retval->waitq);

	if (pdev) {
		down (&pools_lock);
		if (list_empty (&pdev->pools))
			device_create_file (&pdev->dev, &dev_attr_pools);
		/* note: not currently insisting "name" be unique */
		list_add (&retval->pools, &pdev->pools);
		up (&pools_lock);
	} else
		INIT_LIST_HEAD (&retval->pools);

	return retval;
}
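
/*
 * Usage sketch (illustrative, not taken from any in-tree driver): a driver
 * needing small DMA descriptors might create a pool from its init path.
 * "my_pdev" and "desc_pool" are hypothetical names.
 *
 *	struct pci_pool *desc_pool;
 *
 *	desc_pool = pci_pool_create ("mydrv-desc", my_pdev, 64, 16, 0);
 *	if (!desc_pool)
 *		return -ENOMEM;
 *
 * Blocks then come from pci_pool_alloc(); once every block has been
 * returned, the pool is released with pci_pool_destroy().
 */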

static struct pci_page *
pool_alloc_page (struct pci_pool *pool, int mem_flags)
{
	struct pci_page	*page;
	int		mapsize;

	mapsize = pool->blocks_per_page;
	mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
	mapsize *= sizeof (long);

	page = (struct pci_page *) kmalloc (mapsize + sizeof *page, mem_flags);
	if (!page)
		return 0;
	page->vaddr = pci_alloc_consistent (pool->dev,
					    pool->allocation,
					    &page->dma);
	if (page->vaddr) {
		memset (page->bitmap, 0xff, mapsize);	// bit set == free
#ifdef	CONFIG_DEBUG_SLAB
		memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		list_add (&page->page_list, &pool->page_list);
		page->in_use = 0;
	} else {
		kfree (page);
		page = 0;
	}
	return page;
}

static struct pci_page *
pool_alloc_page_usb (struct pci_pool *pool, int mem_flags)
{
	struct pci_page	*page;
	int		mapsize;

	mapsize = pool->blocks_per_page;
	mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
	mapsize *= sizeof (long);

	page = (struct pci_page *) kmalloc (mapsize + sizeof *page, mem_flags);
	if (!page)
		return 0;
	page->vaddr = pci_alloc_consistent_usb (pool->dev, pool->allocation, &page->dma);
	if (page->vaddr) {
		memset (page->bitmap, 0xff, mapsize);	// bit set == free
#ifdef	CONFIG_DEBUG_SLAB
		memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		list_add (&page->page_list, &pool->page_list);
		page->in_use = 0;
	} else {
		kfree (page);
		page = 0;
	}
	return page;
}

static inline int
is_page_busy (int blocks, unsigned long *bitmap)
{
	while (blocks > 0) {
		if (*bitmap++ != ~0UL)
			return 1;
		blocks -= BITS_PER_LONG;
	}
	return 0;
}
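
/*
 * Worked note (illustrative): with 48 blocks per page on a 32-bit machine
 * the bitmap spans two longs.  pool_alloc_page() sets all 64 bits ("free"),
 * and allocations only clear bits below blocks_per_page, so the 16 surplus
 * bits in the second word stay set; a fully free page still compares equal
 * to ~0UL per word and is_page_busy() correctly reports it idle.
 */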

static void
pool_free_page (struct pci_pool *pool, struct pci_page *page)
{
	dma_addr_t	dma = page->dma;

#ifdef	CONFIG_DEBUG_SLAB
	memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	pci_free_consistent (pool->dev, pool->allocation, page->vaddr, dma);
	list_del (&page->page_list);
	kfree (page);
}

/**
 * pci_pool_destroy - destroys a pool of pci memory blocks.
 * @pool: pci pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void
pci_pool_destroy (struct pci_pool *pool)
{
	down (&pools_lock);
	list_del (&pool->pools);
	if (pool->dev && list_empty (&pool->dev->pools))
		device_remove_file (&pool->dev->dev, &dev_attr_pools);
	up (&pools_lock);

	while (!list_empty (&pool->page_list)) {
		struct pci_page	*page;
		page = list_entry (pool->page_list.next,
				struct pci_page, page_list);
		if (is_page_busy (pool->blocks_per_page, page->bitmap)) {
			printk (KERN_ERR "pci_pool_destroy %s/%s, %p busy\n",
				pool->dev ? pci_name(pool->dev) : NULL,
				pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del (&page->page_list);
			kfree (page);
		} else
			pool_free_page (pool, page);
	}

	kfree (pool);
}

/**
 * pci_pool_alloc - get a block of consistent memory
 * @pool: pci pool that will produce the block
 * @mem_flags: SLAB_KERNEL or SLAB_ATOMIC
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, null is returned.
 */
void *
pci_pool_alloc (struct pci_pool *pool, int mem_flags, dma_addr_t *handle)
{
	unsigned long		flags;
	struct list_head	*entry;
	struct pci_page		*page;
	int			map, block;
	size_t			offset;
	void			*retval;

restart:
	spin_lock_irqsave (&pool->lock, flags);
	list_for_each (entry, &pool->page_list) {
		int	i;
		page = list_entry (entry, struct pci_page, page_list);
		/* only cachable accesses here ... */
		for (map = 0, i = 0;
				i < pool->blocks_per_page;
				i += BITS_PER_LONG, map++) {
			if (page->bitmap [map] == 0)
				continue;
			block = ffz (~ page->bitmap [map]);
			if ((i + block) < pool->blocks_per_page) {
				clear_bit (block, &page->bitmap [map]);
				offset = (BITS_PER_LONG * map) + block;
				offset *= pool->size;
				goto ready;
			}
		}
	}
	if (!(page = pool_alloc_page (pool, SLAB_ATOMIC))) {
		if (mem_flags == SLAB_KERNEL) {
			DECLARE_WAITQUEUE (wait, current);

			//current->state = TASK_INTERRUPTIBLE;
			add_wait_queue (&pool->waitq, &wait);
			spin_unlock_irqrestore (&pool->lock, flags);

			schedule_timeout (POOL_TIMEOUT_JIFFIES);

			remove_wait_queue (&pool->waitq, &wait);
			goto restart;
		}
		retval = 0;
		goto done;
	}

	clear_bit (0, &page->bitmap [0]);
	offset = 0;
ready:
	page->in_use++;
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	CONFIG_DEBUG_SLAB
	memset (retval, POOL_POISON_ALLOCATED, pool->size);
#endif
done:
	spin_unlock_irqrestore (&pool->lock, flags);
	return retval;
}
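
/*
 * Usage sketch (illustrative): allocating one block from the hypothetical
 * "desc_pool" created in the sketch after pci_pool_create().  The block's
 * bus address is written through the handle.
 *
 *	void *desc;
 *	dma_addr_t desc_dma;
 *
 *	desc = pci_pool_alloc (desc_pool, SLAB_KERNEL, &desc_dma);
 *	if (!desc)
 *		return -ENOMEM;
 *	// hand desc_dma to the device; the CPU uses desc
 */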

/**
 * pci_pool_alloc_usb - get a block of consistent memory
 * @pool: pci pool that will produce the block
 * @mem_flags: SLAB_KERNEL or SLAB_ATOMIC
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, null is returned.
 *
 * This variant behaves like pci_pool_alloc() but grows the pool through
 * pool_alloc_page_usb(), i.e. with memory from pci_alloc_consistent_usb().
 */
void *
pci_pool_alloc_usb (struct pci_pool *pool, int mem_flags, dma_addr_t *handle)
{
	unsigned long		flags;
	struct list_head	*entry;
	struct pci_page		*page;
	int			map, block;
	size_t			offset;
	void			*retval;

restart:
	spin_lock_irqsave (&pool->lock, flags);
	list_for_each (entry, &pool->page_list) {
		int	i;
		page = list_entry (entry, struct pci_page, page_list);
		/* only cachable accesses here ... */
		for (map = 0, i = 0;
				i < pool->blocks_per_page;
				i += BITS_PER_LONG, map++) {
			if (page->bitmap [map] == 0)
				continue;
			block = ffz (~ page->bitmap [map]);
			if ((i + block) < pool->blocks_per_page) {
				clear_bit (block, &page->bitmap [map]);
				offset = (BITS_PER_LONG * map) + block;
				offset *= pool->size;
				goto ready;
			}
		}
	}
	if (!(page = pool_alloc_page_usb (pool, SLAB_ATOMIC))) {
		if (mem_flags == SLAB_KERNEL) {
			DECLARE_WAITQUEUE (wait, current);

			//current->state = TASK_INTERRUPTIBLE;
			add_wait_queue (&pool->waitq, &wait);
			spin_unlock_irqrestore (&pool->lock, flags);

			schedule_timeout (POOL_TIMEOUT_JIFFIES);

			remove_wait_queue (&pool->waitq, &wait);
			goto restart;
		}
		retval = 0;
		goto done;
	}

	clear_bit (0, &page->bitmap [0]);
	offset = 0;
ready:
	page->in_use++;
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	CONFIG_DEBUG_SLAB
	memset (retval, POOL_POISON_ALLOCATED, pool->size);
#endif
done:
	spin_unlock_irqrestore (&pool->lock, flags);
	return retval;
}

static struct pci_page *
pool_find_page (struct pci_pool *pool, dma_addr_t dma)
{
	unsigned long		flags;
	struct list_head	*entry;
	struct pci_page		*page;

	spin_lock_irqsave (&pool->lock, flags);
	list_for_each (entry, &pool->page_list) {
		page = list_entry (entry, struct pci_page, page_list);
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = 0;
done:
	spin_unlock_irqrestore (&pool->lock, flags);
	return page;
}

/**
 * pci_pool_free - put block back into pci pool
 * @pool: the pci pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void
pci_pool_free (struct pci_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct pci_page		*page;
	unsigned long		flags;
	int			map, block;

	if ((page = pool_find_page (pool, dma)) == 0) {
		printk (KERN_ERR "pci_pool_free %s/%s, %p/%lx (bad dma)\n",
			pool->dev ? pci_name(pool->dev) : NULL,
			pool->name, vaddr, (unsigned long) dma);
		return;
	}

	block = dma - page->dma;
	block /= pool->size;
	map = block / BITS_PER_LONG;
	block %= BITS_PER_LONG;

#ifdef	CONFIG_DEBUG_SLAB
	if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
		printk (KERN_ERR "pci_pool_free %s/%s, %p (bad vaddr)/%Lx\n",
			pool->dev ? pci_name(pool->dev) : NULL,
			pool->name, vaddr, (unsigned long long) dma);
		return;
	}
	if (page->bitmap [map] & (1UL << block)) {
		printk (KERN_ERR "pci_pool_free %s/%s, dma %Lx already free\n",
			pool->dev ? pci_name(pool->dev) : NULL,
			pool->name, (unsigned long long)dma);
		return;
	}
	memset (vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave (&pool->lock, flags);
	page->in_use--;
	set_bit (block, &page->bitmap [map]);
	if (waitqueue_active (&pool->waitq))
		wake_up (&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
	 * it is not interrupt safe.  Better have empty pages hang around.
	 */
	spin_unlock_irqrestore (&pool->lock, flags);
}
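
/*
 * Teardown sketch (illustrative, continuing the hypothetical "desc_pool"
 * example): each block goes back with pci_pool_free() before the pool
 * itself is destroyed.
 *
 *	pci_pool_free (desc_pool, desc, desc_dma);
 *	pci_pool_destroy (desc_pool);
 */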

EXPORT_SYMBOL (pci_pool_create);
EXPORT_SYMBOL (pci_pool_destroy);
EXPORT_SYMBOL (pci_pool_alloc);
EXPORT_SYMBOL (pci_pool_free);