#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>

/*
 * Pool allocator ... wraps the pci_alloc_consistent page allocator, so
 * small blocks are easily used by drivers for bus mastering controllers.
 * This should probably be sharing the guts of the slab allocator.
 */

struct pci_pool {       /* the pool */
        struct list_head        page_list;
        spinlock_t              lock;
        size_t                  blocks_per_page;
        size_t                  size;
        struct pci_dev          *dev;
        size_t                  allocation;
        char                    name [32];
        wait_queue_head_t       waitq;
        struct list_head        pools;
};

struct pci_page {       /* cacheable header for 'allocation' bytes */
        struct list_head        page_list;
        void                    *vaddr;
        dma_addr_t              dma;
        unsigned                in_use;
        unsigned long           bitmap [0];
};
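
/*
 * For a sense of the sizes involved (sample numbers only): with an
 * allocation of 4096 bytes and 64-byte blocks, blocks_per_page is
 * 4096 / 64 = 64, so the trailing bitmap occupies two unsigned longs
 * on a 32-bit kernel (BITS_PER_LONG == 32) or one on a 64-bit kernel.
 * A set bit marks a free block; see pool_alloc_page() below.
 */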

#define POOL_TIMEOUT_JIFFIES    ((100 /* msec */ * HZ) / 1000)
#define POOL_POISON_FREED       0xa7    /* !inuse */
#define POOL_POISON_ALLOCATED   0xa9    /* !initted */

static DECLARE_MUTEX (pools_lock);

static ssize_t
show_pools (struct device *dev, char *buf)
{
        struct pci_dev          *pdev;
        unsigned                temp, size;
        char                    *next;
        struct list_head        *i, *j;

        pdev = container_of (dev, struct pci_dev, dev);
        next = buf;
        size = PAGE_SIZE;

        temp = snprintf (next, size, "poolinfo - 0.1\n");
        size -= temp;
        next += temp;

        down (&pools_lock);
        list_for_each (i, &pdev->pools) {
                struct pci_pool *pool;
                unsigned        pages = 0, blocks = 0;

                pool = list_entry (i, struct pci_pool, pools);

                list_for_each (j, &pool->page_list) {
                        struct pci_page *page;

                        page = list_entry (j, struct pci_page, page_list);
                        pages++;
                        blocks += page->in_use;
                }

                /* per-pool info, no real statistics yet */
                temp = snprintf (next, size, "%-16s %4u %4Zu %4Zu %2u\n",
                                pool->name,
                                blocks, pages * pool->blocks_per_page,
                                pool->size, pages);
                size -= temp;
                next += temp;
        }
        up (&pools_lock);

        return PAGE_SIZE - size;
}
static DEVICE_ATTR (pools, S_IRUGO, show_pools, NULL);

/**
 * pci_pool_create - Creates a pool of pci consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @pdev: pci device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 * Context: !in_interrupt()
 *
 * Returns a pci allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, pci_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If allocation is nonzero, objects returned from pci_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct pci_pool *
pci_pool_create (const char *name, struct pci_dev *pdev,
        size_t size, size_t align, size_t allocation)
{
        struct pci_pool         *retval;

        if (align == 0)
                align = 1;
        if (size == 0)
                return 0;
        else if (size < align)
                size = align;
        else if ((size % align) != 0) {
                size += align + 1;
                size &= ~(align - 1);
        }

        if (allocation == 0) {
                if (PAGE_SIZE < size)
                        allocation = size;
                else
                        allocation = PAGE_SIZE;
                // FIXME: round up for less fragmentation
        } else if (allocation < size)
                return 0;

        if (!(retval = kmalloc (sizeof *retval, SLAB_KERNEL)))
                return retval;

        strlcpy (retval->name, name, sizeof retval->name);

        retval->dev = pdev;

        INIT_LIST_HEAD (&retval->page_list);
        spin_lock_init (&retval->lock);
        retval->size = size;
        retval->allocation = allocation;
        retval->blocks_per_page = allocation / size;
        init_waitqueue_head (&retval->waitq);

        if (pdev) {
                down (&pools_lock);
                if (list_empty (&pdev->pools))
                        device_create_file (&pdev->dev, &dev_attr_pools);
                /* note:  not currently insisting "name" be unique */
                list_add (&retval->pools, &pdev->pools);
                up (&pools_lock);
        } else
                INIT_LIST_HEAD (&retval->pools);

        return retval;
}
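
/*
 * Minimal usage sketch, with hypothetical driver names: a driver that
 * needs many small consistent-memory descriptors would typically create
 * one pool per device in its probe() path, e.g.
 *
 *      struct pci_pool *td_pool;
 *
 *      td_pool = pci_pool_create ("mydrv_td", pdev, 64, 32, 0);
 *      if (!td_pool)
 *              return -ENOMEM;
 *
 * "mydrv_td", td_pool and pdev stand in for the driver's own names.
 * With allocation left at zero, the 64-byte, 32-byte-aligned blocks
 * never cross a PAGE_SIZE boundary.
 */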


static struct pci_page *
pool_alloc_page (struct pci_pool *pool, int mem_flags)
{
        struct pci_page *page;
        int             mapsize;

        mapsize = pool->blocks_per_page;
        mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
        mapsize *= sizeof (long);

        page = (struct pci_page *) kmalloc (mapsize + sizeof *page, mem_flags);
        if (!page)
                return 0;
        page->vaddr = pci_alloc_consistent (pool->dev,
                                            pool->allocation,
                                            &page->dma);
        if (page->vaddr) {
                memset (page->bitmap, 0xff, mapsize);   // bit set == free
#ifdef  CONFIG_DEBUG_SLAB
                memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
                list_add (&page->page_list, &pool->page_list);
                page->in_use = 0;
        } else {
                kfree (page);
                page = 0;
        }
        return page;
}


static inline int
is_page_busy (int blocks, unsigned long *bitmap)
{
        while (blocks > 0) {
                if (*bitmap++ != ~0UL)
                        return 1;
                blocks -= BITS_PER_LONG;
        }
        return 0;
}

static void
pool_free_page (struct pci_pool *pool, struct pci_page *page)
{
        dma_addr_t      dma = page->dma;

#ifdef  CONFIG_DEBUG_SLAB
        memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
        pci_free_consistent (pool->dev, pool->allocation, page->vaddr, dma);
        list_del (&page->page_list);
        kfree (page);
}


/**
 * pci_pool_destroy - destroys a pool of pci memory blocks.
 * @pool: pci pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void
pci_pool_destroy (struct pci_pool *pool)
{
        down (&pools_lock);
        list_del (&pool->pools);
        if (pool->dev && list_empty (&pool->dev->pools))
                device_remove_file (&pool->dev->dev, &dev_attr_pools);
        up (&pools_lock);

        while (!list_empty (&pool->page_list)) {
                struct pci_page         *page;
                page = list_entry (pool->page_list.next,
                                struct pci_page, page_list);
                if (is_page_busy (pool->blocks_per_page, page->bitmap)) {
                        printk (KERN_ERR "pci_pool_destroy %s/%s, %p busy\n",
                                pool->dev ? pci_name(pool->dev) : NULL,
                                pool->name, page->vaddr);
                        /* leak the still-in-use consistent memory */
                        list_del (&page->page_list);
                        kfree (page);
                } else
                        pool_free_page (pool, page);
        }

        kfree (pool);
}


/**
 * pci_pool_alloc - get a block of consistent memory
 * @pool: pci pool that will produce the block
 * @mem_flags: SLAB_KERNEL or SLAB_ATOMIC
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, null is returned.
 */
void *
pci_pool_alloc (struct pci_pool *pool, int mem_flags, dma_addr_t *handle)
{
        unsigned long           flags;
        struct list_head        *entry;
        struct pci_page         *page;
        int                     map, block;
        size_t                  offset;
        void                    *retval;

restart:
        spin_lock_irqsave (&pool->lock, flags);
        list_for_each (entry, &pool->page_list) {
                int             i;
                page = list_entry (entry, struct pci_page, page_list);
                /* only cachable accesses here ... */
                for (map = 0, i = 0;
                                i < pool->blocks_per_page;
                                i += BITS_PER_LONG, map++) {
                        if (page->bitmap [map] == 0)
                                continue;
                        block = ffz (~ page->bitmap [map]);
                        if ((i + block) < pool->blocks_per_page) {
                                clear_bit (block, &page->bitmap [map]);
                                offset = (BITS_PER_LONG * map) + block;
                                offset *= pool->size;
                                goto ready;
                        }
                }
        }
        if (!(page = pool_alloc_page (pool, SLAB_ATOMIC))) {
                if (mem_flags == SLAB_KERNEL) {
                        DECLARE_WAITQUEUE (wait, current);

                        current->state = TASK_INTERRUPTIBLE;
                        add_wait_queue (&pool->waitq, &wait);
                        spin_unlock_irqrestore (&pool->lock, flags);

                        schedule_timeout (POOL_TIMEOUT_JIFFIES);

                        remove_wait_queue (&pool->waitq, &wait);
                        goto restart;
                }
                retval = 0;
                goto done;
        }

        clear_bit (0, &page->bitmap [0]);
        offset = 0;
ready:
        page->in_use++;
        retval = offset + page->vaddr;
        *handle = offset + page->dma;
#ifdef  CONFIG_DEBUG_SLAB
        memset (retval, POOL_POISON_ALLOCATED, pool->size);
#endif
done:
        spin_unlock_irqrestore (&pool->lock, flags);
        return retval;
}
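
/*
 * Usage sketch, continuing the hypothetical td_pool example above: the
 * driver hands the dma address to its controller and keeps the virtual
 * address for cpu access, e.g.
 *
 *      dma_addr_t      td_dma;
 *      void            *td;
 *
 *      td = pci_pool_alloc (td_pool, SLAB_KERNEL, &td_dma);
 *      if (!td)
 *              return -ENOMEM;
 *
 * Pass SLAB_ATOMIC instead of SLAB_KERNEL when the caller may not sleep;
 * only SLAB_KERNEL callers wait and retry when a new page can't be
 * allocated immediately.
 */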


static struct pci_page *
pool_find_page (struct pci_pool *pool, dma_addr_t dma)
{
        unsigned long           flags;
        struct list_head        *entry;
        struct pci_page         *page;

        spin_lock_irqsave (&pool->lock, flags);
        list_for_each (entry, &pool->page_list) {
                page = list_entry (entry, struct pci_page, page_list);
                if (dma < page->dma)
                        continue;
                if (dma < (page->dma + pool->allocation))
                        goto done;
        }
        page = 0;
done:
        spin_unlock_irqrestore (&pool->lock, flags);
        return page;
}


/**
 * pci_pool_free - put block back into pci pool
 * @pool: the pci pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void
pci_pool_free (struct pci_pool *pool, void *vaddr, dma_addr_t dma)
{
        struct pci_page         *page;
        unsigned long           flags;
        int                     map, block;

        if ((page = pool_find_page (pool, dma)) == 0) {
                printk (KERN_ERR "pci_pool_free %s/%s, %p/%lx (bad dma)\n",
                        pool->dev ? pci_name(pool->dev) : NULL,
                        pool->name, vaddr, (unsigned long) dma);
                return;
        }

        block = dma - page->dma;
        block /= pool->size;
        map = block / BITS_PER_LONG;
        block %= BITS_PER_LONG;

#ifdef  CONFIG_DEBUG_SLAB
        if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
                printk (KERN_ERR "pci_pool_free %s/%s, %p (bad vaddr)/%Lx\n",
                        pool->dev ? pci_name(pool->dev) : NULL,
                        pool->name, vaddr, (unsigned long long) dma);
                return;
        }
        if (page->bitmap [map] & (1UL << block)) {
                printk (KERN_ERR "pci_pool_free %s/%s, dma %Lx already free\n",
                        pool->dev ? pci_name(pool->dev) : NULL,
                        pool->name, (unsigned long long)dma);
                return;
        }
        memset (vaddr, POOL_POISON_FREED, pool->size);
#endif

        spin_lock_irqsave (&pool->lock, flags);
        page->in_use--;
        set_bit (block, &page->bitmap [map]);
        if (waitqueue_active (&pool->waitq))
                wake_up (&pool->waitq);
        /*
         * Resist a temptation to do
         *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
         * it is not interrupt safe. Better have empty pages hang around.
         */
        spin_unlock_irqrestore (&pool->lock, flags);
}
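
/*
 * Teardown sketch (same hypothetical names): every block must be
 * returned before the pool itself goes away, typically in the driver's
 * remove() path:
 *
 *      pci_pool_free (td_pool, td, td_dma);
 *      pci_pool_destroy (td_pool);
 */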


EXPORT_SYMBOL (pci_pool_create);
EXPORT_SYMBOL (pci_pool_destroy);
EXPORT_SYMBOL (pci_pool_alloc);
EXPORT_SYMBOL (pci_pool_free);