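/*
 * <asm-i386/dma-mapping.h>: the DMA mapping API for i386
 * (Linux-derived header from the shark repository, rev 422 by giacomo).
 */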
#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

#include <asm/cache.h>

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, int flag);

void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle);

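/*
 * On i386 there is no IOMMU and main memory is cache-coherent with
 * respect to DMA, so "mapping" a buffer reduces to a virt_to_phys()
 * translation; flush_write_buffers() makes sure pending CPU stores
 * reach memory before the device starts accessing the buffer.
 */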
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        flush_write_buffers();
        return virt_to_phys(ptr);
}

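/* Unmapping is a no-op beyond the sanity check: nothing was set up at map time. */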
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

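/*
 * Map a scatter/gather list: each entry's bus address is simply the
 * physical address of its page plus the offset within that page.
 */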
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++) {
                BUG_ON(!sg[i].page);

                sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
        }

        flush_write_buffers();
        return nents;
}

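/* Page-based variant of dma_map_single(): bus address from the page frame number plus offset. */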
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
             size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset;
}

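/* As with dma_unmap_single(), unmapping a page is a no-op. */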
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

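/* Likewise for scatterlists: nothing to tear down. */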
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
             enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

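/*
 * The dma_sync_* operations only need to drain the CPU write buffers;
 * caches are coherent with DMA on i386, so no cache flushing is needed.
 */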
static inline void
dma_sync_single(struct device *dev, dma_addr_t dma_handle, size_t size,
                enum dma_data_direction direction)
{
        flush_write_buffers();
}

static inline void
dma_sync_single_range(struct device *dev, dma_addr_t dma_handle,
                      unsigned long offset, size_t size,
                      enum dma_data_direction direction)
{
        flush_write_buffers();
}

static inline void
dma_sync_sg(struct device *dev, struct scatterlist *sg, int nelems,
            enum dma_data_direction direction)
{
        flush_write_buffers();
}

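/* The x86 GFP_DMA zone covers the low 16MB of memory (24-bit masks). */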
static inline int
dma_supported(struct device *dev, u64 mask)
{
        /*
         * we fall back to GFP_DMA when the mask isn't all 1s,
         * so we can't guarantee allocations that must be
         * within a tighter range than GFP_DMA.
         */
        if (mask < 0x00ffffff)
                return 0;

        return 1;
}

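/* Validate and record the device's DMA addressing capability. */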
static inline int
dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}

static inline int
dma_get_cache_alignment(void)
{
        /* no easy way to get cache size on all x86, so return the
         * maximum possible, to be safe */
        return (1 << L1_CACHE_SHIFT_MAX);
}

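/*
 * i386 memory is always DMA-consistent, so dma_is_consistent() is
 * constant true and dma_cache_sync() only drains the write buffers.
 */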
#define dma_is_consistent(d)    (1)

static inline void
dma_cache_sync(void *vaddr, size_t size,
               enum dma_data_direction direction)
{
        flush_write_buffers();
}

#endif /* _ASM_I386_DMA_MAPPING_H */