#ifndef _ASM_I386_DMA_MAPPING_H
#define _ASM_I386_DMA_MAPPING_H

#include <asm/cache.h>

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, int flag);

void *dma_alloc_coherent_usb(struct device *dev, size_t size,
                             dma_addr_t *dma_handle, int flag);

void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle);

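/*
 * Usage sketch (illustrative, not part of the original header; "my_dev",
 * the 4096-byte size and the 0 flag are assumptions for the example).
 * A driver allocates a coherent buffer once, programs the device with
 * the returned bus address, and frees the pair together:
 *
 *      dma_addr_t bus;
 *      void *buf = dma_alloc_coherent(my_dev, 4096, &bus, 0);
 *      if (buf) {
 *              ... hand "bus" to the device, touch data via "buf" ...
 *              dma_free_coherent(my_dev, 4096, buf, bus);
 *      }
 */
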
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        flush_write_buffers();
        return virt_to_phys(ptr);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

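/*
 * Usage sketch (an illustration, not from the original source; "my_dev",
 * "buf" and "len" are made up for the example).  Streaming mappings
 * bracket one transfer: on this i386 port dma_map_single() is just
 * virt_to_phys() plus a write-buffer flush, but callers should still
 * follow the portable map/unmap protocol:
 *
 *      dma_addr_t bus = dma_map_single(my_dev, buf, len, DMA_TO_DEVICE);
 *      ... start the transfer and wait for it to complete ...
 *      dma_unmap_single(my_dev, bus, len, DMA_TO_DEVICE);
 */
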
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
           enum dma_data_direction direction)
{
        int i;

        BUG_ON(direction == DMA_NONE);

        for (i = 0; i < nents; i++) {
                BUG_ON(!sg[i].page);

                sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
        }

        flush_write_buffers();
        return nents;
}

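/*
 * Usage sketch (illustrative; "my_dev", "sg" and "count" are assumed).
 * dma_map_sg() fills in sg[i].dma_address for each entry and returns
 * the number of entries to program; the same count goes back to
 * dma_unmap_sg() when the transfer is done:
 *
 *      int n = dma_map_sg(my_dev, sg, count, DMA_FROM_DEVICE);
 *      ... program n descriptors from sg[i].dma_address / sg[i].length ...
 *      dma_unmap_sg(my_dev, sg, n, DMA_FROM_DEVICE);
 */
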
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page, unsigned long offset,
             size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        return (dma_addr_t)(page_to_pfn(page)) * PAGE_SIZE + offset;
}

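/*
 * Note added for clarity: page_to_pfn(page) * PAGE_SIZE is the physical
 * base address of the page, so with 4 KB pages a page at pfn 0x1234 and
 * an offset of 0x10 maps to 0x1234000 + 0x10 = 0x1234010.
 */
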
static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
             enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
}

static inline void
dma_sync_single(struct device *dev, dma_addr_t dma_handle, size_t size,
                enum dma_data_direction direction)
{
        flush_write_buffers();
}

static inline void
dma_sync_single_range(struct device *dev, dma_addr_t dma_handle,
                      unsigned long offset, size_t size,
                      enum dma_data_direction direction)
{
        flush_write_buffers();
}

static inline void
dma_sync_sg(struct device *dev, struct scatterlist *sg, int nelems,
            enum dma_data_direction direction)
{
        flush_write_buffers();
}

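/*
 * Usage note (an interpretation, not from the original source): i386 is
 * cache-coherent with respect to DMA, so these syncs only flush the CPU
 * write buffers.  Portable drivers still call them around CPU accesses
 * to a buffer that stays mapped, e.g.:
 *
 *      dma_sync_single(my_dev, bus, len, DMA_FROM_DEVICE);
 *      ... CPU reads the freshly DMA-written data ...
 */
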
static inline int
dma_supported(struct device *dev, u64 mask)
{
        /*
         * we fall back to GFP_DMA when the mask isn't all 1s,
         * so we can't guarantee allocations that must be
         * within a tighter range than GFP_DMA.
         */
        if (mask < 0x00ffffff)
                return 0;

        return 1;
}

static inline int
dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}

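/*
 * Usage sketch (illustrative; the full 32-bit mask is an assumption):
 * a driver negotiates its addressing capability before mapping
 * anything.  dma_supported() above rejects any mask narrower than the
 * 16 MB GFP_DMA window (0x00ffffff):
 *
 *      if (dma_set_mask(my_dev, 0xffffffffULL))
 *              ... fail the probe: device cannot address system RAM ...
 */
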
static inline int
dma_get_cache_alignment(void)
{
        /* no easy way to get cache size on all x86, so return the
         * maximum possible, to be safe */
        return (1 << L1_CACHE_SHIFT_MAX);
}

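/*
 * Worked example (the shift value is an assumption taken from i386
 * <asm/cache.h> headers of this era, where L1_CACHE_SHIFT_MAX is 7):
 * the function then reports 1 << 7 = 128 bytes as a safe upper bound
 * on the L1 line size.
 */
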
#define dma_is_consistent(d) (1)

static inline void
dma_cache_sync(void *vaddr, size_t size,
               enum dma_data_direction direction)
{
        flush_write_buffers();
}

#endif /* _ASM_I386_DMA_MAPPING_H */