Details | Last modification | View Log | RSS feed
Rev | Author | Line No. | Line |
---|---|---|---|
/* Copyright (C) 2002 by James.Bottomley@HansenPartnership.com
 *
 * Implements the generic device dma API via the existing pci_ one
 * for unconverted architectures
 */
||
6 | |||
7 | #ifndef _ASM_GENERIC_DMA_MAPPING_H |
||
8 | #define _ASM_GENERIC_DMA_MAPPING_H |
||
9 | |||
10 | /* we implement the API below in terms of the existing PCI one, |
||
11 | * so include it */ |
||
12 | #include <linux/pci.h> |
||
13 | /* need struct page definitions */ |
||
14 | #include <linux/mm.h> |
||
15 | |||
16 | static inline int |
||
17 | dma_supported(struct device *dev, u64 mask) |
||
18 | { |
||
19 | BUG_ON(dev->bus != &pci_bus_type); |
||
20 | |||
21 | return pci_dma_supported(to_pci_dev(dev), mask); |
||
22 | } |
||
23 | |||
24 | static inline int |
||
25 | dma_set_mask(struct device *dev, u64 dma_mask) |
||
26 | { |
||
27 | BUG_ON(dev->bus != &pci_bus_type); |
||
28 | |||
29 | return pci_set_dma_mask(to_pci_dev(dev), dma_mask); |
||
30 | } |
||
31 | |||
32 | static inline void * |
||
33 | dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, |
||
34 | int flag) |
||
35 | { |
||
36 | BUG_ON(dev->bus != &pci_bus_type); |
||
37 | |||
38 | return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle); |
||
39 | } |
||
40 | |||
41 | static inline void |
||
42 | dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, |
||
43 | dma_addr_t dma_handle) |
||
44 | { |
||
45 | BUG_ON(dev->bus != &pci_bus_type); |
||
46 | |||
47 | pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle); |
||
48 | } |
||
49 | |||
50 | static inline dma_addr_t |
||
51 | dma_map_single(struct device *dev, void *cpu_addr, size_t size, |
||
52 | enum dma_data_direction direction) |
||
53 | { |
||
54 | BUG_ON(dev->bus != &pci_bus_type); |
||
55 | |||
56 | return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction); |
||
57 | } |
||
58 | |||
59 | static inline void |
||
60 | dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, |
||
61 | enum dma_data_direction direction) |
||
62 | { |
||
63 | BUG_ON(dev->bus != &pci_bus_type); |
||
64 | |||
65 | pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction); |
||
66 | } |
||
67 | |||
68 | static inline dma_addr_t |
||
69 | dma_map_page(struct device *dev, struct page *page, |
||
70 | unsigned long offset, size_t size, |
||
71 | enum dma_data_direction direction) |
||
72 | { |
||
73 | BUG_ON(dev->bus != &pci_bus_type); |
||
74 | |||
75 | return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction); |
||
76 | } |
||
77 | |||
78 | static inline void |
||
79 | dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, |
||
80 | enum dma_data_direction direction) |
||
81 | { |
||
82 | BUG_ON(dev->bus != &pci_bus_type); |
||
83 | |||
84 | pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction); |
||
85 | } |
||
86 | |||
87 | static inline int |
||
88 | dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, |
||
89 | enum dma_data_direction direction) |
||
90 | { |
||
91 | BUG_ON(dev->bus != &pci_bus_type); |
||
92 | |||
93 | return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction); |
||
94 | } |
||
95 | |||
96 | static inline void |
||
97 | dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, |
||
98 | enum dma_data_direction direction) |
||
99 | { |
||
100 | BUG_ON(dev->bus != &pci_bus_type); |
||
101 | |||
102 | pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction); |
||
103 | } |
||
104 | |||
105 | static inline void |
||
106 | dma_sync_single(struct device *dev, dma_addr_t dma_handle, size_t size, |
||
107 | enum dma_data_direction direction) |
||
108 | { |
||
109 | BUG_ON(dev->bus != &pci_bus_type); |
||
110 | |||
111 | pci_dma_sync_single(to_pci_dev(dev), dma_handle, size, (int)direction); |
||
112 | } |
||
113 | |||
114 | static inline void |
||
115 | dma_sync_sg(struct device *dev, struct scatterlist *sg, int nelems, |
||
116 | enum dma_data_direction direction) |
||
117 | { |
||
118 | BUG_ON(dev->bus != &pci_bus_type); |
||
119 | |||
120 | pci_dma_sync_sg(to_pci_dev(dev), sg, nelems, (int)direction); |
||
121 | } |
||
122 | |||
/* Extensions the generic device DMA API has over the pci_ one. */

/* This shim can only hand out coherent memory, so the noncoherent
 * variants simply alias the coherent allocator/free. */
#define dma_alloc_noncoherent(dev, size, handle, flag) \
	dma_alloc_coherent(dev, size, handle, flag)
#define dma_free_noncoherent(dev, size, vaddr, handle) \
	dma_free_coherent(dev, size, vaddr, handle)
/* Every allocation above is consistent, so this is always true. */
#define dma_is_consistent(dev) (1)
||
128 | |||
129 | static inline int |
||
130 | dma_get_cache_alignment(void) |
||
131 | { |
||
132 | /* no easy way to get cache size on all processors, so return |
||
133 | * the maximum possible, to be safe */ |
||
134 | return (1 << L1_CACHE_SHIFT_MAX); |
||
135 | } |
||
136 | |||
137 | static inline void |
||
138 | dma_sync_single_range(struct device *dev, dma_addr_t dma_handle, |
||
139 | unsigned long offset, size_t size, |
||
140 | enum dma_data_direction direction) |
||
141 | { |
||
142 | /* just sync everything, that's all the pci API can do */ |
||
143 | dma_sync_single(dev, dma_handle, offset+size, direction); |
||
144 | } |
||
145 | |||
146 | static inline void |
||
147 | dma_cache_sync(void *vaddr, size_t size, |
||
148 | enum dma_data_direction direction) |
||
149 | { |
||
150 | /* could define this in terms of the dma_cache ... operations, |
||
151 | * but if you get this on a platform, you should convert the platform |
||
152 | * to using the generic device DMA API */ |
||
153 | BUG(); |
||
154 | } |
||
155 | |||
156 | #endif |
||
157 |