#ifndef _ASM_DMA_MAPPING_H_
#define _ASM_DMA_MAPPING_H_

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;
extern int force_iommu;

struct dma_mapping_ops {
        int             (*mapping_error)(struct device *dev,
                                         dma_addr_t dma_addr);
        void*           (*alloc_coherent)(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp);
        void            (*free_coherent)(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle);
        dma_addr_t      (*map_single)(struct device *hwdev, phys_addr_t ptr,
                                size_t size, int direction);
        /* like map_single, but doesn't check the device mask */
        dma_addr_t      (*map_simple)(struct device *hwdev, phys_addr_t ptr,
                                size_t size, int direction);
        void            (*unmap_single)(struct device *dev, dma_addr_t addr,
                                size_t size, int direction);
        void            (*sync_single_for_cpu)(struct device *hwdev,
                                dma_addr_t dma_handle, size_t size,
                                int direction);
        void            (*sync_single_for_device)(struct device *hwdev,
                                dma_addr_t dma_handle, size_t size,
                                int direction);
        void            (*sync_single_range_for_cpu)(struct device *hwdev,
                                dma_addr_t dma_handle, unsigned long offset,
                                size_t size, int direction);
        void            (*sync_single_range_for_device)(struct device *hwdev,
                                dma_addr_t dma_handle, unsigned long offset,
                                size_t size, int direction);
        void            (*sync_sg_for_cpu)(struct device *hwdev,
                                struct scatterlist *sg, int nelems,
                                int direction);
        void            (*sync_sg_for_device)(struct device *hwdev,
                                struct scatterlist *sg, int nelems,
                                int direction);
        int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
                                int nents, int direction);
        void            (*unmap_sg)(struct device *hwdev,
                                struct scatterlist *sg, int nents,
                                int direction);
        int             (*dma_supported)(struct device *hwdev, u64 mask);
        int             is_phys;
};
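
/*
 * Each DMA backend (nommu, GART, swiotlb, Calgary, ...) provides one of
 * these tables.  A minimal sketch of how a flat-memory backend might fill
 * it in -- the 'example_*' names below are illustrative only, not taken
 * from any real implementation:
 *
 *      static struct dma_mapping_ops example_dma_ops = {
 *              .map_single     = example_map_single,
 *              .unmap_single   = example_unmap_single,
 *              .map_sg         = example_map_sg,
 *              .is_phys        = 1,
 *      };
 *
 *      dma_ops = &example_dma_ops;
 */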

extern struct dma_mapping_ops *dma_ops;

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
        return dma_ops;
#else
        if (unlikely(!dev) || !dev->archdata.dma_ops)
                return dma_ops;
        else
                return dev->archdata.dma_ops;
#endif
}

/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_X86_32
        return 0;
#else
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        if (ops->mapping_error)
                return ops->mapping_error(dev, dma_addr);

        return (dma_addr == bad_dma_address);
#endif
}
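
/*
 * Every streaming mapping a driver creates should be checked with
 * dma_mapping_error() before the address is handed to hardware.
 * Illustrative sketch (dev, buf and len are hypothetical):
 *
 *      dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, bus))
 *              return -ENOMEM;
 */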

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);
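
/*
 * Drivers negotiate their addressing capability before mapping anything;
 * a typical probe-time sketch (the 32-bit mask is just an example):
 *
 *      if (dma_set_mask(dev, DMA_32BIT_MASK))
 *              return -EIO;
 */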

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
               int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->unmap_single)
                ops->unmap_single(dev, addr, size, direction);
}
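
/*
 * A streaming mapping must be torn down with the same size and direction
 * it was created with.  Hypothetical driver sketch:
 *
 *      bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      ... point the hardware at 'bus' and wait for it to finish ...
 *      dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */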

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
           int nents, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        return ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->unmap_sg)
                ops->unmap_sg(hwdev, sg, nents, direction);
}
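
/*
 * Scatter-gather sketch (hypothetical names).  dma_map_sg() may return
 * fewer entries than it was given if the IOMMU merged some, and
 * dma_unmap_sg() must be passed the original nents, not the returned
 * count:
 *
 *      int count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *      if (!count)
 *              goto err;
 *      ... program the hardware with 'count' merged entries ...
 *      dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */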

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(hwdev, dma_handle, size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_range_for_cpu)
                ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
                                               size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_range_for_device)
                ops->sync_single_range_for_device(hwdev, dma_handle,
                                                  offset, size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(hwdev, sg, nelems, direction);
        flush_write_buffers();
}
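
/*
 * The sync calls let the CPU and the device take turns on a streaming
 * mapping without unmapping it.  Hypothetical receive path:
 *
 *      dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
 *      ... the CPU may now read the buffer ...
 *      dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
 *      ... the device may now DMA into it again ...
 */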

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
                                      int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(direction));
        return ops->map_single(dev, page_to_phys(page) + offset,
                               size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, int direction)
{
        dma_unmap_single(dev, addr, size, direction);
}

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction dir)
{
        flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
        /* no easy way to get cache size on all x86, so return the
         * maximum possible, to be safe */
        return boot_cpu_data.x86_clflush_size;
}

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t gfp)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);
        void *memory;

        if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
                return memory;

        if (!dev) {
                dev = &x86_dma_fallback_dev;
                gfp |= GFP_DMA;
        }

        if (ops->alloc_coherent)
                return ops->alloc_coherent(dev, size, dma_handle, gfp);
        return NULL;
}

static inline void dma_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t bus)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        WARN_ON(irqs_disabled());       /* for portability */

        if (dma_release_from_coherent(dev, get_order(size), vaddr))
                return;

        if (ops->free_coherent)
                ops->free_coherent(dev, size, vaddr, bus);
}
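
/*
 * Coherent allocations pair up the same way.  Illustrative sketch:
 *
 *      void *cpu = dma_alloc_coherent(dev, size, &bus, GFP_KERNEL);
 *      if (!cpu)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(dev, size, cpu, bus);
 */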

#endif /* _ASM_DMA_MAPPING_H_ */