#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm.h> /* need struct page */
#include <linux/device.h>

#include <asm/scatterlist.h>

/*
 * DMA-consistent mapping functions.  These allocate/free a region of
 * uncached, non-write-buffered mapped memory for use with DMA
 * devices.  This is the "generic" version.  The PCI-specific version
 * is in pci.h.
 */
extern void consistent_sync(void *kaddr, size_t size, int rw);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24 bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform-specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	return dev->dma_mask && *dev->dma_mask != 0;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

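/*
 * Illustrative sketch (not part of this header): a driver probe path is
 * expected to negotiate its mask with dma_set_mask() before doing any DMA.
 * The function name foo_probe and the 24-bit limit are made up for the
 * example.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		// device can only drive 24 address bits during bus mastering
 *		if (dma_set_mask(dev, 0x00ffffff))
 *			return -EIO;	// mask not supported
 *		...
 *	}
 */
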
static inline int dma_get_cache_alignment(void)
{
	return 32;
}

static inline int dma_is_consistent(dma_addr_t handle)
{
	return !!arch_is_coherent();
}

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == ~0;
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);

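/*
 * Illustrative sketch (not part of this header): allocating and later
 * releasing a small coherent descriptor ring.  The PAGE_SIZE length and
 * the GFP_KERNEL context are assumptions for the example.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	// ... program ring_dma into the device, access 'ring' from the CPU ...
 *	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 */
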
/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * are illegal once this call begins executing.
 */
extern void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle);

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t handle, size_t size);

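/*
 * Illustrative sketch (not part of this header): exporting a coherent
 * buffer through a character device's mmap() method.  The names foo_mmap,
 * foo->cpu_addr, foo->dma_handle and foo->size are hypothetical.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo *foo = file->private_data;
 *
 *		return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
 *					 foo->dma_handle, foo->size);
 *	}
 */
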
/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t handle, size_t size);

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
#ifndef CONFIG_DMABOUNCE
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction dir)
{
	if (!arch_is_coherent())
		consistent_sync(cpu_addr, size, dir);

	return virt_to_dma(dev, (unsigned long)cpu_addr);
}
#else
extern dma_addr_t dma_map_single(struct device *, void *, size_t, enum dma_data_direction);
#endif

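/*
 * Illustrative sketch (not part of this header): streaming DMA on a
 * kmalloc'd buffer for a device-bound transfer.  'buf' and 'len' are
 * hypothetical; the error check uses dma_mapping_error() from above.
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dma))
 *		return -ENOMEM;
 *	// ... hand 'dma' to the device and wait for the transfer ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */
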
/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, (int)dir);
}

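/*
 * Illustrative sketch (not part of this header): mapping part of a page,
 * e.g. a receive fragment.  'page', 'frag_off' and 'frag_len' are
 * hypothetical.
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_page(dev, page, frag_off, frag_len, DMA_FROM_DEVICE);
 *	// ... let the device fill the fragment ...
 *	dma_unmap_page(dev, dma, frag_len, DMA_FROM_DEVICE);
 */
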
/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
		 enum dma_data_direction dir)
{
	/* nothing to do */
}
#else
extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
#endif

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void
dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
	       enum dma_data_direction dir)
{
	dma_unmap_single(dev, handle, size, (int)dir);
}

/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above dma_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for dma_map_single are
 * the same here.
 */
#ifndef CONFIG_DMABOUNCE
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		char *virt;

		sg->dma_address = page_to_dma(dev, sg->page) + sg->offset;
		virt = page_address(sg->page) + sg->offset;

		if (!arch_is_coherent())
			consistent_sync(virt, sg->length, dir);
	}

	return nents;
}
#else
extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
#endif

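/*
 * Illustrative sketch (not part of this header): mapping a scatterlist
 * and walking the resulting DMA segments.  'sglist' and 'nents' are
 * hypothetical.  Note that dma_unmap_sg() takes the original nents, not
 * the count returned by dma_map_sg().
 *
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	for (i = 0; i < count; i++) {
 *		// program sg_dma_address(&sglist[i]) / sg_dma_len(&sglist[i])
 *		// into the device's descriptor ring
 *	}
 *	// ... after the transfer completes ...
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */
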
/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Unmap a set of streaming mode DMA translations.
 * Again, CPU read rules concerning calls here are the same as for
 * dma_unmap_single() above.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
	     enum dma_data_direction dir)
{
	/* nothing to do */
}
#else
extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
#endif


/**
 * dma_sync_single_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the DMA
 * mapping, you must call this function before doing so.  At the
 * point you next give the DMA address back to the device, you
 * must first perform a dma_sync_single_for_device(), and then the
 * device again owns the buffer.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
			enum dma_data_direction dir)
{
	if (!arch_is_coherent())
		consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
			   enum dma_data_direction dir)
{
	if (!arch_is_coherent())
		consistent_sync((void *)dma_to_virt(dev, handle), size, dir);
}
#else
extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction);
extern void dma_sync_single_for_device(struct device*, dma_addr_t, size_t, enum dma_data_direction);
#endif


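/*
 * Illustrative sketch (not part of this header): inspecting a mapped
 * buffer from the CPU between transfers without unmapping it.  'dma',
 * 'buf' and 'len' are hypothetical and were set up by dma_map_single().
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	// ... the CPU may now safely read 'buf' ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	// ... the device owns the buffer again and may DMA into it ...
 */
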
/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as dma_sync_single_for_* but for a scatter-gather list,
 * same rules and usage.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
		    enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		char *virt = page_address(sg->page) + sg->offset;
		if (!arch_is_coherent())
			consistent_sync(virt, sg->length, dir);
	}
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		char *virt = page_address(sg->page) + sg->offset;
		if (!arch_is_coherent())
			consistent_sync(virt, sg->length, dir);
	}
}
#else
extern void dma_sync_sg_for_cpu(struct device*, struct scatterlist*, int, enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device*, struct scatterlist*, int, enum dma_data_direction);
#endif

#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);

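/*
 * Illustrative sketch (not part of this header): platform code enabling
 * bouncing for a device behind a limited DMA window.  The 512-byte and
 * 4096-byte pool sizes are assumptions for the example.
 *
 *	if (dmabounce_register_dev(dev, 512, 4096))
 *		printk(KERN_ERR "could not register dmabounce pools\n");
 *	...
 *	// on device removal:
 *	dmabounce_unregister_dev(dev);
 */
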
/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);

/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not. The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 */
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
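
/*
 * Illustrative sketch (not part of this header): a platform implementation
 * that bounces anything falling outside a hypothetical 64MB inbound window
 * starting at PLATFORM_DMA_WINDOW_BASE (a made-up constant).
 *
 *	int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
 *	{
 *		return (addr < PLATFORM_DMA_WINDOW_BASE) ||
 *		       (addr + size > PLATFORM_DMA_WINDOW_BASE + SZ_64M);
 *	}
 */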
#endif /* CONFIG_DMABOUNCE */

#endif /* __KERNEL__ */
#endif