
Lines Matching refs:device

7 #include <linux/device.h>
20 * Return whether the given device DMA address mask can be supported
21 * properly. For example, if your device can only drive the low 24-bits
28 static inline int dma_supported(struct device *dev, u64 mask)
33 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
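
A minimal sketch of how a driver might use the two mask calls above during probe; the 24-bit mask mirrors the example in the comment, and mydev_setup_dma() and its caller are hypothetical, not part of this header.

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

static int mydev_setup_dma(struct device *dev)
{
	/* Hypothetical device that can only drive the low 24 address bits. */
	u64 mask = 0x00ffffffULL;

	if (!dma_supported(dev, mask))
		return -EIO;

	/* Record the mask so later mappings and allocations honour it. */
	return dma_set_mask(dev, mask);
}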
63 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
67 * Allocate some uncached, unbuffered memory for a device for
70 * device-viewed address.
73 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
77 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
80 * @handle: device-view address returned from dma_alloc_coherent
89 dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
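
A sketch of a matched allocate/free pair using the prototypes above; the ring-buffer naming and MYDEV_RING_SIZE are assumptions for illustration.

#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#define MYDEV_RING_SIZE	4096	/* hypothetical buffer size */

static void *mydev_alloc_ring(struct device *dev, dma_addr_t *dma)
{
	/* Returns the CPU-visible pointer; the device-visible address
	 * is written through *dma. */
	return dma_alloc_coherent(dev, MYDEV_RING_SIZE, dma, GFP_KERNEL);
}

static void mydev_free_ring(struct device *dev, void *cpu_addr, dma_addr_t dma)
{
	/* Size, CPU address and handle must match the allocation. */
	dma_free_coherent(dev, MYDEV_RING_SIZE, cpu_addr, dma);
}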
94 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
97 * @handle: device-view address returned from dma_alloc_coherent
104 int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
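
A sketch of dma_mmap_coherent() used from a character-device mmap handler to expose a buffer obtained with dma_alloc_coherent() to userspace; struct mydev and its fields are hypothetical.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

struct mydev {
	struct device *dev;
	void *cpu_addr;		/* from dma_alloc_coherent() */
	dma_addr_t handle;
	size_t size;
};

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mydev *md = file->private_data;

	/* Map the coherent buffer into the calling process. */
	return dma_mmap_coherent(md->dev, vma, md->cpu_addr, md->handle, md->size);
}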
110 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
114 * Allocate some uncached, buffered memory for a device for
117 * device-viewed address.
120 dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
125 int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
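
The write-combining variants follow the same pattern; a rough sketch for a frame-buffer-style buffer that the CPU streams into and userspace maps. The function names are assumptions.

#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

static void *myfb_alloc(struct device *dev, size_t size, dma_addr_t *dma)
{
	/* Uncached but bufferable: suits streaming CPU writes. */
	return dma_alloc_writecombine(dev, size, dma, GFP_KERNEL);
}

static int myfb_mmap(struct device *dev, struct vm_area_struct *vma,
		     void *cpu_addr, dma_addr_t handle, size_t size)
{
	/* Userspace sees the buffer with the same write-combining attributes. */
	return dma_mmap_writecombine(dev, vma, cpu_addr, handle, size);
}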
131 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
139 * The device owns this memory once this call has completed. The CPU
145 dma_map_single(struct device *dev, void *cpu_addr, size_t size,
154 extern dma_addr_t dma_map_single(struct device *,void *, size_t, enum dma_data_direction);
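
A sketch of the streaming map on a hypothetical transmit path; buf and len come from the caller, and error checking is omitted because its form varies between kernel versions.

#include <linux/dma-mapping.h>

static dma_addr_t mydev_map_tx(struct device *dev, void *buf, size_t len)
{
	/* After this call the device owns the buffer; the CPU must not
	 * touch it until dma_unmap_single() is called. */
	return dma_map_single(dev, buf, len, DMA_TO_DEVICE);
}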
159 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
168 * The device owns this memory once this call has completed. The CPU
173 dma_map_page(struct device *dev, struct page *page,
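
The page-based variant, for data described by a struct page (for example straight from the page allocator) rather than by a kernel virtual address; the receive-path naming is an assumption.

#include <linux/mm.h>
#include <linux/dma-mapping.h>

static dma_addr_t mydev_map_rx_page(struct device *dev, struct page *page,
				    unsigned long offset, size_t len)
{
	/* Map len bytes starting at offset within the page for the
	 * device to write into. */
	return dma_map_page(dev, page, offset, len, DMA_FROM_DEVICE);
}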
182 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
192 * whatever the device wrote there.
196 dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
202 extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
207 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
217 * whatever the device wrote there.
220 dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
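
Completion-side counterparts to the mapping sketches above; handle and len are assumed to have been saved at map time, and the size and direction must match the original mapping.

#include <linux/dma-mapping.h>

static void mydev_tx_done(struct device *dev, dma_addr_t handle, size_t len)
{
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	/* The CPU may touch the buffer again from here on. */
}

static void mydev_rx_done(struct device *dev, dma_addr_t handle, size_t len)
{
	dma_unmap_page(dev, handle, len, DMA_FROM_DEVICE);
	/* The CPU now reads whatever the device wrote into the page. */
}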
228 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
245 * Device ownership issues as mentioned above for dma_map_single are
250 dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
268 extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
273 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
284 dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
291 extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
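
A scatter-gather round-trip sketch: dma_map_sg() may return fewer entries than it was handed (adjacent entries can be merged), mapped entries are read back with sg_dma_address()/sg_dma_len(), and dma_unmap_sg() is passed the original nents. The hardware-descriptor step is a placeholder.

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

static int mydev_submit_sg(struct device *dev, struct scatterlist *sg, int nents)
{
	int i, count;

	count = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
	if (count == 0)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		/* Placeholder: program one hardware descriptor per entry,
		 * using sg_dma_address(&sg[i]) and sg_dma_len(&sg[i]). */
	}

	/* ... once the transfer has completed: */
	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
	return 0;
}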
297 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
310 * device again owns the buffer.
314 dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
322 dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
329 extern void dma_sync_single_for_cpu(struct device*, dma_addr_t, size_t, enum dma_data_direction);
330 extern void dma_sync_single_for_device(struct device*, dma_addr_t, size_t, enum dma_data_direction);
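
A sketch of the single-buffer ownership dance when a streaming mapping is reused: sync for the CPU before the driver inspects device-written data, sync for the device before handing the buffer back. The function name is an assumption.

#include <linux/dma-mapping.h>

static void mydev_rx_peek(struct device *dev, dma_addr_t handle, size_t len)
{
	/* Give the buffer to the CPU so it can safely read it. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... inspect the data here (e.g. peek at a packet header) ... */

	/* Hand ownership back before the device DMAs into it again. */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}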
336 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
349 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
362 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
374 extern void dma_sync_sg_for_cpu(struct device*, struct scatterlist*, int, enum dma_data_direction);
375 extern void dma_sync_sg_for_device(struct device*, struct scatterlist*, int, enum dma_data_direction);
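
The same ownership handover for a mapped scatterlist, as a sketch; nents is the value originally passed to dma_map_sg().

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

static void mydev_sg_recycle(struct device *dev, struct scatterlist *sg, int nents)
{
	/* CPU examines what the device wrote ... */
	dma_sync_sg_for_cpu(dev, sg, nents, DMA_FROM_DEVICE);

	/* ... then the list is handed back for another transfer. */
	dma_sync_sg_for_device(dev, sg, nents, DMA_FROM_DEVICE);
}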
394 * @dev: valid struct device pointer
399 * a device as requiring DMA buffer bouncing. The function will allocate
400 * appropriate DMA pools for the device.
403 extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);
408 * @dev: valid struct device pointer
410 * This function should be called by low-level platform code when device
415 extern void dmabounce_unregister_dev(struct device *);
420 * @dev: valid struct device pointer
433 extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
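
A rough sketch of how low-level platform code might use the dmabounce hooks above: register a device with small/large pool sizes and supply dma_needs_bounce() as the bounce policy. The 1 MB addressing limit and the pool sizes are purely illustrative assumptions.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Platform-supplied policy: bounce any buffer that ends above a hypothetical
 * 1 MB window the device can address directly. */
int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	return (addr + size) > 0x00100000;
}

static int myplat_add_device(struct device *dev)
{
	/* Illustrative small/large bounce pool sizes, in bytes. */
	return dmabounce_register_dev(dev, 512, 4096);
}

static void myplat_remove_device(struct device *dev)
{
	dmabounce_unregister_dev(dev);
}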