#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H

#include <linux/kmemcheck.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm-generic/dma-coherent.h>
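
/*
 * Streaming DMA mapping helpers.  Each helper dispatches to the
 * architecture's dma_map_ops for @dev; every successful map must be
 * paired with the matching unmap call, using the same size and
 * direction.
 */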
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
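
/*
 * Illustrative sketch (not part of this header): a typical streaming
 * map/unmap cycle around a single transfer.  "buf" and "len" stand in
 * for hypothetical driver state.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... hand "handle" to the device and wait for completion ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */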

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
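
/*
 * Illustrative sketch: mapping a scatterlist and programming the
 * resulting DMA segments.  dma_map_sg() may coalesce entries, so the
 * hardware loop must use the returned count, while the unmap uses the
 * original nents.  "program_hw_entry" is a hypothetical driver helper.
 *
 *	int i, count;
 *	struct scatterlist *s;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sglist, s, count, i)
 *		program_hw_entry(sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */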
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
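
/*
 * Illustrative sketch: a buffer mapped DMA_FROM_DEVICE must be handed
 * back to the CPU before the driver reads it, and back to the device
 * before the hardware reuses it.  "handle" and "len" are placeholders.
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... CPU inspects the received data ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */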
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
				  unsigned long vm_flags,
				  pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			     unsigned long vm_flags, pgprot_t prot,
			     const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
/**
* dma_mmap_attrs - map a coherent DMA allocation into user space
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
* @vma: vm_area_struct describing requested user mapping
* @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
* @handle: device-view address returned from dma_alloc_attrs
* @size: size of memory originally requested in dma_alloc_attrs
* @attrs: attributes of mapping properties requested in dma_alloc_attrs
*
* Map a coherent DMA buffer previously allocated by dma_alloc_attrs
* into user space. The coherent DMA buffer must not be freed by the
* driver until the user space mapping has been released.
*/
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);

	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
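
/*
 * Illustrative sketch: exporting a coherent buffer from a hypothetical
 * driver's ->mmap() file operation.  "struct my_priv" and its fields
 * are placeholders for state saved at allocation time.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *					 priv->handle, priv->size);
 *	}
 */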
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);

	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag) (true)
#endif
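
/*
 * dma_alloc_attrs() first tries any per-device coherent memory pool
 * (dma_alloc_from_coherent), then lets the architecture veto or adjust
 * the allocation (arch_dma_alloc_attrs), and finally falls back to the
 * ->alloc() hook.  It returns the kernel virtual address, or NULL on
 * failure; the device-view address is stored through @dma_handle.
 */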
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
}
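
/*
 * Illustrative sketch: allocating and releasing a coherent descriptor
 * ring.  RING_BYTES is a hypothetical constant.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */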
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t gfp)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
					void *cpu_addr, dma_addr_t dma_handle)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (get_dma_ops(dev)->mapping_error)
		return get_dma_ops(dev)->mapping_error(dev, dma_addr);

#ifdef DMA_ERROR_CODE
	return dma_addr == DMA_ERROR_CODE;
#else
	return 0;
#endif
}
#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
#endif

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, mask);

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
#endif
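
/*
 * Illustrative sketch: a probe routine limiting a hypothetical "pdev"
 * to 32-bit DMA addressing and bailing out if that is unsupported.
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */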
#endif /* _ASM_GENERIC_DMA_MAPPING_H */