/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * (C) Copyright 2007
 * Stelian Pop <stelian@popies.net>
 * Lead Tech Design <www.leadtechdesign.com>
 */
#ifndef __ASM_X86_DMA_MAPPING_H
#define __ASM_X86_DMA_MAPPING_H

#include <common.h>
#include <asm/cache.h>
#include <cpu_func.h>
#include <linux/dma-direction.h>
#include <linux/types.h>
#include <malloc.h>

#define dma_mapping_error(x, y) 0

19static inline void *dma_alloc_coherent(size_t len, unsigned long *handle)
20{
21 *handle = (unsigned long)memalign(ARCH_DMA_MINALIGN, len);
22 return (void *)*handle;
23}
24
/**
 * dma_free_coherent() - release a buffer from dma_alloc_coherent()
 * @addr: buffer address previously returned by dma_alloc_coherent();
 *        NULL is accepted and is a no-op, as with free()
 */
static inline void dma_free_coherent(void *addr)
{
	free(addr);
}
29
Masahiro Yamada7167d672020-02-14 16:40:17 +090030static inline dma_addr_t dma_map_single(void *vaddr, size_t len,
31 enum dma_data_direction dir)
Andy Shevchenkof2b047f2017-07-06 14:41:51 +030032{
Vignesh Raghavendra4a81a212020-01-16 14:23:45 +053033 unsigned long addr = (unsigned long)vaddr;
34
35 len = ALIGN(len, ARCH_DMA_MINALIGN);
36
37 if (dir == DMA_FROM_DEVICE)
38 invalidate_dcache_range(addr, addr + len);
39 else
40 flush_dcache_range(addr, addr + len);
41
42 return addr;
Andy Shevchenkof2b047f2017-07-06 14:41:51 +030043}
44
45static inline void dma_unmap_single(volatile void *vaddr, size_t len,
Vignesh Raghavendra4a81a212020-01-16 14:23:45 +053046 enum dma_data_direction dir)
Andy Shevchenkof2b047f2017-07-06 14:41:51 +030047{
Vignesh Raghavendra4a81a212020-01-16 14:23:45 +053048 unsigned long addr = (unsigned long)vaddr;
49
50 len = ALIGN(len, ARCH_DMA_MINALIGN);
51
52 if (dir != DMA_TO_DEVICE)
53 invalidate_dcache_range(addr, addr + len);
Andy Shevchenkof2b047f2017-07-06 14:41:51 +030054}

#endif /* __ASM_X86_DMA_MAPPING_H */