/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (C) 2013 Andes Technology Corporation
 * Ken Kuo, Andes Technology Corporation <ken_kuo@andestech.com>
 */
#ifndef __ASM_NDS_DMA_MAPPING_H
#define __ASM_NDS_DMA_MAPPING_H

#include <common.h>
#include <asm/cache.h>
#include <cpu_func.h>
#include <linux/dma-direction.h>
#include <malloc.h>

15static void *dma_alloc_coherent(size_t len, unsigned long *handle)
16{
17 *handle = (unsigned long)memalign(ARCH_DMA_MINALIGN, len);
18 return (void *)*handle;
19}
21static inline unsigned long dma_map_single(volatile void *vaddr, size_t len,
22 enum dma_data_direction dir)
23{
Vignesh Raghavendrac0a5a812020-01-16 14:23:45 +053024 unsigned long addr = (unsigned long)vaddr;
25
26 len = ALIGN(len, ARCH_DMA_MINALIGN);
27
28 if (dir == DMA_FROM_DEVICE)
29 invalidate_dcache_range(addr, addr + len);
30 else
31 flush_dcache_range(addr, addr + len);
32
33 return addr;
ken kuo9f128bc2013-08-06 01:00:53 +080034}
36static inline void dma_unmap_single(volatile void *vaddr, size_t len,
Vignesh Raghavendrac0a5a812020-01-16 14:23:45 +053037 enum dma_data_direction dir)
ken kuo9f128bc2013-08-06 01:00:53 +080038{
Vignesh Raghavendrac0a5a812020-01-16 14:23:45 +053039 unsigned long addr = (unsigned long)vaddr;
40
41 len = ALIGN(len, ARCH_DMA_MINALIGN);
42
43 if (dir != DMA_TO_DEVICE)
44 invalidate_dcache_range(addr, addr + len);
ken kuo9f128bc2013-08-06 01:00:53 +080045}

#endif /* __ASM_NDS_DMA_MAPPING_H */