blob: 81ea8fb126fa2fec7230479ae77d744cde337e4c [file] [log] [blame]
Heiko Schocher4f7a9a32014-06-24 10:10:03 +02001
2#include <common.h>
Masahiro Yamada3c884402018-08-24 19:30:15 +09003#include <memalign.h>
Heiko Schocher4f7a9a32014-06-24 10:10:03 +02004#include <linux/compat.h>
5
/*
 * Minimal stand-in for the Linux kernel's `current` task pointer.
 * NOTE(review): a single static task with pid 1 appears to be all that
 * ported kernel code needs here (presumably only current->pid is read
 * by consumers of <linux/compat.h>) — confirm against callers.
 */
struct p_current cur = {
	.pid = 1,
};
__maybe_unused struct p_current *current = &cur;
10
/*
 * copy_from_user() - compat shim for the kernel API.
 *
 * In U-Boot there is no user/kernel address-space split, so this is a
 * plain memcpy. Returns 0 (number of bytes NOT copied), matching the
 * kernel contract for a fully successful copy.
 *
 * Fix: drop the casts on both arguments — `dest` is already void *,
 * and casting `src` to (void *) needlessly stripped its const
 * qualifier (memcpy takes `const void *` for the source anyway).
 */
unsigned long copy_from_user(void *dest, const void *src,
			     unsigned long count)
{
	memcpy(dest, src, count);
	return 0;
}
17
18void *kmalloc(size_t size, int flags)
19{
Masahiro Yamada7b5ec7e2015-07-13 13:17:07 +090020 void *p;
21
Masahiro Yamada3c884402018-08-24 19:30:15 +090022 p = malloc_cache_aligned(size);
Marek Szyprowskife77e432019-10-02 14:37:20 +020023 if (p && flags & __GFP_ZERO)
Masahiro Yamada7b5ec7e2015-07-13 13:17:07 +090024 memset(p, 0, size);
Heiko Schocher4f7a9a32014-06-24 10:10:03 +020025
Masahiro Yamada7b5ec7e2015-07-13 13:17:07 +090026 return p;
Heiko Schocher4f7a9a32014-06-24 10:10:03 +020027}
28
Heiko Schocher4f7a9a32014-06-24 10:10:03 +020029struct kmem_cache *get_mem(int element_sz)
30{
31 struct kmem_cache *ret;
32
33 ret = memalign(ARCH_DMA_MINALIGN, sizeof(struct kmem_cache));
34 ret->sz = element_sz;
35
36 return ret;
37}
38
39void *kmem_cache_alloc(struct kmem_cache *obj, int flag)
40{
Masahiro Yamada3c884402018-08-24 19:30:15 +090041 return malloc_cache_aligned(obj->sz);
Heiko Schocher4f7a9a32014-06-24 10:10:03 +020042}