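/*
 * Minimal implementations of Linux kernel memory-allocation and
 * process APIs, allowing code imported from the kernel to build in
 * this bare-metal environment.
 */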
#include <common.h>
#include <memalign.h>
#include <linux/compat.h>

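/*
 * Stand-in for the kernel's per-task "current" pointer; there is only
 * a single execution context here, so one static instance with pid 1
 * serves for everything.
 */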
struct p_current cur = {
	.pid = 1,
};
__maybe_unused struct p_current *current = &cur;

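/*
 * With no user/kernel address-space split, copy_from_user() reduces to
 * a plain memcpy(). It returns 0, i.e. "no bytes left uncopied", the
 * kernel convention for success.
 */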
unsigned long copy_from_user(void *dest, const void *src,
			     unsigned long count)
{
	memcpy(dest, src, count);
	return 0;
}

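/*
 * kmalloc() maps to a cache-aligned allocation so the returned buffer
 * is safe to use for DMA. Of the GFP flags only __GFP_ZERO is honoured;
 * the others have no meaning without a paging allocator and are
 * ignored.
 */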
void *kmalloc(size_t size, int flags)
{
	void *p;

	p = malloc_cache_aligned(size);
	if (p && flags & __GFP_ZERO)
		memset(p, 0, size);

	return p;
}

struct kmem_cache *get_mem(int element_sz)
{
	struct kmem_cache *ret;

	ret = memalign(ARCH_DMA_MINALIGN, sizeof(struct kmem_cache));
	if (!ret)
		return NULL;
	ret->sz = element_sz;

	return ret;
}

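/*
 * Each kmem_cache_alloc() is simply a fresh cache-aligned allocation
 * of the recorded element size; the GFP flag argument is ignored.
 */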
void *kmem_cache_alloc(struct kmem_cache *obj, int flag)
{
	return malloc_cache_aligned(obj->sz);
}

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}