/*
  include/common/memory.h
  Memory management definitions.

  Copyright (C) 2000-2007 Willy Tarreau - w@1wt.eu

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation, version 2.1
  exclusively.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/

#ifndef _COMMON_MEMORY_H
#define _COMMON_MEMORY_H

#include <stdlib.h>

#include <common/config.h>
#include <common/mini-clist.h>

#define sizeof_requri   REQURI_LEN
#define sizeof_capture  CAPTURE_LEN
/*
 * Returns a pointer to an area of <__len> bytes taken from the pool <__pool>
 * or dynamically allocated. In the first case, <__pool> is updated to point
 * to the next element in the list.
 */
#define pool_alloc_from(__pool, __len)                          \
({                                                              \
        void *__p;                                              \
        if ((__p = (__pool)) == NULL)                           \
                __p = malloc(((__len) >= sizeof(void *)) ?      \
                             (__len) : sizeof(void *));         \
        else {                                                  \
                __pool = *(void **)(__pool);                    \
        }                                                       \
        __p;                                                    \
})
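
/*
 * Usage sketch (illustrative only; <pool_task> and <struct task> are
 * hypothetical names, not declared by this header):
 *
 *     void *pool_task = NULL;
 *     struct task *t = pool_alloc_from(pool_task, sizeof(struct task));
 *
 * When <pool_task> is empty (NULL), the area comes from malloc(); the size
 * is rounded up to sizeof(void *) so that the area can later hold the
 * free-list link written by pool_free_to().
 */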

/*
 * Puts a memory area back to the corresponding pool.
 * Items are chained directly through a pointer that
 * is written in the beginning of the memory area, so
 * there's no need for any carrier cell. This implies
 * that each memory area is at least as big as one
 * pointer.
 */
#define pool_free_to(__pool, __ptr)                             \
({                                                              \
        *(void **)(__ptr) = (void *)(__pool);                   \
        __pool = (void *)(__ptr);                               \
})
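
/*
 * Continuing the sketch above (hypothetical names): releasing <t> pushes it
 * back on top of the free list, and the next pool_alloc_from() call returns
 * the same area without going through malloc():
 *
 *     pool_free_to(pool_task, t);   // *(void **)t = old head; head = t
 *     t = pool_alloc_from(pool_task, sizeof(struct task));   // t again
 *
 * The link pointer overwrites the first bytes of the freed area, which is
 * why every pooled object must be at least sizeof(void *) bytes long.
 */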


#ifdef CONFIG_HAP_MEM_OPTIM
/*
 * Returns a pointer to type <type> taken from the
 * pool <pool_type> or dynamically allocated. In the
 * first case, <pool_type> is updated to point to the
 * next element in the list.
 */
#define pool_alloc(type)                                        \
({                                                              \
        void *__p;                                              \
        if ((__p = pool_##type) == NULL)                        \
                __p = malloc(sizeof_##type);                    \
        else {                                                  \
                pool_##type = *(void **)pool_##type;            \
        }                                                       \
        __p;                                                    \
})

/*
 * Puts a memory area back to the corresponding pool.
 * Items are chained directly through a pointer that
 * is written in the beginning of the memory area, so
 * there's no need for any carrier cell. This implies
 * that each memory area is at least as big as one
 * pointer.
 */
#define pool_free(type, ptr)                                    \
({                                                              \
        *(void **)ptr = (void *)pool_##type;                    \
        pool_##type = (void *)ptr;                              \
})

#else
#define pool_alloc(type) (calloc(1,sizeof_##type))
#define pool_free(type, ptr) (free(ptr))
#endif /* CONFIG_HAP_MEM_OPTIM */
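
/*
 * Typed-pool convention sketch (hypothetical <buffer> type, not provided by
 * this header): pool_alloc()/pool_free() expect the caller to define both a
 * <pool_##type> free-list head and a <sizeof_##type> constant:
 *
 *     #define sizeof_buffer sizeof(struct buffer)
 *     void *pool_buffer = NULL;
 *
 *     struct buffer *b = pool_alloc(buffer);   // malloc() or pop from pool_buffer
 *     pool_free(buffer, b);                    // push back onto pool_buffer
 *
 * Note that with CONFIG_HAP_MEM_OPTIM a recycled area keeps its previous
 * contents, while the calloc() fallback always returns zeroed memory.
 */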

/*
 * This function destroys a pool by freeing it completely.
 * This should be called only under extreme circumstances.
 */
static inline void pool_destroy(void **pool)
{
        void *temp, *next;
        next = pool;
        while (next) {
                temp = next;
                next = *(void **)temp;
                free(temp);
        }
}
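
/*
 * Sketch (hypothetical names as above): once no object from the pool can
 * still be in use, the whole free list may be released in one call:
 *
 *     pool_destroy((void **)pool_buffer);
 *     pool_buffer = NULL;   // the head itself is not reset by pool_destroy()
 */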


/******* pools version 2 ********/

#define MEM_F_SHARED    0x1

struct pool_head {
        void **free_list;
        struct list list;       /* list of all known pools */
        unsigned int used;      /* how many chunks are currently in use */
        unsigned int allocated; /* how many chunks have been allocated */
        unsigned int limit;     /* hard limit on the number of chunks */
        unsigned int minavail;  /* how many chunks are expected to be used */
        unsigned int size;      /* chunk size */
        unsigned int flags;     /* MEM_F_* */
        char name[9];           /* name of the pool */
};
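
/*
 * Reading the counters (an inference from the fields above, not a stated
 * guarantee): <allocated> counts every chunk obtained for this pool and
 * <used> those currently handed out, so the free_list is expected to hold
 * (allocated - used) chunks of <size> bytes each.
 */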


/* Allocate a new entry for pool <pool>, and return it for immediate use.
 * NULL is returned if no memory is available for a new creation.
 */
void *pool_refill_alloc(struct pool_head *pool);

/* Tries to find an existing shared pool with the same characteristics and
 * returns it, otherwise creates a new one. NULL is returned if no memory
 * is available for a new creation.
 */
struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags);
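
/*
 * Registration sketch (hypothetical pool and type names): each subsystem
 * creates or shares its pool once at start-up, then allocates from it with
 * pool_alloc2()/pool_free2() below:
 *
 *     struct pool_head *pool2_connection;
 *
 *     pool2_connection = create_pool("conn", sizeof(struct connection),
 *                                    MEM_F_SHARED);
 *     if (!pool2_connection)
 *             exit(1);   // out of memory
 *
 * With MEM_F_SHARED, an existing pool with the same characteristics may be
 * returned instead of a brand new one.
 */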

/* Dump statistics on pools usage.
 */
void dump_pools(void);

/*
 * Returns a pointer to an entry taken from the pool <pool> or
 * dynamically allocated. In the first case, the pool's free_list
 * is updated to point to the next element in the list and its
 * <used> count is incremented.
 */
#define pool_alloc2(pool)                                       \
({                                                              \
        void *__p;                                              \
        if ((__p = pool.free_list) == NULL)                     \
                __p = pool_refill_alloc(&pool);                 \
        else {                                                  \
                pool.free_list = *(void **)pool.free_list;      \
                pool.used++;                                    \
        }                                                       \
        __p;                                                    \
})
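
/*
 * Usage sketch (continuing the hypothetical pool2_connection example): the
 * macro accesses its argument with '.', so the pool object itself is passed,
 * e.g. by dereferencing the pointer returned by create_pool():
 *
 *     struct connection *conn = pool_alloc2(*pool2_connection);
 *     if (!conn)
 *             return -1;   // allocation failed even after the refill attempt
 */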

/*
 * Puts a memory area back to the corresponding pool.
 * Items are chained directly through a pointer that
 * is written in the beginning of the memory area, so
 * there's no need for any carrier cell. This implies
 * that each memory area is at least as big as one
 * pointer.
 */
#define pool_free2(pool, ptr)                                   \
({                                                              \
        *(void **)ptr = (void *)pool.free_list;                 \
        pool.free_list = (void *)ptr;                           \
        pool.used--;                                            \
})
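
/*
 * Releasing follows the same convention (hypothetical names as above): the
 * object goes back to the front of the pool's free list and the <used>
 * counter is decremented:
 *
 *     pool_free2(*pool2_connection, conn);
 */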


#endif /* _COMMON_MEMORY_H */

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */