blob: 3827a9b60a7390f5d467c505eed53d2ade266c03 [file] [log] [blame]
Willy Tarreauc7e42382012-08-24 19:22:53 +02001/*
2 * include/common/buffer.h
3 * Buffer management definitions, macros and inline functions.
4 *
5 * Copyright (C) 2000-2012 Willy Tarreau - w@1wt.eu
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation, version 2.1
10 * exclusively.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22#ifndef _COMMON_BUFFER_H
23#define _COMMON_BUFFER_H
24
25#include <stdio.h>
26#include <stdlib.h>
27#include <string.h>
28
Willy Tarreau41806d12018-07-11 09:39:05 +020029#include <common/buf.h>
Willy Tarreau8c89c202012-09-28 16:02:48 +020030#include <common/chunk.h>
Willy Tarreauc7e42382012-08-24 19:22:53 +020031#include <common/config.h>
Willy Tarreau6634b632017-09-22 15:02:54 +020032#include <common/ist.h>
Willy Tarreauea1b06d2018-07-12 09:02:47 +020033#include <common/istbuf.h>
Willy Tarreau9b28e032012-10-12 23:49:43 +020034#include <common/memory.h>
Willy Tarreauc7e42382012-08-24 19:22:53 +020035
36
/* An element of the <buffer_wq> list. It represents an object that needs to
 * acquire a buffer to continue its processing.
 */
struct buffer_wait {
	void *target;              /* the waiting object that should be woken up */
	int (*wakeup_cb)(void *);  /* function used to wake up the <target>, passed as argument */
	struct list list;          /* next element in the <buffer_wq> list */
};
44
Willy Tarreaubafbe012017-11-24 17:34:44 +010045extern struct pool_head *pool_head_buffer;
Willy Tarreau2a4b5432014-11-24 11:39:34 +010046extern struct buffer buf_empty;
Willy Tarreauf2f7d6b2014-11-24 11:55:08 +010047extern struct buffer buf_wanted;
Christopher Fauleta73e59b2016-12-09 17:30:18 +010048extern struct list buffer_wq;
Willy Tarreau53bae852017-11-26 11:00:37 +010049__decl_hathreads(extern HA_SPINLOCK_T buffer_wq_lock);
Willy Tarreauc7e42382012-08-24 19:22:53 +020050
Willy Tarreau9b28e032012-10-12 23:49:43 +020051int init_buffer();
Christopher Fauletad405f12017-08-29 15:30:11 +020052void deinit_buffer();
Willy Tarreauaf819352012-08-27 22:08:00 +020053int buffer_replace2(struct buffer *b, char *pos, char *end, const char *str, int len);
54int buffer_insert_line2(struct buffer *b, char *pos, const char *str, int len);
Willy Tarreauc7e42382012-08-24 19:22:53 +020055void buffer_dump(FILE *o, struct buffer *b, int from, int to);
Willy Tarreauc7e42382012-08-24 19:22:53 +020056
57/*****************************************************************/
58/* These functions are used to compute various buffer area sizes */
59/*****************************************************************/
60
Willy Tarreauc7e42382012-08-24 19:22:53 +020061
Willy Tarreaueac52592018-06-15 13:59:36 +020062
63/***** FIXME: OLD API BELOW *****/
Willy Tarreauc7e42382012-08-24 19:22:53 +020064
Willy Tarreauc7e42382012-08-24 19:22:53 +020065/* Normalizes a pointer after an addition */
66static inline char *buffer_wrap_add(const struct buffer *buf, char *ptr)
67{
Willy Tarreau591d4452018-06-15 17:21:00 +020068 if (ptr - buf->size >= b_orig(buf))
Willy Tarreauc7e42382012-08-24 19:22:53 +020069 ptr -= buf->size;
70 return ptr;
71}
72
Willy Tarreauc7e42382012-08-24 19:22:53 +020073/* Normalizes a pointer which is supposed to be relative to the beginning of a
74 * buffer, so that wrapping is correctly handled. The intent is to use this
75 * when increasing a pointer. Note that the wrapping test is only performed
76 * once, so the original pointer must be between ->data-size and ->data+2*size-1,
77 * otherwise an invalid pointer might be returned.
78 */
79static inline const char *buffer_pointer(const struct buffer *buf, const char *ptr)
80{
Willy Tarreau591d4452018-06-15 17:21:00 +020081 if (ptr < b_orig(buf))
Willy Tarreauc7e42382012-08-24 19:22:53 +020082 ptr += buf->size;
Willy Tarreau591d4452018-06-15 17:21:00 +020083 else if (ptr - buf->size >= b_orig(buf))
Willy Tarreauc7e42382012-08-24 19:22:53 +020084 ptr -= buf->size;
85 return ptr;
86}
87
88/* Returns the distance between two pointers, taking into account the ability
89 * to wrap around the buffer's end.
90 */
91static inline int buffer_count(const struct buffer *buf, const char *from, const char *to)
92{
93 int count = to - from;
Willy Tarreaubf439272013-04-02 01:25:57 +020094
95 count += count < 0 ? buf->size : 0;
Willy Tarreauc7e42382012-08-24 19:22:53 +020096 return count;
97}
98
Willy Tarreauc7e42382012-08-24 19:22:53 +020099/* Return 1 if the buffer has less than 1/4 of its capacity free, otherwise 0 */
100static inline int buffer_almost_full(const struct buffer *buf)
101{
Willy Tarreau4428a292014-11-28 20:54:13 +0100102 if (buf == &buf_empty)
103 return 0;
104
Willy Tarreaubbc68df2018-06-06 14:30:50 +0200105 return b_almost_full(buf);
Willy Tarreauc7e42382012-08-24 19:22:53 +0200106}
107
/* Replaces the region [<pos>, <end>) of buffer <b> with the NUL-terminated
 * string <str>, by delegating to buffer_replace2() with the string's length.
 * The shift value (positive or negative) is returned. If there's no space
 * left, the move is not done.
 */
static inline int buffer_replace(struct buffer *b, char *pos, char *end, const char *str)
{
	int len = strlen(str);

	return buffer_replace2(b, pos, end, str, len);
}
119
Willy Tarreauf2f7d6b2014-11-24 11:55:08 +0100120/* Allocates a buffer and replaces *buf with this buffer. If no memory is
121 * available, &buf_wanted is used instead. No control is made to check if *buf
122 * already pointed to another buffer. The allocated buffer is returned, or
123 * NULL in case no memory is available.
Willy Tarreaue583ea52014-11-24 11:30:16 +0100124 */
125static inline struct buffer *b_alloc(struct buffer **buf)
126{
Willy Tarreauf2f7d6b2014-11-24 11:55:08 +0100127 struct buffer *b;
128
129 *buf = &buf_wanted;
Willy Tarreaubafbe012017-11-24 17:34:44 +0100130 b = pool_alloc_dirty(pool_head_buffer);
Willy Tarreauf2f7d6b2014-11-24 11:55:08 +0100131 if (likely(b)) {
Willy Tarreaubafbe012017-11-24 17:34:44 +0100132 b->size = pool_head_buffer->size - sizeof(struct buffer);
Willy Tarreauf2f7d6b2014-11-24 11:55:08 +0100133 b_reset(b);
134 *buf = b;
Willy Tarreaue583ea52014-11-24 11:30:16 +0100135 }
Willy Tarreauf2f7d6b2014-11-24 11:55:08 +0100136 return b;
Willy Tarreaue583ea52014-11-24 11:30:16 +0100137}
138
Willy Tarreau620bd6c2014-12-08 16:37:26 +0100139/* Allocates a buffer and replaces *buf with this buffer. If no memory is
140 * available, &buf_wanted is used instead. No control is made to check if *buf
141 * already pointed to another buffer. The allocated buffer is returned, or
142 * NULL in case no memory is available. The difference with b_alloc() is that
143 * this function only picks from the pool and never calls malloc(), so it can
144 * fail even if some memory is available.
145 */
146static inline struct buffer *b_alloc_fast(struct buffer **buf)
147{
148 struct buffer *b;
149
150 *buf = &buf_wanted;
Willy Tarreaubafbe012017-11-24 17:34:44 +0100151 b = pool_get_first(pool_head_buffer);
Willy Tarreau620bd6c2014-12-08 16:37:26 +0100152 if (likely(b)) {
Willy Tarreaubafbe012017-11-24 17:34:44 +0100153 b->size = pool_head_buffer->size - sizeof(struct buffer);
Willy Tarreau620bd6c2014-12-08 16:37:26 +0100154 b_reset(b);
155 *buf = b;
156 }
157 return b;
158}
159
Willy Tarreau2a4b5432014-11-24 11:39:34 +0100160/* Releases buffer *buf (no check of emptiness) */
161static inline void __b_drop(struct buffer **buf)
Willy Tarreau7dfca9d2014-11-25 19:45:11 +0100162{
Willy Tarreaubafbe012017-11-24 17:34:44 +0100163 pool_free(pool_head_buffer, *buf);
Willy Tarreau7dfca9d2014-11-25 19:45:11 +0100164}
165
Willy Tarreau2a4b5432014-11-24 11:39:34 +0100166/* Releases buffer *buf if allocated. */
167static inline void b_drop(struct buffer **buf)
168{
169 if (!(*buf)->size)
170 return;
171 __b_drop(buf);
172}
173
174/* Releases buffer *buf if allocated, and replaces it with &buf_empty. */
175static inline void b_free(struct buffer **buf)
176{
177 b_drop(buf);
178 *buf = &buf_empty;
179}
180
/* Ensures that <buf> is allocated. If an allocation is needed, it ensures that
 * there are still at least <margin> buffers available in the pool after this
 * allocation so that we don't leave the pool in a condition where a session or
 * a response buffer could not be allocated anymore, resulting in a deadlock.
 * This means that we sometimes need to try to allocate extra entries even if
 * only one buffer is needed.
 *
 * We need to lock the pool here to be sure to have <margin> buffers available
 * after the allocation, regardless of how many threads are doing it at the
 * same time. So, we use internal and lockless memory functions (prefixed
 * with '__').
 */
static inline struct buffer *b_alloc_margin(struct buffer **buf, int margin)
{
	struct buffer *b;

	/* already allocated: nothing to do */
	if ((*buf)->size)
		return *buf;

	/* mark the caller as waiting in case every attempt below fails */
	*buf = &buf_wanted;
#ifndef CONFIG_HAP_LOCKLESS_POOLS
	HA_SPIN_LOCK(POOL_LOCK, &pool_head_buffer->lock);
#endif

	/* fast path: only pick from the pool when it keeps at least <margin>
	 * entries available after the allocation.
	 */
	if ((pool_head_buffer->allocated - pool_head_buffer->used) > margin) {
		b = __pool_get_first(pool_head_buffer);
		if (likely(b)) {
#ifndef CONFIG_HAP_LOCKLESS_POOLS
			HA_SPIN_UNLOCK(POOL_LOCK, &pool_head_buffer->lock);
#endif
			b->size = pool_head_buffer->size - sizeof(struct buffer);
			b_reset(b);
			*buf = b;
			return b;
		}
	}

	/* slow path, uses malloc() */
	b = __pool_refill_alloc(pool_head_buffer, margin);

#ifndef CONFIG_HAP_LOCKLESS_POOLS
	HA_SPIN_UNLOCK(POOL_LOCK, &pool_head_buffer->lock);
#endif

	if (b) {
		b->size = pool_head_buffer->size - sizeof(struct buffer);
		b_reset(b);
		*buf = b;
	}
	return b;
}
232
Christopher Fauleta73e59b2016-12-09 17:30:18 +0100233
Willy Tarreauc41b3e82018-03-02 10:27:12 +0100234/* Offer a buffer currently belonging to target <from> to whoever needs one.
235 * Any pointer is valid for <from>, including NULL. Its purpose is to avoid
236 * passing a buffer to oneself in case of failed allocations (e.g. need two
237 * buffers, get one, fail, release it and wake up self again). In case of
238 * normal buffer release where it is expected that the caller is not waiting
239 * for a buffer, NULL is fine.
240 */
Christopher Fauleta73e59b2016-12-09 17:30:18 +0100241void __offer_buffer(void *from, unsigned int threshold);
242
243static inline void offer_buffers(void *from, unsigned int threshold)
244{
Christopher Faulet2a944ee2017-11-07 10:42:54 +0100245 HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
Emeric Bruna1dd2432017-06-21 15:42:52 +0200246 if (LIST_ISEMPTY(&buffer_wq)) {
Christopher Faulet2a944ee2017-11-07 10:42:54 +0100247 HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
Christopher Fauleta73e59b2016-12-09 17:30:18 +0100248 return;
Emeric Bruna1dd2432017-06-21 15:42:52 +0200249 }
Christopher Fauleta73e59b2016-12-09 17:30:18 +0100250 __offer_buffer(from, threshold);
Christopher Faulet2a944ee2017-11-07 10:42:54 +0100251 HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
Christopher Fauleta73e59b2016-12-09 17:30:18 +0100252}
253
Willy Tarreaue5676e72017-09-22 15:47:51 +0200254
Willy Tarreauc7e42382012-08-24 19:22:53 +0200255#endif /* _COMMON_BUFFER_H */
256
257/*
258 * Local variables:
259 * c-indent-level: 8
260 * c-basic-offset: 8
261 * End:
262 */