blob: 34a3e95d2d613b088efd518eb73d2501f8f8c0e3 [file] [log] [blame]
Willy Tarreauc7e42382012-08-24 19:22:53 +02001/*
2 * include/common/buffer.h
3 * Buffer management definitions, macros and inline functions.
4 *
5 * Copyright (C) 2000-2012 Willy Tarreau - w@1wt.eu
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation, version 2.1
10 * exclusively.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22#ifndef _COMMON_BUFFER_H
23#define _COMMON_BUFFER_H
24
25#include <stdio.h>
26#include <stdlib.h>
27#include <string.h>
28
Willy Tarreau41806d12018-07-11 09:39:05 +020029#include <common/buf.h>
Willy Tarreau8c89c202012-09-28 16:02:48 +020030#include <common/chunk.h>
Willy Tarreauc7e42382012-08-24 19:22:53 +020031#include <common/config.h>
Willy Tarreau6634b632017-09-22 15:02:54 +020032#include <common/ist.h>
Willy Tarreauea1b06d2018-07-12 09:02:47 +020033#include <common/istbuf.h>
Willy Tarreau9b28e032012-10-12 23:49:43 +020034#include <common/memory.h>
Willy Tarreauc7e42382012-08-24 19:22:53 +020035
Willy Tarreaua8b2ce02019-05-28 17:04:16 +020036#include <proto/activity.h>
Willy Tarreauc7e42382012-08-24 19:22:53 +020037
/* An element of the <buffer_wq> list. It represents an object that needs to
 * acquire a buffer to continue its processing. When a buffer becomes
 * available, <wakeup_cb> is called with <target> as its argument.
 */
struct buffer_wait {
	void *target;              /* the waiting object that should be woken up */
	int (*wakeup_cb)(void *);  /* function used to wake up <target>, which is passed as the argument */
	struct list list;          /* linkage into the global <buffer_wq> list */
};
45
Willy Tarreaubafbe012017-11-24 17:34:44 +010046extern struct pool_head *pool_head_buffer;
Christopher Fauleta73e59b2016-12-09 17:30:18 +010047extern struct list buffer_wq;
Willy Tarreau53bae852017-11-26 11:00:37 +010048__decl_hathreads(extern HA_SPINLOCK_T buffer_wq_lock);
Willy Tarreauc7e42382012-08-24 19:22:53 +020049
Willy Tarreau9b28e032012-10-12 23:49:43 +020050int init_buffer();
Willy Tarreauc7e42382012-08-24 19:22:53 +020051void buffer_dump(FILE *o, struct buffer *b, int from, int to);
Willy Tarreauc7e42382012-08-24 19:22:53 +020052
53/*****************************************************************/
54/* These functions are used to compute various buffer area sizes */
55/*****************************************************************/
56
/* Returns 1 if the buffer has less than 1/4 of its capacity free, otherwise 0.
 * A null/unallocated buffer is never considered almost full.
 */
static inline int buffer_almost_full(const struct buffer *buf)
{
	return b_is_null(buf) ? 0 : b_almost_full(buf);
}
65
Willy Tarreau7b04cc42018-07-10 10:35:02 +020066/**************************************************/
67/* Functions below are used for buffer allocation */
68/**************************************************/
Willy Tarreauaf819352012-08-27 22:08:00 +020069
Willy Tarreauc9fa0482018-07-10 17:43:27 +020070/* Allocates a buffer and assigns it to *buf. If no memory is available,
71 * ((char *)1) is assigned instead with a zero size. No control is made to
72 * check if *buf already pointed to another buffer. The allocated buffer is
73 * returned, or NULL in case no memory is available.
Willy Tarreaue583ea52014-11-24 11:30:16 +010074 */
Willy Tarreauc9fa0482018-07-10 17:43:27 +020075static inline struct buffer *b_alloc(struct buffer *buf)
Willy Tarreaue583ea52014-11-24 11:30:16 +010076{
Willy Tarreauc9fa0482018-07-10 17:43:27 +020077 char *area;
Willy Tarreauf2f7d6b2014-11-24 11:55:08 +010078
Willy Tarreauc9fa0482018-07-10 17:43:27 +020079 *buf = BUF_WANTED;
80 area = pool_alloc_dirty(pool_head_buffer);
Willy Tarreaua8b2ce02019-05-28 17:04:16 +020081 if (unlikely(!area)) {
82 activity[tid].buf_wait++;
Willy Tarreauc9fa0482018-07-10 17:43:27 +020083 return NULL;
Willy Tarreaua8b2ce02019-05-28 17:04:16 +020084 }
Willy Tarreauc9fa0482018-07-10 17:43:27 +020085
86 buf->area = area;
87 buf->size = pool_head_buffer->size;
88 return buf;
Willy Tarreaue583ea52014-11-24 11:30:16 +010089}
90
Willy Tarreauc9fa0482018-07-10 17:43:27 +020091/* Allocates a buffer and assigns it to *buf. If no memory is available,
92 * ((char *)1) is assigned instead with a zero size. No control is made to
93 * check if *buf already pointed to another buffer. The allocated buffer is
94 * returned, or NULL in case no memory is available. The difference with
95 * b_alloc() is that this function only picks from the pool and never calls
96 * malloc(), so it can fail even if some memory is available.
Willy Tarreau620bd6c2014-12-08 16:37:26 +010097 */
Willy Tarreauc9fa0482018-07-10 17:43:27 +020098static inline struct buffer *b_alloc_fast(struct buffer *buf)
Willy Tarreau620bd6c2014-12-08 16:37:26 +010099{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200100 char *area;
Willy Tarreau620bd6c2014-12-08 16:37:26 +0100101
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200102 *buf = BUF_WANTED;
103 area = pool_get_first(pool_head_buffer);
104 if (unlikely(!area))
105 return NULL;
106
107 buf->area = area;
108 buf->size = pool_head_buffer->size;
109 return buf;
Willy Tarreau620bd6c2014-12-08 16:37:26 +0100110}
111
Willy Tarreau3b091f82019-08-08 07:53:20 +0200112/* Releases buffer <buf> (no check of emptiness). The buffer's head is marked
113 * empty.
114 */
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200115static inline void __b_drop(struct buffer *buf)
Willy Tarreau7dfca9d2014-11-25 19:45:11 +0100116{
Willy Tarreau3b091f82019-08-08 07:53:20 +0200117 char *area = buf->area;
118
119 /* let's first clear the area to save an occasional "show sess all"
120 * glancing over our shoulder from getting a dangling pointer.
121 */
122 *buf = BUF_NULL;
123 __ha_barrier_store();
124 pool_free(pool_head_buffer, area);
Willy Tarreau7dfca9d2014-11-25 19:45:11 +0100125}
126
Willy Tarreau3b091f82019-08-08 07:53:20 +0200127/* Releases buffer <buf> if allocated, and marks it empty. */
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200128static inline void b_drop(struct buffer *buf)
Willy Tarreau2a4b5432014-11-24 11:39:34 +0100129{
Willy Tarreauc9fa0482018-07-10 17:43:27 +0200130 if (buf->size)
131 __b_drop(buf);
Willy Tarreau2a4b5432014-11-24 11:39:34 +0100132}
133
/* Releases buffer <buf> if allocated, and marks it empty. Simply delegates
 * to b_drop(); kept as a distinct entry point paired with b_alloc().
 */
static inline void b_free(struct buffer *buf)
{
	b_drop(buf);
}
139
/* Ensures that <buf> is allocated. If an allocation is needed, it ensures that
 * there are still at least <margin> buffers available in the pool after this
 * allocation so that we don't leave the pool in a condition where a session or
 * a response buffer could not be allocated anymore, resulting in a deadlock.
 * This means that we sometimes need to try to allocate extra entries even if
 * only one buffer is needed.
 *
 * We need to lock the pool here to be sure to have <margin> buffers available
 * after the allocation, regardless of how many threads are doing it at the
 * same time. So, we use internal and lockless memory functions (prefixed
 * with '__').
 */
static inline struct buffer *b_alloc_margin(struct buffer *buf, int margin)
{
	char *area;
	ssize_t idx;
	unsigned int cached;

	/* already allocated: nothing to do */
	if (buf->size)
		return buf;

	/* Count the entries sitting in this thread's local cache for this
	 * pool, since those are immediately usable by us on top of what the
	 * shared pool reports. NOTE(review): assumes pool_cache[tid][idx].count
	 * tracks the thread-local cache for pool <idx> — confirm in memory.h.
	 */
	cached = 0;
	idx = pool_get_index(pool_head_buffer);
	if (idx >= 0)
		cached = pool_cache[tid][idx].count;

	*buf = BUF_WANTED;

#ifndef CONFIG_HAP_LOCKLESS_POOLS
	HA_SPIN_LOCK(POOL_LOCK, &pool_head_buffer->lock);
#endif

	/* fast path: enough free entries remain beyond <margin>, try to pick
	 * one directly from the pool without calling malloc().
	 */
	if ((pool_head_buffer->allocated - pool_head_buffer->used + cached) > margin) {
		area = __pool_get_first(pool_head_buffer);
		if (likely(area)) {
#ifndef CONFIG_HAP_LOCKLESS_POOLS
			HA_SPIN_UNLOCK(POOL_LOCK, &pool_head_buffer->lock);
#endif
			goto done;
		}
	}

	/* slow path, uses malloc(): refill the pool keeping <margin> spare */
	area = __pool_refill_alloc(pool_head_buffer, margin);

#ifndef CONFIG_HAP_LOCKLESS_POOLS
	HA_SPIN_UNLOCK(POOL_LOCK, &pool_head_buffer->lock);
#endif

	if (unlikely(!area)) {
		/* allocation failed: account the failed attempt for this thread */
		activity[tid].buf_wait++;
		return NULL;
	}

 done:
	buf->area = area;
	buf->size = pool_head_buffer->size;
	return buf;
}
199
Christopher Fauleta73e59b2016-12-09 17:30:18 +0100200
Willy Tarreauc41b3e82018-03-02 10:27:12 +0100201/* Offer a buffer currently belonging to target <from> to whoever needs one.
202 * Any pointer is valid for <from>, including NULL. Its purpose is to avoid
203 * passing a buffer to oneself in case of failed allocations (e.g. need two
204 * buffers, get one, fail, release it and wake up self again). In case of
205 * normal buffer release where it is expected that the caller is not waiting
206 * for a buffer, NULL is fine.
207 */
Christopher Fauleta73e59b2016-12-09 17:30:18 +0100208void __offer_buffer(void *from, unsigned int threshold);
209
210static inline void offer_buffers(void *from, unsigned int threshold)
211{
Willy Tarreau186e96e2019-05-28 17:21:18 +0200212 if (LIST_ISEMPTY(&buffer_wq))
Christopher Fauleta73e59b2016-12-09 17:30:18 +0100213 return;
Willy Tarreau186e96e2019-05-28 17:21:18 +0200214
215 HA_SPIN_LOCK(BUF_WQ_LOCK, &buffer_wq_lock);
216 if (!LIST_ISEMPTY(&buffer_wq))
217 __offer_buffer(from, threshold);
Christopher Faulet2a944ee2017-11-07 10:42:54 +0100218 HA_SPIN_UNLOCK(BUF_WQ_LOCK, &buffer_wq_lock);
Christopher Fauleta73e59b2016-12-09 17:30:18 +0100219}
220
Willy Tarreaue5676e72017-09-22 15:47:51 +0200221
Willy Tarreauc7e42382012-08-24 19:22:53 +0200222#endif /* _COMMON_BUFFER_H */
223
224/*
225 * Local variables:
226 * c-indent-level: 8
227 * c-basic-offset: 8
228 * End:
229 */