blob: c8d7a69f2c1c1cfefcaef8d7140a98e7b79975fe [file] [log] [blame]
Willy Tarreauc7e42382012-08-24 19:22:53 +02001/*
2 * Buffer management functions.
3 *
4 * Copyright 2000-2012 Willy Tarreau <w@1wt.eu>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
William Lallemandbe0efd82012-11-22 18:01:40 +010013#include <ctype.h>
Willy Tarreauc7e42382012-08-24 19:22:53 +020014#include <stdio.h>
15#include <string.h>
16
Willy Tarreau4c7e4b72020-05-27 12:58:42 +020017#include <haproxy/api.h>
Willy Tarreauc7e42382012-08-24 19:22:53 +020018#include <common/buffer.h>
Willy Tarreau9b28e032012-10-12 23:49:43 +020019#include <common/memory.h>
Willy Tarreauc7e42382012-08-24 19:22:53 +020020
21#include <types/global.h>
22
/* pool from which all buffers are allocated; created by init_buffer() */
struct pool_head *pool_head_buffer;

/* list of objects waiting for at least one buffer */
struct mt_list buffer_wq = LIST_HEAD_INIT(buffer_wq);

/* lock associated with buffer_wq — NOTE(review): presumably serializes
 * waiters around the mt_list operations; confirm against callers of
 * buffer_wq elsewhere in the tree.
 */
__decl_aligned_spinlock(buffer_wq_lock);
Christopher Fauleta73e59b2016-12-09 17:30:18 +010028
Willy Tarreau9b28e032012-10-12 23:49:43 +020029/* perform minimal intializations, report 0 in case of error, 1 if OK. */
30int init_buffer()
31{
Willy Tarreaua24adf02014-11-27 01:11:56 +010032 void *buffer;
33
Willy Tarreauc9fa0482018-07-10 17:43:27 +020034 pool_head_buffer = create_pool("buffer", global.tune.bufsize, MEM_F_SHARED|MEM_F_EXACT);
Willy Tarreaubafbe012017-11-24 17:34:44 +010035 if (!pool_head_buffer)
Willy Tarreaua24adf02014-11-27 01:11:56 +010036 return 0;
37
38 /* The reserved buffer is what we leave behind us. Thus we always need
39 * at least one extra buffer in minavail otherwise we'll end up waking
40 * up tasks with no memory available, causing a lot of useless wakeups.
41 * That means that we always want to have at least 3 buffers available
42 * (2 for current session, one for next session that might be needed to
43 * release a server connection).
44 */
Willy Tarreaubafbe012017-11-24 17:34:44 +010045 pool_head_buffer->minavail = MAX(global.tune.reserved_bufs, 3);
Willy Tarreau33cb0652014-12-23 22:52:37 +010046 if (global.tune.buf_limit)
Willy Tarreaubafbe012017-11-24 17:34:44 +010047 pool_head_buffer->limit = global.tune.buf_limit;
Willy Tarreaua24adf02014-11-27 01:11:56 +010048
Willy Tarreaubafbe012017-11-24 17:34:44 +010049 buffer = pool_refill_alloc(pool_head_buffer, pool_head_buffer->minavail - 1);
Willy Tarreaua24adf02014-11-27 01:11:56 +010050 if (!buffer)
51 return 0;
52
Willy Tarreaubafbe012017-11-24 17:34:44 +010053 pool_free(pool_head_buffer, buffer);
Willy Tarreaua24adf02014-11-27 01:11:56 +010054 return 1;
Willy Tarreau9b28e032012-10-12 23:49:43 +020055}
56
/*
 * Dumps part or all of a buffer to stream <o>, from offset <from> (inclusive)
 * to offset <to> (exclusive), as a side-by-side hex + printable-ASCII dump,
 * 16 bytes per row. Offsets are relative to the buffer's origin (b_orig),
 * not to its head. The stream is flushed before returning.
 */
void buffer_dump(FILE *o, struct buffer *b, int from, int to)
{
	fprintf(o, "Dumping buffer %p\n", b);
	fprintf(o, " orig=%p size=%u head=%u tail=%u data=%u\n",
		b_orig(b), (unsigned int)b_size(b), (unsigned int)b_head_ofs(b), (unsigned int)b_tail_ofs(b), (unsigned int)b_data(b));

	fprintf(o, "Dumping contents from byte %d to byte %d\n", from, to);
	fprintf(o, " 0 1 2 3 4 5 6 7 8 9 a b c d e f\n");
	/* dump hexa */
	while (from < to) {
		int i;

		fprintf(o, " %04x: ", from);
		for (i = 0; ((from + i) < to) && (i < 16) ; i++) {
			fprintf(o, "%02x ", (unsigned char)b_orig(b)[from + i]);
			/* visual separator after the 8th byte of the row */
			if (((from + i) & 15) == 7)
				fprintf(o, "- ");
		}
		if (to - from < 16) {
			int j = 0;

			/* last row is short: pad the hex column so the ASCII
			 * column below stays aligned with full rows
			 */
			for (j = 0; j < from + 16 - to; j++)
				fprintf(o, " ");
			/* compensate for the missing "- " separator when the
			 * row ended before the 8-byte midpoint
			 */
			if (j > 8)
				fprintf(o, " ");
		}
		fprintf(o, " ");
		/* ASCII column: printable bytes as-is, everything else as '.';
		 * isprint() requires the unsigned char cast to avoid UB on
		 * negative char values
		 */
		for (i = 0; (from + i < to) && (i < 16) ; i++) {
			fprintf(o, "%c", isprint((unsigned char)b_orig(b)[from + i]) ? b_orig(b)[from + i] : '.') ;
			if ((((from + i) & 15) == 15) && ((from + i) != to-1))
				fprintf(o, "\n");
		}
		from += i;
	}
	fprintf(o, "\n--\n");
	fflush(o);
}
97
Willy Tarreauc41b3e82018-03-02 10:27:12 +010098/* see offer_buffer() for details */
Christopher Fauleta73e59b2016-12-09 17:30:18 +010099void __offer_buffer(void *from, unsigned int threshold)
100{
Willy Tarreau21046592020-02-26 10:39:36 +0100101 struct buffer_wait *wait;
102 struct mt_list *elt1, elt2;
Christopher Fauleta73e59b2016-12-09 17:30:18 +0100103 int avail;
104
105 /* For now, we consider that all objects need 1 buffer, so we can stop
106 * waking up them once we have enough of them to eat all the available
107 * buffers. Note that we don't really know if they are streams or just
108 * other tasks, but that's a rough estimate. Similarly, for each cached
109 * event we'll need 1 buffer. If no buffer is currently used, always
110 * wake up the number of tasks we can offer a buffer based on what is
111 * allocated, and in any case at least one task per two reserved
112 * buffers.
113 */
Willy Tarreaubafbe012017-11-24 17:34:44 +0100114 avail = pool_head_buffer->allocated - pool_head_buffer->used - global.tune.reserved_bufs / 2;
Christopher Fauleta73e59b2016-12-09 17:30:18 +0100115
Willy Tarreau21046592020-02-26 10:39:36 +0100116 mt_list_for_each_entry_safe(wait, &buffer_wq, list, elt1, elt2) {
Christopher Fauleta73e59b2016-12-09 17:30:18 +0100117 if (avail <= threshold)
118 break;
119
120 if (wait->target == from || !wait->wakeup_cb(wait->target))
121 continue;
122
Olivier Houchard6c96fc12020-03-10 17:39:21 +0100123 MT_LIST_DEL_SAFE(elt1);
Christopher Fauleta73e59b2016-12-09 17:30:18 +0100124 avail--;
125 }
126}
Willy Tarreauc7e42382012-08-24 19:22:53 +0200127
128/*
129 * Local variables:
130 * c-indent-level: 8
131 * c-basic-offset: 8
132 * End:
133 */