/*
 * Buffer management functions.
 *
 * Copyright 2000-2012 Willy Tarreau <w@1wt.eu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <ctype.h>
#include <stdio.h>
#include <string.h>

#include <haproxy/api.h>
#include <haproxy/dynbuf.h>
#include <haproxy/global.h>
#include <haproxy/list.h>
#include <haproxy/pool.h>

struct pool_head *pool_head_buffer;

/* list of objects waiting for at least one buffer */
struct mt_list buffer_wq = LIST_HEAD_INIT(buffer_wq);

/* perform minimal initializations, report 0 in case of error, 1 if OK. */
int init_buffer()
{
	void *buffer;

	pool_head_buffer = create_pool("buffer", global.tune.bufsize, MEM_F_SHARED|MEM_F_EXACT);
	if (!pool_head_buffer)
		return 0;

	/* The reserved buffer is what we leave behind us. Thus we always need
	 * at least one extra buffer in minavail otherwise we'll end up waking
	 * up tasks with no memory available, causing a lot of useless wakeups.
	 * That means that we always want to have at least 3 buffers available
	 * (2 for the current session, one for the next session that might be
	 * needed to release a server connection).
	 */
	pool_head_buffer->minavail = MAX(global.tune.reserved_bufs, 3);
	if (global.tune.buf_limit)
		pool_head_buffer->limit = global.tune.buf_limit;
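
	/* Illustration (not in the original source): with the documented default
	 * of 2 for tune.buffers.reserve, the MAX() above evaluates to MAX(2, 3)
	 * == 3, so at least 3 buffers are kept available for the reasons given
	 * in the comment preceding it.
	 */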

	buffer = pool_refill_alloc(pool_head_buffer, pool_head_buffer->minavail - 1);
	if (!buffer)
		return 0;

	pool_free(pool_head_buffer, buffer);
	return 1;
}

/*
 * Dumps part or all of a buffer.
 */
void buffer_dump(FILE *o, struct buffer *b, int from, int to)
{
	fprintf(o, "Dumping buffer %p\n", b);
	fprintf(o, "  orig=%p size=%u head=%u tail=%u data=%u\n",
		b_orig(b), (unsigned int)b_size(b), (unsigned int)b_head_ofs(b), (unsigned int)b_tail_ofs(b), (unsigned int)b_data(b));

	fprintf(o, "Dumping contents from byte %d to byte %d\n", from, to);
	fprintf(o, "         0  1  2  3  4  5  6  7    8  9  a  b  c  d  e  f\n");
	/* dump hexa */
	while (from < to) {
		int i;

		fprintf(o, "  %04x: ", from);
		for (i = 0; ((from + i) < to) && (i < 16) ; i++) {
			fprintf(o, "%02x ", (unsigned char)b_orig(b)[from + i]);
			if (((from + i) & 15) == 7)
				fprintf(o, "- ");
		}
		if (to - from < 16) {
			int j = 0;

			for (j = 0; j < from + 16 - to; j++)
				fprintf(o, "   ");
			if (j > 8)
				fprintf(o, "  ");
		}
		fprintf(o, "  ");
		for (i = 0; (from + i < to) && (i < 16) ; i++) {
			fprintf(o, "%c", isprint((unsigned char)b_orig(b)[from + i]) ? b_orig(b)[from + i] : '.');
			if ((((from + i) & 15) == 15) && ((from + i) != to-1))
				fprintf(o, "\n");
		}
		from += i;
	}
	fprintf(o, "\n--\n");
	fflush(o);
}

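/* Example (illustrative only, not part of the original file): from a debug
 * hook or an ad-hoc trace, one could dump the whole storage area of any
 * struct buffer <buf> to stderr like this:
 *
 *	buffer_dump(stderr, buf, 0, b_size(buf));
 *
 * Note that <from> and <to> are byte offsets into the storage area returned
 * by b_orig(), not offsets relative to the buffer's head.
 */
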
/* see offer_buffer() for details */
void __offer_buffer(void *from, unsigned int threshold)
{
	struct buffer_wait *wait;
	struct mt_list *elt1, elt2;
	int avail;

	/* For now, we consider that all objects need 1 buffer, so we can stop
	 * waking them up once we have enough of them to eat all the available
	 * buffers. Note that we don't really know if they are streams or just
	 * other tasks, but that's a rough estimate. Similarly, for each cached
	 * event we'll need 1 buffer. If no buffer is currently used, always
	 * wake up the number of tasks we can offer a buffer based on what is
	 * allocated, and in any case at least one task per two reserved
	 * buffers.
	 */
	avail = pool_head_buffer->allocated - pool_head_buffer->used - global.tune.reserved_bufs / 2;
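	/* Worked example (illustrative, not from the original source): with 10
	 * buffers allocated, 4 in use and the default of 2 reserved buffers,
	 * avail = 10 - 4 - 1 = 5, so the loop below can wake up at most
	 * (5 - threshold) waiters before stopping.
	 */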

	mt_list_for_each_entry_safe(wait, &buffer_wq, list, elt1, elt2) {
		if (avail <= threshold)
			break;

		if (wait->target == from || !wait->wakeup_cb(wait->target))
			continue;

		MT_LIST_DEL_SAFE(elt1);
		avail--;
	}
}

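/* Sketch of the other side of this queue (an assumption for illustration, not
 * part of this file): a task that failed to allocate a buffer typically fills
 * a struct buffer_wait with its context and a wake-up callback, then appends
 * it to buffer_wq so that __offer_buffer() can revisit it once buffers are
 * released:
 *
 *	struct buffer_wait *bw = &my_ctx->buffer_wait;
 *
 *	bw->target    = my_ctx;
 *	bw->wakeup_cb = my_wakeup_cb;
 *	MT_LIST_ADDQ(&buffer_wq, &bw->list);
 *
 * Here my_ctx and my_wakeup_cb are hypothetical names, and MT_LIST_ADDQ is
 * assumed to be the append primitive available alongside MT_LIST_DEL_SAFE.
 */
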
/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */