/*
 * include/common/buf.h
 * Simple buffer handling.
 *
 * Copyright (C) 2000-2018 Willy Tarreau - w@1wt.eu
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef _COMMON_BUF_H
#define _COMMON_BUF_H

#include <inttypes.h>
#include <string.h>
#include <unistd.h>

#include <common/debug.h>

/* Structure defining a buffer's head */
struct buffer {
	size_t size;   /* buffer size in bytes */
	char  *area;   /* points to <size> bytes */
	size_t data;   /* amount of data after head including wrapping */
	size_t head;   /* start offset of remaining data relative to area */
};

/* A buffer may be in 3 different states :
 *   - unallocated : size == 0, area == 0  (b_is_null() is true)
 *   - waiting     : size == 0, area != 0  (b_is_null() is true)
 *   - allocated   : size  > 0, area  > 0  (b_is_null() is false)
 */

/* initializers for certain buffer states. It is important that the NULL buffer
 * remains the one with all fields initialized to zero so that a calloc() or a
 * memset() on a struct automatically sets a NULL buffer.
 */
#define BUF_NULL   ((struct buffer){ })
#define BUF_WANTED ((struct buffer){ .area = (char *)1 })
#define BUF_RING   ((struct buffer){ .area = (char *)2 })

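/* Example (illustrative sketch) of the three states above. <my_alloc_area> is
 * a hypothetical allocator returning a usable storage area of the given size:
 *
 *	struct buffer buf = BUF_NULL;       unallocated : b_is_null() is true
 *
 *	buf = BUF_WANTED;                   waiting     : b_is_null() is true
 *
 *	buf.area = my_alloc_area(4096);     allocated   : b_is_null() is false
 *	buf.size = 4096;
 *	buf.head = buf.data = 0;
 */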

/***************************************************************************/
/* Functions used to compute offsets and pointers. Most of them exist in   */
/* both wrapping-safe and unchecked ("__" prefix) variants. Some returning */
/* a pointer are also provided with an "_ofs" suffix when they return an   */
/* offset relative to the storage area.                                    */
/***************************************************************************/

/* b_is_null() : returns true if (and only if) the buffer is not yet allocated
 * and thus has an empty size. Its pointer may then be anything, including NULL
 * (unallocated) or an invalid pointer such as (char*)1 (allocation pending).
 */
static inline int b_is_null(const struct buffer *buf)
{
	return buf->size == 0;
}

/* b_orig() : returns the pointer to the origin of the storage, which is the
 * location of byte at offset zero. This is mostly used by functions which
 * handle the wrapping by themselves.
 */
static inline char *b_orig(const struct buffer *b)
{
	return b->area;
}

/* b_size() : returns the size of the buffer. */
static inline size_t b_size(const struct buffer *b)
{
	return b->size;
}

/* b_wrap() : returns the pointer to the wrapping position of the buffer area,
 * which is by definition the first byte not part of the buffer.
 */
static inline char *b_wrap(const struct buffer *b)
{
	return b->area + b->size;
}

/* b_data() : returns the number of bytes present in the buffer. */
static inline size_t b_data(const struct buffer *b)
{
	return b->data;
}

/* b_room() : returns the amount of room left in the buffer */
static inline size_t b_room(const struct buffer *b)
{
	return b->size - b_data(b);
}

/* b_full() : returns true if the buffer is full. */
static inline size_t b_full(const struct buffer *b)
{
	return !b_room(b);
}


/* b_stop() : returns the pointer to the byte following the end of the buffer,
 * which may be out of the buffer if the buffer ends on the last byte of the
 * area.
 */
static inline size_t __b_stop_ofs(const struct buffer *b)
{
	return b->head + b->data;
}

static inline const char *__b_stop(const struct buffer *b)
{
	return b_orig(b) + __b_stop_ofs(b);
}

static inline size_t b_stop_ofs(const struct buffer *b)
{
	size_t stop = __b_stop_ofs(b);

	if (stop > b->size)
		stop -= b->size;
	return stop;
}

static inline const char *b_stop(const struct buffer *b)
{
	return b_orig(b) + b_stop_ofs(b);
}


/* b_peek() : returns a pointer to the data at position <ofs> relative to the
 * head of the buffer. Will typically point to input data if called with the
 * amount of output data. The wrapped versions will only support wrapping once
 * before the beginning or after the end.
 */
static inline size_t __b_peek_ofs(const struct buffer *b, size_t ofs)
{
	return b->head + ofs;
}

static inline char *__b_peek(const struct buffer *b, size_t ofs)
{
	return b_orig(b) + __b_peek_ofs(b, ofs);
}

static inline size_t b_peek_ofs(const struct buffer *b, size_t ofs)
{
	size_t ret = __b_peek_ofs(b, ofs);

	if (ret >= b->size)
		ret -= b->size;

	return ret;
}

static inline char *b_peek(const struct buffer *b, size_t ofs)
{
	return b_orig(b) + b_peek_ofs(b, ofs);
}

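/* Example (illustrative sketch): with <out> bytes of pending output at the
 * head of <b>, the first byte of input data is reached by skipping the output,
 * wrapping included:
 *
 *	static inline char *first_input_byte(const struct buffer *b, size_t out)
 *	{
 *		return b_peek(b, out);
 *	}
 */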

/* b_head() : returns the pointer to the buffer's head, which is the location
 * of the next byte to be dequeued. Note that for buffers of size zero, the
 * returned pointer may be outside of the buffer or even invalid.
 */
static inline size_t __b_head_ofs(const struct buffer *b)
{
	return b->head;
}

static inline char *__b_head(const struct buffer *b)
{
	return b_orig(b) + __b_head_ofs(b);
}

static inline size_t b_head_ofs(const struct buffer *b)
{
	return __b_head_ofs(b);
}

static inline char *b_head(const struct buffer *b)
{
	return __b_head(b);
}


/* b_tail() : returns the pointer to the tail of the buffer, which is the
 * location of the first byte where it is possible to enqueue new data. Note
 * that for buffers of size zero, the returned pointer may be outside of the
 * buffer or even invalid.
 */
static inline size_t __b_tail_ofs(const struct buffer *b)
{
	return __b_peek_ofs(b, b_data(b));
}

static inline char *__b_tail(const struct buffer *b)
{
	return __b_peek(b, b_data(b));
}

static inline size_t b_tail_ofs(const struct buffer *b)
{
	return b_peek_ofs(b, b_data(b));
}

static inline char *b_tail(const struct buffer *b)
{
	return b_peek(b, b_data(b));
}


/* b_next() : for an absolute pointer <p> or a relative offset <o> pointing to
 * a valid location within buffer <b>, returns either the absolute pointer or
 * the relative offset pointing to the next byte, which usually is at (p + 1)
 * unless p reaches the wrapping point and wrapping is needed.
 */
static inline size_t b_next_ofs(const struct buffer *b, size_t o)
{
	o++;
	if (o == b->size)
		o = 0;
	return o;
}

static inline char *b_next(const struct buffer *b, const char *p)
{
	p++;
	if (p == b_wrap(b))
		p = b_orig(b);
	return (char *)p;
}

/* b_dist() : returns the distance between two pointers, taking into account
 * the ability to wrap around the buffer's end. The operation is not defined if
 * either of the pointers does not belong to the buffer or if their distance is
 * greater than the buffer's size.
 */
static inline size_t b_dist(const struct buffer *b, const char *from, const char *to)
{
	ssize_t dist = to - from;

	dist += dist < 0 ? b_size(b) : 0;
	return dist;
}

/* b_almost_full() : returns 1 if the buffer uses at least 3/4 of its capacity,
 * otherwise zero. Buffers of size zero are considered full.
 */
static inline int b_almost_full(const struct buffer *b)
{
	return b_data(b) >= b_size(b) * 3 / 4;
}

/* b_space_wraps() : returns non-zero only if the buffer's free space wraps :
 *  [     |xxxx|           ]    => yes
 *  [xxxx|                 ]    => no
 *  [                 |xxxx]    => no
 *  [xxxx|            |xxxx]    => no
 *  [xxxxxxxxxx|xxxxxxxxxxx]    => no
 *
 * So the only case where the buffer does not wrap is when there's data either
 * at the beginning or at the end of the buffer. Thus we have this :
 *  - if (head <= 0)    ==> doesn't wrap
 *  - if (tail >= size) ==> doesn't wrap
 *  - otherwise wraps
 */
static inline int b_space_wraps(const struct buffer *b)
{
	if ((ssize_t)__b_head_ofs(b) <= 0)
		return 0;
	if (__b_tail_ofs(b) >= b_size(b))
		return 0;
	return 1;
}

/* b_contig_data() : returns the amount of data that can contiguously be read
 * at once starting from a relative offset <start> (which makes it easy to
 * pre-compute blocks for memcpy). The start point will typically contain the
 * amount of past data already returned by a previous call to this function.
 */
static inline size_t b_contig_data(const struct buffer *b, size_t start)
{
	size_t data = b_wrap(b) - b_peek(b, start);
	size_t limit = b_data(b) - start;

	if (data > limit)
		data = limit;
	return data;
}

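/* Illustrative sketch: draining all pending data from a possibly wrapping
 * buffer into a flat area <dst> (assumed to be at least b_data(b) bytes long)
 * using at most two memcpy() calls, as described above:
 *
 *	static inline void copy_out_example(const struct buffer *b, char *dst)
 *	{
 *		size_t done = 0;
 *
 *		while (done < b_data(b)) {
 *			size_t blk = b_contig_data(b, done);
 *
 *			memcpy(dst + done, b_peek(b, done), blk);
 *			done += blk;
 *		}
 *	}
 */
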
/* b_contig_space() : returns the amount of bytes that can be appended to the
 * buffer at once. We have 8 possible cases :
 *
 * [____________________]  return size
 * [______|_____________]  return size - tail_ofs
 * [XXXXXX|_____________]  return size - tail_ofs
 * [___|XXXXXX|_________]  return size - tail_ofs
 * [______________XXXXXX]  return head_ofs
 * [XXXX|___________|XXX]  return head_ofs - tail_ofs
 * [XXXXXXXXXX|XXXXXXXXX]  return 0
 * [XXXXXXXXXXXXXXXXXXXX]  return 0
 */
static inline size_t b_contig_space(const struct buffer *b)
{
	size_t left, right;

	right = b_head_ofs(b);
	left  = right + b_data(b);

	left = b_size(b) - left;
	if ((ssize_t)left <= 0)
		left += right;
	return left;
}

/* b_getblk() : gets one full block of data at once from a buffer, starting
 * from offset <offset> after the buffer's head, and limited to no more than
 * <len> bytes. The caller is responsible for ensuring that neither <offset>
 * nor <offset>+<len> exceed the total number of bytes available in the buffer.
 * Return values :
 *   >0 : number of bytes read, equal to requested size.
 *   =0 : not enough data available. <blk> is left undefined.
 * The buffer is left unaffected.
 */
static inline size_t b_getblk(const struct buffer *buf, char *blk, size_t len, size_t offset)
{
	size_t firstblock;

	if (len + offset > b_data(buf))
		return 0;

	firstblock = b_wrap(buf) - b_head(buf);
	if (firstblock > offset) {
		if (firstblock >= len + offset) {
			memcpy(blk, b_head(buf) + offset, len);
			return len;
		}

		memcpy(blk, b_head(buf) + offset, firstblock - offset);
		memcpy(blk + firstblock - offset, b_orig(buf), len - firstblock + offset);
		return len;
	}

	memcpy(blk, b_orig(buf) + offset - firstblock, len);
	return len;
}

/* b_getblk_nc() : gets one or two blocks of data at once from a buffer,
 * starting from offset <ofs> after the beginning of its output, and limited to
 * no more than <max> bytes. The caller is responsible for ensuring that
 * neither <ofs> nor <ofs>+<max> exceed the total number of bytes available in
 * the buffer. Return values :
 *   >0 : number of blocks filled (1 or 2). blk1 is always filled before blk2.
 *   =0 : not enough data available. <blk*> are left undefined.
 * The buffer is left unaffected. Unused buffers are left in an undefined state.
 */
static inline size_t b_getblk_nc(const struct buffer *buf, const char **blk1, size_t *len1, const char **blk2, size_t *len2, size_t ofs, size_t max)
{
	size_t l1;

	if (!max)
		return 0;

	*blk1 = b_peek(buf, ofs);
	l1 = b_wrap(buf) - *blk1;
	if (l1 < max) {
		*len1 = l1;
		*len2 = max - l1;
		*blk2 = b_orig(buf);
		return 2;
	}
	*len1 = max;
	return 1;
}

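/* Illustrative sketch (assumes plain POSIX writev() from <sys/uio.h>): the one
 * or two blocks reported by b_getblk_nc() map directly to an iovec array for
 * scatter-gather output without copying the data:
 *
 *	static inline ssize_t send_example(int fd, const struct buffer *b)
 *	{
 *		const char *blk1, *blk2;
 *		size_t len1, len2, nblk;
 *		struct iovec iov[2];
 *
 *		nblk = b_getblk_nc(b, &blk1, &len1, &blk2, &len2, 0, b_data(b));
 *		if (!nblk)
 *			return 0;
 *		iov[0].iov_base = (void *)blk1;
 *		iov[0].iov_len  = len1;
 *		if (nblk > 1) {
 *			iov[1].iov_base = (void *)blk2;
 *			iov[1].iov_len  = len2;
 *		}
 *		return writev(fd, iov, nblk);
 *	}
 */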

/*********************************************/
/* Functions used to modify the buffer state */
/*********************************************/

/* b_reset() : resets a buffer. The size is not touched. */
static inline void b_reset(struct buffer *b)
{
	b->head = 0;
	b->data = 0;
}

/* b_make() : make a buffer from all parameters */
static inline struct buffer b_make(char *area, size_t size, size_t head, size_t data)
{
	struct buffer b;

	b.area = area;
	b.size = size;
	b.head = head;
	b.data = data;
	return b;
}

/* b_sub() : decreases the buffer length by <count> */
static inline void b_sub(struct buffer *b, size_t count)
{
	b->data -= count;
}

/* b_add() : increase the buffer length by <count> */
static inline void b_add(struct buffer *b, size_t count)
{
	b->data += count;
}

/* b_set_data() : sets the buffer's length */
static inline void b_set_data(struct buffer *b, size_t len)
{
	b->data = len;
}

/* b_del() : skips <del> bytes in a buffer <b>. Covers both the output and the
 * input parts so it's up to the caller to know where it plays and that <del>
 * is always smaller than the amount of data in the buffer.
 */
static inline void b_del(struct buffer *b, size_t del)
{
	b->data -= del;
	b->head += del;
	if (b->head >= b->size)
		b->head -= b->size;
}

/* b_realign_if_empty() : realigns a buffer if it's empty */
static inline void b_realign_if_empty(struct buffer *b)
{
	if (!b_data(b))
		b->head = 0;
}

/* b_slow_realign() : this function realigns a possibly wrapping buffer so that
 * the part remaining to be parsed is contiguous and starts at the beginning of
 * the buffer and the already parsed output part ends at the end of the buffer.
 * This provides the best conditions since it allows the largest inputs to be
 * processed at once and ensures that once the output data leaves, the whole
 * buffer is available at once. The number of output bytes supposedly present
 * at the beginning of the buffer and which need to be moved to the end must be
 * passed in <output>. A temporary swap area at least as large as b->size must
 * be provided in <swap>. It's up to the caller to ensure <output> is no larger
 * than the difference between the whole buffer's length and its input.
 */
static inline void b_slow_realign(struct buffer *b, char *swap, size_t output)
{
	size_t block1 = output;
	size_t block2 = 0;

	/* process output data in two steps to cover wrapping */
	if (block1 > b_size(b) - b_head_ofs(b)) {
		block2 = b_size(b) - b_head_ofs(b);
		block1 -= block2;
	}
	memcpy(swap + b_size(b) - output, b_head(b), block1);
	memcpy(swap + b_size(b) - block2, b_orig(b), block2);

	/* process input data in two steps to cover wrapping */
	block1 = b_data(b) - output;
	block2 = 0;

	if (block1 > b_tail_ofs(b)) {
		block2 = b_tail_ofs(b);
		block1 = block1 - block2;
	}
	memcpy(swap, b_peek(b, output), block1);
	memcpy(swap + block1, b_orig(b), block2);

	/* reinject changes into the buffer */
	memcpy(b_orig(b), swap, b_data(b) - output);
	memcpy(b_wrap(b) - output, swap + b_size(b) - output, output);

	b->head = (output ? b_size(b) - output : 0);
}

/* b_putchr() : tries to append char <c> at the end of buffer <b>. Supports
 * wrapping. Data are truncated if buffer is full.
 */
static inline void b_putchr(struct buffer *b, char c)
{
	if (b_full(b))
		return;
	*b_tail(b) = c;
	b->data++;
}

/* __b_putblk() : tries to append <len> bytes from block <blk> to the end of
 * buffer <b> without checking for free space (it's up to the caller to do it).
 * Supports wrapping. It must not be called with len == 0.
 */
static inline void __b_putblk(struct buffer *b, const char *blk, size_t len)
{
	size_t half = b_contig_space(b);

	if (half > len)
		half = len;

	memcpy(b_tail(b), blk, half);

	if (len > half)
		memcpy(b_peek(b, b_data(b) + half), blk + half, len - half);
	b->data += len;
}

/* b_putblk() : tries to append block <blk> at the end of buffer <b>. Supports
 * wrapping. Data are truncated if buffer is too short. It returns the number
 * of bytes copied.
 */
static inline size_t b_putblk(struct buffer *b, const char *blk, size_t len)
{
	if (len > b_room(b))
		len = b_room(b);
	if (len)
		__b_putblk(b, blk, len);
	return len;
}

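/* Illustrative sketch: appending a NUL-terminated string with b_putblk() and
 * detecting truncation (strlen() comes from <string.h>, included above):
 *
 *	static inline int append_string_example(struct buffer *b, const char *s)
 *	{
 *		size_t len = strlen(s);
 *
 *		return b_putblk(b, s, len) == len;
 *	}
 */
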
/* b_xfer() : transfers at most <count> bytes from buffer <src> to buffer <dst>
 * and returns the number of bytes copied. The bytes are removed from <src> and
 * added to <dst>. The caller is responsible for ensuring that <count> is not
 * larger than b_room(dst). Whenever possible (if the destination is empty and
 * at least as much as the source was requested), the buffers are simply
 * swapped instead of copied.
 */
static inline size_t b_xfer(struct buffer *dst, struct buffer *src, size_t count)
{
	size_t ret, block1, block2;

	ret = 0;
	if (!count)
		goto leave;

	ret = b_data(src);
	if (!ret)
		goto leave;

	if (ret > count)
		ret = count;
	else if (!b_data(dst)) {
		/* zero copy is possible by just swapping buffers */
		struct buffer tmp = *dst;
		*dst = *src;
		*src = tmp;
		goto leave;
	}

	block1 = b_contig_data(src, 0);
	if (block1 > ret)
		block1 = ret;
	block2 = ret - block1;

	if (block1)
		__b_putblk(dst, b_head(src), block1);

	if (block2)
		__b_putblk(dst, b_peek(src, block1), block2);

	b_del(src, ret);
 leave:
	return ret;
}

/* Moves <len> bytes from absolute position <src> of buffer <b> by <shift>
 * bytes, while supporting wrapping of both the source and the destination.
 * The position is relative to the buffer's origin and may overlap with the
 * target position. The <shift>'s absolute value must be strictly lower than
 * the buffer's size. The main purpose is to aggregate data blocks during
 * parsing while removing unused delimiters. The buffer's length is not
 * modified, and the caller must take care of size adjustments and holes by
 * itself.
 */
static inline void b_move(const struct buffer *b, size_t src, size_t len, ssize_t shift)
{
	char  *orig = b_orig(b);
	size_t size = b_size(b);
	size_t dst  = src + size + shift;
	size_t cnt;

	if (dst >= size)
		dst -= size;

	if (shift < 0) {
		/* copy from left to right */
		for (; (cnt = len); len -= cnt) {
			if (cnt > size - src)
				cnt = size - src;
			if (cnt > size - dst)
				cnt = size - dst;

			memmove(orig + dst, orig + src, cnt);
			dst += cnt;
			src += cnt;
			if (dst >= size)
				dst -= size;
			if (src >= size)
				src -= size;
		}
	}
	else if (shift > 0) {
		/* copy from right to left */
		for (; (cnt = len); len -= cnt) {
			size_t src_end = src + len;
			size_t dst_end = dst + len;

			if (dst_end > size)
				dst_end -= size;
			if (src_end > size)
				src_end -= size;

			if (cnt > dst_end)
				cnt = dst_end;
			if (cnt > src_end)
				cnt = src_end;

			memmove(orig + dst_end - cnt, orig + src_end - cnt, cnt);
		}
	}
}

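/* Illustrative sketch: dropping a 2-byte delimiter (e.g. a CRLF spotted during
 * parsing) located at absolute position <pos> and followed by <n> bytes of
 * payload, assuming <pos> + 2 does not itself cross the wrapping point. The
 * payload is shifted two bytes to the left, then the length is adjusted by the
 * caller since b_move() never touches it:
 *
 *	b_move(b, pos + 2, n, -2);
 *	b_sub(b, 2);
 */
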
/* b_rep_blk() : writes the block <blk> at position <pos> which must be in
 * buffer <b>, and moves the part between <end> and the buffer's tail just
 * after the end of the copy of <blk>. This effectively replaces the part
 * located between <pos> and <end> with a copy of <blk> of length <len>. The
 * buffer's length is automatically updated. This is used to replace a block
 * with another one inside a buffer. The shift value (positive or negative) is
 * returned. If there's no space left, the move is not done. If <len> is null,
 * the <blk> pointer is allowed to be null, in order to erase a block.
 */
static inline int b_rep_blk(struct buffer *b, char *pos, char *end, const char *blk, size_t len)
{
	int delta;

	delta = len - (end - pos);

	if (__b_tail(b) + delta > b_wrap(b))
		return 0;  /* no space left */

	if (b_data(b) &&
	    b_tail(b) + delta > b_head(b) &&
	    b_head(b) >= b_tail(b))
		return 0;  /* no space left before wrapping data */

	/* first, protect the end of the buffer */
	memmove(end + delta, end, b_tail(b) - end);

	/* now, copy blk over pos */
	if (len)
		memcpy(pos, blk, len);

	b_add(b, delta);
	b_realign_if_empty(b);

	return delta;
}

/* b_insert_blk(): inserts the block <blk> at the absolute offset <off>, moving
 * the data between this offset and the buffer's tail just after the end of the
 * copy of <blk>. The buffer's length is automatically updated. It supports
 * wrapping. If there is not enough space to perform the copy, 0 is returned.
 * Otherwise, the number of bytes copied is returned.
 */
static inline int b_insert_blk(struct buffer *b, size_t off, const char *blk, size_t len)
{
	size_t pos;

	if (!len || len > b_room(b))
		return 0; /* nothing to copy or not enough space left */

	pos = b_peek_ofs(b, off);
	if (pos == b_tail_ofs(b))
		__b_putblk(b, blk, len);
	else {
		size_t delta = b_data(b) - off;

		/* first, protect the end of the buffer */
		b_move(b, pos, delta, len);

		/* change the amount of data in the buffer during the copy */
		b_sub(b, delta);
		__b_putblk(b, blk, len);
		b_add(b, delta);
	}
	return len;
}

/* __b_put_varint(): encode 64-bit value <v> as a varint into buffer <b>. The
 * caller must have checked that the encoded value fits in the buffer so that
 * there are no length checks. Wrapping is supported. You don't want to use
 * this function but b_put_varint() instead.
 */
static inline void __b_put_varint(struct buffer *b, uint64_t v)
{
	size_t data = b->data;
	size_t size = b_size(b);
	char  *wrap = b_wrap(b);
	char  *tail = b_tail(b);

	if (v >= 0xF0) {
		/* more than one byte, first write the 4 least significant
		 * bits, then follow with 7 bits per byte.
		 */
		*tail = v | 0xF0;
		v = (v - 0xF0) >> 4;

		while (1) {
			if (tail++ == wrap)
				tail -= size;
			data++;
			if (v < 0x80)
				break;
			*tail = v | 0x80;
			v = (v - 0x80) >> 7;
		}
	}

	/* last byte */
	*tail = v;
	data++;
	b->data = data;
}

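/* Worked example of the varint encoding above (informative only): values below
 * 0xF0 fit in a single byte, e.g. 100 is encoded as 0x64. For v = 300:
 *   - first byte : (300 | 0xF0) & 0xFF = 0xFC, then v = (300 - 0xF0) >> 4 = 3
 *   - 3 < 0x80, so it is emitted as-is as the last byte: 0x03
 * giving the two-byte sequence 0xFC 0x03. Decoding with b_get_varint() below
 * rebuilds 0xFC + (0x03 << 4) = 300.
 */
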
/* b_put_varint(): try to encode value <v> as a varint into buffer <b>. Returns
 * the number of bytes written in case of success, or 0 if there is not enough
 * room. Wrapping is supported. No partial writes will be performed.
 */
static inline int b_put_varint(struct buffer *b, uint64_t v)
{
	size_t data = b->data;
	size_t size = b_size(b);
	char  *wrap = b_wrap(b);
	char  *tail = b_tail(b);

	if (data != size && v >= 0xF0) {
		/* more than one byte, first write the 4 least significant
		 * bits, then follow with 7 bits per byte.
		 */
		*tail = v | 0xF0;
		v = (v - 0xF0) >> 4;

		while (1) {
			if (tail++ == wrap)
				tail -= size;
			data++;
			if (data == size || v < 0x80)
				break;
			*tail = v | 0x80;
			v = (v - 0x80) >> 7;
		}
	}

	/* last byte */
	if (data == size)
		return 0;

	*tail = v;
	data++;

	size = data - b->data;
	b->data = data;
	return size;
}

/* b_get_varint(): try to decode a varint from buffer <b> into value <vptr>.
 * Returns the number of bytes read in case of success, or 0 if there were not
 * enough bytes. Wrapping is supported. No partial reads will be performed.
 */
static inline int b_get_varint(struct buffer *b, uint64_t *vptr)
{
	const uint8_t *head = (const uint8_t *)b_head(b);
	const uint8_t *wrap = (const uint8_t *)b_wrap(b);
	size_t data = b->data;
	size_t size = b_size(b);
	uint64_t v = 0;
	int bits = 0;

	if (data != 0 && (*head >= 0xF0)) {
		v = *head;
		bits += 4;
		while (1) {
			if (head++ == wrap)
				head -= size;
			data--;
			if (!data || !(*head & 0x80))
				break;
			v += (uint64_t)*head << bits;
			bits += 7;
		}
	}

	/* last byte */
	if (!data)
		return 0;

	v += (uint64_t)*head << bits;
	*vptr = v;
	data--;
	size = b->data - data;
	b_del(b, size);
	return size;
}

/* b_peek_varint(): try to decode a varint from buffer <b> at offset <ofs>
 * relative to head, into value <vptr>. Returns the number of bytes parsed in
 * case of success, or 0 if there were not enough bytes, in which case the
 * contents of <vptr> are not updated. Wrapping is supported. The buffer's head
 * will NOT be updated. It is illegal to call this function with <ofs> greater
 * than b->data.
 */
static inline int b_peek_varint(struct buffer *b, size_t ofs, uint64_t *vptr)
{
	const uint8_t *head = (const uint8_t *)b_peek(b, ofs);
	const uint8_t *wrap = (const uint8_t *)b_wrap(b);
	size_t data = b_data(b) - ofs;
	size_t size = b_size(b);
	uint64_t v = 0;
	int bits = 0;

	if (data != 0 && (*head >= 0xF0)) {
		v = *head;
		bits += 4;
		while (1) {
			if (head++ == wrap)
				head -= size;
			data--;
			if (!data || !(*head & 0x80))
				break;
			v += (uint64_t)*head << bits;
			bits += 7;
		}
	}

	/* last byte */
	if (!data)
		return 0;

	v += (uint64_t)*head << bits;
	*vptr = v;
	data--;
	size = b->data - ofs - data;
	return size;
}


/*
 * Buffer ring management.
 *
 * A buffer ring is a circular list of buffers, with a head buffer (the oldest,
 * being read from) and a tail (the newest, being written to). Such a ring is
 * declared as an array of buffers. The first element in the array is the root
 * and is used differently. It stores the following elements :
 *  - size : number of allocated elements in the array, including the root
 *  - area : magic value BUF_RING (just to help debugging)
 *  - head : position of the head in the array (starts at one)
 *  - data : position of the tail in the array (starts at one).
 *
 * Note that contrary to a linear buffer, head and tail may be equal with room
 * available, since the producer is expected to fill the tail. Also, the tail
 * might pretty much be equal to BUF_WANTED if an allocation is pending, in
 * which case it's illegal to try to allocate past this point (only one entry
 * may be subscribed for allocation). It is illegal to allocate a buffer after
 * an empty one, so that BUF_NULL is always the last buffer. It is also illegal
 * to remove elements without freeing the buffers. Buffers between <tail> and
 * <head> are in an undefined state, but <tail> and <head> are always valid.
 * A ring may not contain fewer than 2 elements, since the root is mandatory,
 * and at least one entry is required to always present a valid buffer.
 *
 * Given that buffers are 16 or 32 bytes long, it's convenient to set the
 * size of the array to 2^N in order to keep (2^N)-1 elements, totaling
 * 2^N*16 (or 32) bytes. For example on a 64-bit system, a ring of 31 usable
 * buffers takes 1024 bytes.
 */

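/* Illustrative sketch: declaring and using a small ring made of a root plus 7
 * usable slots. <get_some_buffer> is a hypothetical function returning an
 * allocated struct buffer. Once the current tail has been filled (e.g. with
 * b_putblk()), br_tail_add() advances the tail and returns a fresh BUF_NULL
 * slot, or NULL if the ring is full:
 *
 *	struct buffer ring[8];
 *	struct buffer *tail;
 *
 *	br_init(ring, sizeof(ring) / sizeof(ring[0]));
 *	tail = br_tail(ring);
 *	*tail = get_some_buffer();
 *	tail = br_tail_add(ring);
 */
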
/* Initialization of a ring, the size argument contains the number of allocated
 * elements, including the root. There must always be at least 2 elements, one
 * for the root and one for storage.
 */
static inline void br_init(struct buffer *r, size_t size)
{
	BUG_ON(size < 2);

	r->size = size;
	r->area = BUF_RING.area;
	r->head = r->data = 1;
	r[1] = BUF_NULL;
}

/* Returns number of elements in the ring, root included */
static inline unsigned int br_size(const struct buffer *r)
{
	BUG_ON(r->area != BUF_RING.area);

	return r->size;
}

/* Returns true if no more buffers may be added */
static inline unsigned int br_full(const struct buffer *r)
{
	BUG_ON(r->area != BUF_RING.area);

	return r->data + 1 == r->head || r->data + 1 == r->head - 1 + r->size;
}

/* Returns the index of the ring's head buffer */
static inline unsigned int br_head_idx(const struct buffer *r)
{
	BUG_ON(r->area != BUF_RING.area);

	return r->head;
}

/* Returns the index of the ring's tail buffer */
static inline unsigned int br_tail_idx(const struct buffer *r)
{
	BUG_ON(r->area != BUF_RING.area);

	return r->data;
}

/* Returns a pointer to the ring's head buffer */
static inline struct buffer *br_head(struct buffer *r)
{
	BUG_ON(r->area != BUF_RING.area);

	return r + br_head_idx(r);
}

/* Returns a pointer to the ring's tail buffer */
static inline struct buffer *br_tail(struct buffer *r)
{
	BUG_ON(r->area != BUF_RING.area);

	return r + br_tail_idx(r);
}

/* Returns the amount of data of the ring's HEAD buffer */
static inline unsigned int br_data(const struct buffer *r)
{
	BUG_ON(r->area != BUF_RING.area);

	return b_data(r + br_head_idx(r));
}

/* Returns non-zero if the ring is non-full or its tail has some room */
static inline unsigned int br_has_room(const struct buffer *r)
{
	BUG_ON(r->area != BUF_RING.area);

	if (!br_full(r))
		return 1;
	return b_room(r + br_tail_idx(r));
}

/* Advances the ring's tail if it points to a non-empty buffer, and returns the
 * buffer, or NULL if the ring is full or the tail buffer is already empty. A
 * new buffer is initialized to BUF_NULL before being returned. This is to be
 * used after failing to append data, in order to decide to retry or not.
 */
static inline struct buffer *br_tail_add(struct buffer *r)
{
	struct buffer *b;

	BUG_ON(r->area != BUF_RING.area);

	b = br_tail(r);
	if (!b_size(b))
		return NULL;

	if (br_full(r))
		return NULL;

	r->data++;
	if (r->data >= r->size)
		r->data = 1;

	b = br_tail(r);
	*b = BUF_NULL;
	return b;
}

/* Extracts the ring's head buffer and returns it. The last buffer (tail) is
 * never removed but it is returned. This guarantees that we stop on BUF_WANTED
 * or BUF_EMPTY and that at the end a valid buffer remains present. This is
 * used for pre-extraction during a free() loop for example. The caller is
 * expected to detect the end (e.g. using b_size() since b_free() voids the
 * buffer).
 */
static inline struct buffer *br_head_pick(struct buffer *r)
{
	struct buffer *b;

	BUG_ON(r->area != BUF_RING.area);

	b = br_head(r);
	if (r->head != r->data) {
		r->head++;
		if (r->head >= r->size)
			r->head = 1;
	}
	return b;
}

/* Advances the ring's head and returns the next buffer, unless it's already
 * the tail, in which case the tail itself is returned. This is used for post-
 * parsing deletion. The caller is expected to detect the end (e.g. a parser
 * will typically purge the head before proceeding).
 */
static inline struct buffer *br_del_head(struct buffer *r)
{
	BUG_ON(r->area != BUF_RING.area);

	if (r->head != r->data) {
		r->head++;
		if (r->head >= r->size)
			r->head = 1;
	}
	return br_head(r);
}

#endif /* _COMMON_BUF_H */

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */