blob: db0864984c4103e97ce4180f066e35960688bbb0 [file] [log] [blame]
Amaury Denoyelle1b5f77f2022-05-09 09:37:27 +02001#include <haproxy/ncbuf.h>
2
Amaury Denoyelle077e0962022-05-09 09:43:11 +02003#include <string.h>
4
5#ifndef MIN
6#define MIN(a, b) (((a) < (b)) ? (a) : (b))
7#endif
8
Amaury Denoyelle1b5f77f2022-05-09 09:37:27 +02009#ifdef DEBUG_DEV
10# include <haproxy/bug.h>
11#else
12# include <stdio.h>
13# include <stdlib.h>
14
15# undef BUG_ON
16# define BUG_ON(x) if (x) { fprintf(stderr, "CRASH ON %s:%d\n", __func__, __LINE__); abort(); }
17
18# undef BUG_ON_HOT
19# define BUG_ON_HOT(x) if (x) { fprintf(stderr, "CRASH ON %s:%d\n", __func__, __LINE__); abort(); }
20#endif /* DEBUG_DEV */
21
/* ******** internal API ******** */

#define NCB_BLK_NULL ((struct ncb_blk){ .st = NULL })

#define NCB_BK_F_GAP 0x01 /* block represents a gap */
#define NCB_BK_F_FIN 0x02 /* special reduced gap present at the end of the buffer */

/* Transient descriptor of one buffer block. A ncbuf alternates DATA and GAP
 * blocks; instances of this struct are built on the fly while iterating and
 * are never stored inside the buffer itself.
 */
struct ncb_blk {
	char *st;         /* first byte of the block */
	char *end;        /* first byte after this block */

	char *sz_ptr;     /* pointer to size element - NULL for reduced gap */
	ncb_sz_t sz;      /* size of the block */
	ncb_sz_t sz_data; /* size of the data following the block - invalid for reduced GAP */
	ncb_sz_t off;     /* offset of block in buffer */

	char flag;        /* NCB_BK_F_* bitfield */
};
39
Amaury Denoyelle1b5f77f2022-05-09 09:37:27 +020040/* Return pointer to <off> relative to <buf> head. Support buffer wrapping. */
41static char *ncb_peek(const struct ncbuf *buf, ncb_sz_t off)
42{
43 char *ptr = ncb_head(buf) + off;
44 if (ptr >= buf->area + buf->size)
45 ptr -= buf->size;
46 return ptr;
47}
48
49/* Returns the reserved space of <buf> which contains the size of the first
50 * data block.
51 */
52static char *ncb_reserved(const struct ncbuf *buf)
53{
54 return ncb_peek(buf, buf->size - NCB_RESERVED_SZ);
55}
56
57/* Encode <off> at <st> position in <buf>. Support wrapping. */
58static void ncb_write_off(const struct ncbuf *buf, char *st, ncb_sz_t off)
59{
60 int i;
61
62 BUG_ON_HOT(st >= buf->area + buf->size);
63
64 for (i = 0; i < sizeof(ncb_sz_t); ++i) {
65 (*st) = off >> (8 * i) & 0xff;
66
67 if ((++st) == ncb_wrap(buf))
68 st = ncb_orig(buf);
69 }
70}
71
72/* Decode offset stored at <st> position in <buf>. Support wrapping. */
73static ncb_sz_t ncb_read_off(const struct ncbuf *buf, char *st)
74{
75 int i;
76 ncb_sz_t off = 0;
77
78 BUG_ON_HOT(st >= buf->area + buf->size);
79
80 for (i = 0; i < sizeof(ncb_sz_t); ++i) {
81 off |= (unsigned char )(*st) << (8 * i);
82
83 if ((++st) == ncb_wrap(buf))
84 st = ncb_orig(buf);
85 }
86
87 return off;
88}
89
Amaury Denoyelle077e0962022-05-09 09:43:11 +020090/* Add <off> to the offset stored at <st> in <buf>. Support wrapping. */
91static void ncb_inc_off(const struct ncbuf *buf, char *st, ncb_sz_t off)
92{
93 const ncb_sz_t old = ncb_read_off(buf, st);
94 ncb_write_off(buf, st, old + off);
95}
96
Amaury Denoyelleedeb0a62022-05-04 16:16:39 +020097/* Returns true if a gap cannot be inserted at <off> : a reduced gap must be used. */
98static int ncb_off_reduced(const struct ncbuf *b, ncb_sz_t off)
99{
100 return off + NCB_GAP_MIN_SZ > ncb_size(b);
101}
102
Amaury Denoyelled5d2ed92022-05-09 09:38:45 +0200103/* Returns true if <blk> is the special NULL block. */
104static int ncb_blk_is_null(const struct ncb_blk blk)
105{
106 return !blk.st;
107}
108
109/* Returns true if <blk> is the last block of <buf>. */
110static int ncb_blk_is_last(const struct ncbuf *buf, const struct ncb_blk blk)
111{
112 BUG_ON_HOT(blk.off + blk.sz > ncb_size(buf));
113 return blk.off + blk.sz == ncb_size(buf);
114}
115
116/* Returns the first block of <buf> which is always a DATA. */
117static struct ncb_blk ncb_blk_first(const struct ncbuf *buf)
118{
119 struct ncb_blk blk;
120
121 blk.st = ncb_head(buf);
122
123 blk.sz_ptr = ncb_reserved(buf);
124 blk.sz = ncb_read_off(buf, ncb_reserved(buf));
125 BUG_ON_HOT(blk.sz > ncb_size(buf));
126
127 blk.end = ncb_peek(buf, blk.sz);
128 blk.off = 0;
129 blk.flag = 0;
130
131 return blk;
132}
133
/* Returns the block following <prev> in the buffer <buf>, or NCB_BLK_NULL if
 * <prev> was the last one. Blocks strictly alternate between DATA and GAP.
 */
static struct ncb_blk ncb_blk_next(const struct ncbuf *buf,
                                   const struct ncb_blk prev)
{
	struct ncb_blk blk;

	BUG_ON_HOT(ncb_blk_is_null(prev));

	if (ncb_blk_is_last(buf, prev))
		return NCB_BLK_NULL;

	blk.st = prev.end;
	blk.off = prev.off + prev.sz;
	/* blocks alternate : the next block is a gap iff <prev> was data */
	blk.flag = ~prev.flag & NCB_BK_F_GAP;

	if (blk.flag & NCB_BK_F_GAP) {
		if (ncb_off_reduced(buf, blk.off)) {
			/* not enough room for a gap header : reduced gap
			 * spanning up to the buffer end, with no stored size.
			 */
			blk.flag |= NCB_BK_F_FIN;
			blk.sz_ptr = NULL;
			blk.sz = ncb_size(buf) - blk.off;
			blk.sz_data = 0;

			/* A reduced gap can only be the last block. */
			BUG_ON_HOT(!ncb_blk_is_last(buf, blk));
		}
		else {
			/* regular gap : its header stores its own size and
			 * the size of the data block following it.
			 */
			blk.sz_ptr = ncb_peek(buf, blk.off + NCB_GAP_SZ_OFF);
			blk.sz = ncb_read_off(buf, blk.sz_ptr);
			blk.sz_data = ncb_read_off(buf, ncb_peek(buf, blk.off + NCB_GAP_SZ_DATA_OFF));
			BUG_ON_HOT(blk.sz < NCB_GAP_MIN_SZ);
		}
	}
	else {
		/* a DATA block size lives in the header of the gap preceding
		 * it, already decoded into prev.sz_data.
		 */
		blk.sz_ptr = ncb_peek(buf, prev.off + NCB_GAP_SZ_DATA_OFF);
		blk.sz = prev.sz_data;
		blk.sz_data = 0;

		/* only first DATA block can be empty. If this happens, a GAP
		 * merge should have been realized.
		 */
		BUG_ON_HOT(!blk.sz);
	}

	BUG_ON_HOT(blk.off + blk.sz > ncb_size(buf));
	blk.end = ncb_peek(buf, blk.off + blk.sz);

	return blk;
}
182
183/* Returns the block containing offset <off>. Note that if <off> is at the
184 * frontier between two blocks, this function will return the preceding one.
185 * This is done to easily merge blocks on insertion/deletion.
186 */
187static struct ncb_blk ncb_blk_find(const struct ncbuf *buf, ncb_sz_t off)
188{
189 struct ncb_blk blk;
190
191 BUG_ON_HOT(off >= ncb_size(buf));
192
193 for (blk = ncb_blk_first(buf); off > blk.off + blk.sz;
194 blk = ncb_blk_next(buf, blk)) {
195 }
196
197 return blk;
198}
199
200/* Transform absolute offset <off> to a relative one from <blk> start. */
201static ncb_sz_t ncb_blk_off(const struct ncb_blk blk, ncb_sz_t off)
202{
203 BUG_ON_HOT(off < blk.off || off > blk.off + blk.sz);
204 BUG_ON_HOT(off - blk.off > blk.sz);
205 return off - blk.off;
206}
207
Amaury Denoyelle077e0962022-05-09 09:43:11 +0200208/* Simulate insertion in <buf> of <data> of length <len> at offset <off>. This
209 * ensures that minimal block size are respected for newly formed gaps. <blk>
Amaury Denoyelleb830f0d2022-05-09 11:59:15 +0200210 * must be the block where the insert operation begins. If <mode> is
211 * NCB_ADD_COMPARE, old and new overlapped data are compared to validate the
212 * insertion.
Amaury Denoyelle077e0962022-05-09 09:43:11 +0200213 *
214 * Returns NCB_RET_OK if insertion can proceed.
215 */
216static enum ncb_ret ncb_check_insert(const struct ncbuf *buf,
217 struct ncb_blk blk, ncb_sz_t off,
Amaury Denoyelleb830f0d2022-05-09 11:59:15 +0200218 const char *data, ncb_sz_t len,
219 enum ncb_add_mode mode)
Amaury Denoyelle077e0962022-05-09 09:43:11 +0200220{
221 ncb_sz_t off_blk = ncb_blk_off(blk, off);
222 ncb_sz_t to_copy;
223 ncb_sz_t left = len;
224
225 /* If insertion starts in a gap, it must leave enough space to keep the
226 * gap header.
227 */
228 if (left && (blk.flag & NCB_BK_F_GAP)) {
229 if (off_blk < NCB_GAP_MIN_SZ)
230 return NCB_RET_GAP_SIZE;
231 }
232
233 while (left) {
234 off_blk = ncb_blk_off(blk, off);
235 to_copy = MIN(left, blk.sz - off_blk);
236
237 if (blk.flag & NCB_BK_F_GAP && off_blk + to_copy < blk.sz) {
238 /* Insertion must leave enough space for a new gap
239 * header if stopped in a middle of a gap.
240 */
241 const ncb_sz_t gap_sz = blk.sz - (off_blk + to_copy);
242 if (gap_sz < NCB_GAP_MIN_SZ && !ncb_blk_is_last(buf, blk))
243 return NCB_RET_GAP_SIZE;
244 }
Amaury Denoyelleb830f0d2022-05-09 11:59:15 +0200245 else if (!(blk.flag & NCB_BK_F_GAP) && mode == NCB_ADD_COMPARE) {
246 /* Compare memory of data block in NCB_ADD_COMPARE mode. */
247 const ncb_sz_t off_blk = ncb_blk_off(blk, off);
248 char *st = ncb_peek(buf, off);
249
250 to_copy = MIN(left, blk.sz - off_blk);
251 if (st + to_copy > ncb_wrap(buf)) {
252 const ncb_sz_t sz1 = ncb_wrap(buf) - st;
253 if (memcmp(st, data, sz1))
254 return NCB_RET_DATA_REJ;
255 if (memcmp(ncb_orig(buf), data + sz1, to_copy - sz1))
256 return NCB_RET_DATA_REJ;
257 }
258 else {
259 if (memcmp(st, data, to_copy))
260 return NCB_RET_DATA_REJ;
261 }
262 }
Amaury Denoyelle077e0962022-05-09 09:43:11 +0200263
264 left -= to_copy;
265 data += to_copy;
266 off += to_copy;
267
268 blk = ncb_blk_next(buf, blk);
269 }
270
271 return NCB_RET_OK;
272}
273
274/* Fill new <data> of length <len> inside an already existing data <blk> at
275 * offset <off>. Offset is relative to <blk> so it cannot be greater than the
Amaury Denoyelleb830f0d2022-05-09 11:59:15 +0200276 * block size. <mode> specifies if old data are preserved or overwritten.
Amaury Denoyelle077e0962022-05-09 09:43:11 +0200277 */
278static ncb_sz_t ncb_fill_data_blk(const struct ncbuf *buf,
279 struct ncb_blk blk, ncb_sz_t off,
Amaury Denoyelleb830f0d2022-05-09 11:59:15 +0200280 const char *data, ncb_sz_t len,
281 enum ncb_add_mode mode)
Amaury Denoyelle077e0962022-05-09 09:43:11 +0200282{
283 const ncb_sz_t to_copy = MIN(len, blk.sz - off);
Amaury Denoyelleb830f0d2022-05-09 11:59:15 +0200284 char *ptr = NULL;
Amaury Denoyelle077e0962022-05-09 09:43:11 +0200285
286 BUG_ON_HOT(off > blk.sz);
287 /* This can happens due to previous ncb_blk_find() usage. In this
288 * case the current fill is a noop.
289 */
290 if (off == blk.sz)
291 return 0;
292
Amaury Denoyelleb830f0d2022-05-09 11:59:15 +0200293 if (mode == NCB_ADD_OVERWRT) {
294 ptr = ncb_peek(buf, blk.off + off);
295
296 if (ptr + to_copy >= ncb_wrap(buf)) {
297 const ncb_sz_t sz1 = ncb_wrap(buf) - ptr;
298 memcpy(ptr, data, sz1);
299 memcpy(ncb_orig(buf), data + sz1, to_copy - sz1);
300 }
301 else {
302 memcpy(ptr, data, to_copy);
303 }
304 }
305
Amaury Denoyelle077e0962022-05-09 09:43:11 +0200306 return to_copy;
307}
308
/* Fill the gap <blk> starting at <off> with new <data> of length <len>. <off>
 * is relative to <blk> so it cannot be greater than the block size.
 *
 * Returns the number of bytes consumed from <data>.
 */
static ncb_sz_t ncb_fill_gap_blk(const struct ncbuf *buf,
                                 struct ncb_blk blk, ncb_sz_t off,
                                 const char *data, ncb_sz_t len)
{
	const ncb_sz_t to_copy = MIN(len, blk.sz - off);
	char *ptr;

	BUG_ON_HOT(off > blk.sz);
	/* This can happen due to previous ncb_blk_find() usage. In this
	 * case the current fill is a noop.
	 */
	if (off == blk.sz)
		return 0;

	/* A new gap must be created if insertion stopped before gap end. */
	if (off + to_copy < blk.sz) {
		const ncb_sz_t gap_off = blk.off + off + to_copy;
		const ncb_sz_t gap_sz = blk.sz - off - to_copy;

		/* the remaining gap must either be reduced or large enough
		 * for a regular gap header
		 */
		BUG_ON_HOT(!ncb_off_reduced(buf, gap_off) &&
		           blk.off + blk.sz - gap_off < NCB_GAP_MIN_SZ);

		/* write the new gap header unless this is a reduced gap. */
		if (!ncb_off_reduced(buf, gap_off)) {
			char *gap_ptr = ncb_peek(buf, gap_off + NCB_GAP_SZ_OFF);
			char *gap_data_ptr = ncb_peek(buf, gap_off + NCB_GAP_SZ_DATA_OFF);

			ncb_write_off(buf, gap_ptr, gap_sz);
			/* the data block following the gap is unchanged */
			ncb_write_off(buf, gap_data_ptr, blk.sz_data);
		}
	}

	/* fill the gap with new data, possibly wrapping at buffer end */
	ptr = ncb_peek(buf, blk.off + off);
	if (ptr + to_copy >= ncb_wrap(buf)) {
		ncb_sz_t sz1 = ncb_wrap(buf) - ptr;
		memcpy(ptr, data, sz1);
		memcpy(ncb_orig(buf), data + sz1, to_copy - sz1);
	}
	else {
		memcpy(ptr, data, to_copy);
	}

	return to_copy;
}
357
Amaury Denoyelle1b5f77f2022-05-09 09:37:27 +0200358/* ******** public API ******** */
359
360int ncb_is_null(const struct ncbuf *buf)
361{
362 return buf->size == 0;
363}
364
/* Initialize or reset <buf> by clearing all data. Its size is untouched.
 * Buffer is positioned to <head> offset. Use 0 to realign it.
 */
void ncb_init(struct ncbuf *buf, ncb_sz_t head)
{
	BUG_ON_HOT(head >= buf->size);
	buf->head = head;

	/* first data block is empty */
	ncb_write_off(buf, ncb_reserved(buf), 0);
	/* a single gap covers the whole usable area */
	ncb_write_off(buf, ncb_head(buf), ncb_size(buf));
	/* no data block follows this initial gap */
	ncb_write_off(buf, ncb_peek(buf, sizeof(ncb_sz_t)), 0);
}
377
378/* Construct a ncbuf with all its parameters. */
379struct ncbuf ncb_make(char *area, ncb_sz_t size, ncb_sz_t head)
380{
381 struct ncbuf buf;
382
383 /* Ensure that there is enough space for the reserved space and data.
384 * This is the minimal value to not crash later.
385 */
386 BUG_ON_HOT(size <= NCB_RESERVED_SZ);
387
388 buf.area = area;
389 buf.size = size;
390 buf.head = head;
391
392 return buf;
393}
394
395/* Returns start of allocated buffer area. */
396char *ncb_orig(const struct ncbuf *buf)
397{
398 return buf->area;
399}
400
401/* Returns current head pointer into buffer area. */
402char *ncb_head(const struct ncbuf *buf)
403{
404 return buf->area + buf->head;
405}
406
407/* Returns the first byte after the allocated buffer area. */
408char *ncb_wrap(const struct ncbuf *buf)
409{
410 return buf->area + buf->size;
411}
412
413/* Returns the usable size of <buf> for data storage. This is the size of the
414 * allocated buffer without the reserved header space.
415 */
416ncb_sz_t ncb_size(const struct ncbuf *buf)
417{
418 return buf->size - NCB_RESERVED_SZ;
419}
420
Amaury Denoyelled5d2ed92022-05-09 09:38:45 +0200421/* Returns the total number of bytes stored in whole <buf>. */
422ncb_sz_t ncb_total_data(const struct ncbuf *buf)
423{
424 struct ncb_blk blk;
425 int total = 0;
426
427 for (blk = ncb_blk_first(buf); !ncb_blk_is_null(blk); blk = ncb_blk_next(buf, blk)) {
428 if (!(blk.flag & NCB_BK_F_GAP))
429 total += blk.sz;
430 }
431
432 return total;
433}
434
Amaury Denoyelle1b5f77f2022-05-09 09:37:27 +0200435/* Returns true if there is no data anywhere in <buf>. */
436int ncb_is_empty(const struct ncbuf *buf)
437{
438 BUG_ON_HOT(*ncb_reserved(buf) + *ncb_head(buf) > ncb_size(buf));
439 return *ncb_reserved(buf) == 0 && *ncb_head(buf) == ncb_size(buf);
440}
441
442/* Returns true if no more data can be inserted in <buf>. */
443int ncb_is_full(const struct ncbuf *buf)
444{
445 BUG_ON_HOT(ncb_read_off(buf, ncb_reserved(buf)) > ncb_size(buf));
446 return ncb_read_off(buf, ncb_reserved(buf)) == ncb_size(buf);
447}
Amaury Denoyelled5d2ed92022-05-09 09:38:45 +0200448
/* Returns the number of bytes of data available in <buf> starting at offset
 * <off> until the next gap or the buffer end. The counted data may wrap if
 * the buffer storage is not aligned.
 */
ncb_sz_t ncb_data(const struct ncbuf *buf, ncb_sz_t off)
{
	struct ncb_blk blk = ncb_blk_find(buf, off);
	ncb_sz_t off_blk = ncb_blk_off(blk, off);

	/* if <off> is at the frontier between two blocks and <blk> is a gap,
	 * retrieve the next data block.
	 */
	if (blk.flag & NCB_BK_F_GAP && off_blk == blk.sz &&
	    !ncb_blk_is_last(buf, blk)) {
		blk = ncb_blk_next(buf, blk);
		off_blk = ncb_blk_off(blk, off);
	}

	/* no data can be counted if <off> lies inside a gap */
	if (blk.flag & NCB_BK_F_GAP)
		return 0;

	return blk.sz - off_blk;
}
Amaury Denoyelle077e0962022-05-09 09:43:11 +0200472
/* Add a new block at <data> of size <len> in <buf> at offset <off>.
 *
 * Returns NCB_RET_OK on success. On error the following codes are returned :
 * - NCB_RET_GAP_SIZE : cannot add data because the gap formed is too small
 * - NCB_RET_DATA_REJ : old data would be overwritten by different ones in
 *                      NCB_ADD_COMPARE mode.
 */
enum ncb_ret ncb_add(struct ncbuf *buf, ncb_sz_t off,
                     const char *data, ncb_sz_t len, enum ncb_add_mode mode)
{
	struct ncb_blk blk;
	ncb_sz_t left = len;
	enum ncb_ret ret;
	char *new_sz;

	if (!len)
		return NCB_RET_OK;

	BUG_ON_HOT(off + len > ncb_size(buf));

	/* Get block where insertion begins. */
	blk = ncb_blk_find(buf, off);

	/* Check if insertion is possible : nothing is written before this
	 * validation so the buffer is left untouched on error.
	 */
	ret = ncb_check_insert(buf, blk, off, data, len, mode);
	if (ret != NCB_RET_OK)
		return ret;

	if (blk.flag & NCB_BK_F_GAP) {
		/* Reduce gap size if insertion begins in a gap. Gap data size
		 * is reset and will be recalculated during insertion.
		 */
		const ncb_sz_t gap_sz = off - blk.off;
		BUG_ON_HOT(gap_sz < NCB_GAP_MIN_SZ);

		/* pointer to data size to increase. */
		new_sz = ncb_peek(buf, blk.off + NCB_GAP_SZ_DATA_OFF);

		ncb_write_off(buf, blk.sz_ptr, gap_sz);
		ncb_write_off(buf, new_sz, 0);
	}
	else {
		/* pointer to data size to increase. */
		new_sz = blk.sz_ptr;
	}

	/* insert data block per block */
	while (left) {
		struct ncb_blk next;
		const ncb_sz_t off_blk = ncb_blk_off(blk, off);
		ncb_sz_t done;

		/* retrieve the next block. This is necessary to do this
		 * before overwriting a gap, as its header would be lost.
		 */
		next = ncb_blk_next(buf, blk);

		if (blk.flag & NCB_BK_F_GAP) {
			done = ncb_fill_gap_blk(buf, blk, off_blk, data, left);

			/* update the inserted data block size */
			if (off + done == blk.off + blk.sz) {
				/* merge next data block if insertion reached gap end */
				ncb_inc_off(buf, new_sz, done + blk.sz_data);
			}
			else {
				/* insertion stopped before gap end */
				ncb_inc_off(buf, new_sz, done);
			}
		}
		else {
			done = ncb_fill_data_blk(buf, blk, off_blk, data, left, mode);
		}

		BUG_ON_HOT(done > blk.sz || done > left);
		left -= done;
		data += done;
		off += done;

		blk = next;
	}

	return NCB_RET_OK;
}