#include <haproxy/ncbuf.h>

#include <string.h>

#ifndef MIN
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif

#ifdef STANDALONE
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>

#include <haproxy/list.h>
#endif /* STANDALONE */

#ifdef DEBUG_DEV
# include <haproxy/bug.h>
#else
# include <stdio.h>
# include <stdlib.h>

# undef BUG_ON
# define BUG_ON(x) if (x) { fprintf(stderr, "CRASH ON %s:%d\n", __func__, __LINE__); abort(); }

# undef BUG_ON_HOT
# define BUG_ON_HOT(x) if (x) { fprintf(stderr, "CRASH ON %s:%d\n", __func__, __LINE__); abort(); }
#endif /* DEBUG_DEV */

/* ******** internal API ******** */

#define NCB_BLK_NULL ((struct ncb_blk){ .st = NULL })

#define NCB_BK_F_GAP 0x01 /* block represents a gap */
#define NCB_BK_F_FIN 0x02 /* special reduced gap present at the end of the buffer */
struct ncb_blk {
        char *st;  /* first byte of the block */
        char *end; /* first byte after this block */

        char *sz_ptr; /* pointer to size element - NULL for reduced gap */
        ncb_sz_t sz; /* size of the block */
        ncb_sz_t sz_data; /* size of the data following the block - invalid for reduced GAP */
        ncb_sz_t off; /* offset of block in buffer */

        char flag;
};

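/* Informal layout reminder (deduced from the accessors below, not normative):
 * the usable area spans relative offsets [0, ncb_size(buf)) starting at the
 * head and wrapping at the end of the allocated storage; the reserved bytes
 * past ncb_size(buf) store the size of the first DATA block. Blocks alternate
 * DATA/GAP starting with a (possibly empty) DATA block. A full GAP begins
 * with its own size (at NCB_GAP_SZ_OFF) followed by the size of the DATA
 * block after it (at NCB_GAP_SZ_DATA_OFF); a gap too small to hold this
 * header can only exist at the very end of the buffer as a "reduced" gap
 * (NCB_BK_F_FIN).
 *
 * Block iteration sketch (illustrative):
 *
 *     struct ncb_blk blk;
 *     for (blk = ncb_blk_first(buf); !ncb_blk_is_null(blk);
 *          blk = ncb_blk_next(buf, blk)) {
 *             // blk.off/blk.sz describe either a DATA block or a GAP,
 *             // depending on blk.flag & NCB_BK_F_GAP
 *     }
 */
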
/* Return pointer to <off> relative to <buf> head. Support buffer wrapping. */
static char *ncb_peek(const struct ncbuf *buf, ncb_sz_t off)
{
        char *ptr = ncb_head(buf) + off;
        if (ptr >= buf->area + buf->size)
                ptr -= buf->size;
        return ptr;
}

/* Returns the reserved space of <buf> which contains the size of the first
 * data block.
 */
static char *ncb_reserved(const struct ncbuf *buf)
{
        return ncb_peek(buf, buf->size - NCB_RESERVED_SZ);
}

/* Encode <off> at <st> position in <buf>. Support wrapping. */
static void ncb_write_off(const struct ncbuf *buf, char *st, ncb_sz_t off)
{
        int i;

        BUG_ON_HOT(st >= buf->area + buf->size);

        for (i = 0; i < sizeof(ncb_sz_t); ++i) {
                (*st) = off >> (8 * i) & 0xff;

                if ((++st) == ncb_wrap(buf))
                        st = ncb_orig(buf);
        }
}

/* Decode offset stored at <st> position in <buf>. Support wrapping. */
static ncb_sz_t ncb_read_off(const struct ncbuf *buf, char *st)
{
        int i;
        ncb_sz_t off = 0;

        BUG_ON_HOT(st >= buf->area + buf->size);

        for (i = 0; i < sizeof(ncb_sz_t); ++i) {
                off |= (unsigned char)(*st) << (8 * i);

                if ((++st) == ncb_wrap(buf))
                        st = ncb_orig(buf);
        }

        return off;
}

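/* Worked example (illustrative, assuming a 4-byte ncb_sz_t and no wrapping
 * involved): encoding the value 0x00012345 stores the byte sequence
 * 45 23 01 00 starting at <st>, and decoding it back returns the same value:
 *
 *     ncb_write_off(buf, ptr, 0x00012345);
 *     BUG_ON(ncb_read_off(buf, ptr) != 0x00012345);
 */
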
/* Add <off> to the offset stored at <st> in <buf>. Support wrapping. */
static void ncb_inc_off(const struct ncbuf *buf, char *st, ncb_sz_t off)
{
        const ncb_sz_t old = ncb_read_off(buf, st);
        ncb_write_off(buf, st, old + off);
}

/* Returns true if a gap cannot be inserted at <off>: a reduced gap must be used. */
static int ncb_off_reduced(const struct ncbuf *b, ncb_sz_t off)
{
        return off + NCB_GAP_MIN_SZ > ncb_size(b);
}

/* Returns true if <blk> is the special NULL block. */
static int ncb_blk_is_null(const struct ncb_blk blk)
{
        return !blk.st;
}

/* Returns true if <blk> is the last block of <buf>. */
static int ncb_blk_is_last(const struct ncbuf *buf, const struct ncb_blk blk)
{
        BUG_ON_HOT(blk.off + blk.sz > ncb_size(buf));
        return blk.off + blk.sz == ncb_size(buf);
}

/* Returns the first block of <buf> which is always a DATA. */
static struct ncb_blk ncb_blk_first(const struct ncbuf *buf)
{
        struct ncb_blk blk;

        if (ncb_is_null(buf))
                return NCB_BLK_NULL;

        blk.st = ncb_head(buf);

        blk.sz_ptr = ncb_reserved(buf);
        blk.sz = ncb_read_off(buf, ncb_reserved(buf));
        blk.sz_data = 0;
        BUG_ON_HOT(blk.sz > ncb_size(buf));

        blk.end = ncb_peek(buf, blk.sz);
        blk.off = 0;
        blk.flag = 0;

        return blk;
}

/* Returns the block following <prev> in the buffer <buf>. */
static struct ncb_blk ncb_blk_next(const struct ncbuf *buf,
                                   const struct ncb_blk prev)
{
        struct ncb_blk blk;

        BUG_ON_HOT(ncb_blk_is_null(prev));

        if (ncb_blk_is_last(buf, prev))
                return NCB_BLK_NULL;

        blk.st = prev.end;
        blk.off = prev.off + prev.sz;
        blk.flag = ~prev.flag & NCB_BK_F_GAP;

        if (blk.flag & NCB_BK_F_GAP) {
                if (ncb_off_reduced(buf, blk.off)) {
                        blk.flag |= NCB_BK_F_FIN;
                        blk.sz_ptr = NULL;
                        blk.sz = ncb_size(buf) - blk.off;
                        blk.sz_data = 0;

                        /* A reduced gap can only be the last block. */
                        BUG_ON_HOT(!ncb_blk_is_last(buf, blk));
                }
                else {
                        blk.sz_ptr = ncb_peek(buf, blk.off + NCB_GAP_SZ_OFF);
                        blk.sz = ncb_read_off(buf, blk.sz_ptr);
                        blk.sz_data = ncb_read_off(buf, ncb_peek(buf, blk.off + NCB_GAP_SZ_DATA_OFF));
                        BUG_ON_HOT(blk.sz < NCB_GAP_MIN_SZ);
                }
        }
        else {
                blk.sz_ptr = ncb_peek(buf, prev.off + NCB_GAP_SZ_DATA_OFF);
                blk.sz = prev.sz_data;
                blk.sz_data = 0;

                /* Only the first DATA block can be empty. If this happens, a
                 * GAP merge should have been performed.
                 */
                BUG_ON_HOT(!blk.sz);
        }

        BUG_ON_HOT(blk.off + blk.sz > ncb_size(buf));
        blk.end = ncb_peek(buf, blk.off + blk.sz);

        return blk;
}

/* Returns the block containing offset <off>. Note that if <off> is at the
 * frontier between two blocks, this function will return the preceding one.
 * This is done to easily merge blocks on insertion/deletion.
 */
static struct ncb_blk ncb_blk_find(const struct ncbuf *buf, ncb_sz_t off)
{
        struct ncb_blk blk;

        if (ncb_is_null(buf))
                return NCB_BLK_NULL;

        BUG_ON_HOT(off >= ncb_size(buf));

        for (blk = ncb_blk_first(buf); off > blk.off + blk.sz;
             blk = ncb_blk_next(buf, blk)) {
        }

        return blk;
}

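/* Frontier example (illustrative): if <buf> currently holds a 16-byte DATA
 * block at offsets [0, 16) followed by a gap, then:
 *
 *     struct ncb_blk blk = ncb_blk_find(buf, 16);
 *     // blk is the DATA block (blk.off == 0, blk.sz == 16), not the gap,
 *     // and ncb_blk_off(blk, 16) == 16.
 */
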
/* Transform absolute offset <off> to a relative one from <blk> start. */
static ncb_sz_t ncb_blk_off(const struct ncb_blk blk, ncb_sz_t off)
{
        BUG_ON_HOT(off < blk.off || off > blk.off + blk.sz);
        BUG_ON_HOT(off - blk.off > blk.sz);
        return off - blk.off;
}

/* Simulate insertion in <buf> of <data> of length <len> at offset <off>. This
 * ensures that minimal block sizes are respected for newly formed gaps. <blk>
 * must be the block where the insert operation begins. If <mode> is
 * NCB_ADD_COMPARE, old and new overlapped data are compared to validate the
 * insertion.
 *
 * Returns NCB_RET_OK if insertion can proceed.
 */
static enum ncb_ret ncb_check_insert(const struct ncbuf *buf,
                                     struct ncb_blk blk, ncb_sz_t off,
                                     const char *data, ncb_sz_t len,
                                     enum ncb_add_mode mode)
{
        ncb_sz_t off_blk = ncb_blk_off(blk, off);
        ncb_sz_t to_copy;
        ncb_sz_t left = len;

        /* If insertion starts in a gap, it must leave enough space to keep the
         * gap header.
         */
        if (left && (blk.flag & NCB_BK_F_GAP)) {
                if (off_blk < NCB_GAP_MIN_SZ)
                        return NCB_RET_GAP_SIZE;
        }

        while (left) {
                off_blk = ncb_blk_off(blk, off);
                to_copy = MIN(left, blk.sz - off_blk);

                if (blk.flag & NCB_BK_F_GAP && off_blk + to_copy < blk.sz) {
                        /* Insertion must leave enough space for a new gap
                         * header if stopped in the middle of a gap.
                         */
                        const ncb_sz_t gap_sz = blk.sz - (off_blk + to_copy);
                        if (gap_sz < NCB_GAP_MIN_SZ && !ncb_blk_is_last(buf, blk))
                                return NCB_RET_GAP_SIZE;
                }
                else if (!(blk.flag & NCB_BK_F_GAP) && mode == NCB_ADD_COMPARE) {
                        /* Compare memory of data block in NCB_ADD_COMPARE mode. */
                        const ncb_sz_t off_blk = ncb_blk_off(blk, off);
                        char *st = ncb_peek(buf, off);

                        to_copy = MIN(left, blk.sz - off_blk);
                        if (st + to_copy > ncb_wrap(buf)) {
                                const ncb_sz_t sz1 = ncb_wrap(buf) - st;
                                if (memcmp(st, data, sz1))
                                        return NCB_RET_DATA_REJ;
                                if (memcmp(ncb_orig(buf), data + sz1, to_copy - sz1))
                                        return NCB_RET_DATA_REJ;
                        }
                        else {
                                if (memcmp(st, data, to_copy))
                                        return NCB_RET_DATA_REJ;
                        }
                }

                left -= to_copy;
                data += to_copy;
                off += to_copy;

                blk = ncb_blk_next(buf, blk);
        }

        return NCB_RET_OK;
}

/* Fill new <data> of length <len> inside an already existing data <blk> at
 * offset <off>. Offset is relative to <blk> so it cannot be greater than the
 * block size. <mode> specifies if old data are preserved or overwritten.
 */
static ncb_sz_t ncb_fill_data_blk(const struct ncbuf *buf,
                                  struct ncb_blk blk, ncb_sz_t off,
                                  const char *data, ncb_sz_t len,
                                  enum ncb_add_mode mode)
{
        const ncb_sz_t to_copy = MIN(len, blk.sz - off);
        char *ptr = NULL;

        BUG_ON_HOT(off > blk.sz);
        /* This can happen due to previous ncb_blk_find() usage. In this
         * case the current fill is a noop.
         */
        if (off == blk.sz)
                return 0;

        if (mode == NCB_ADD_OVERWRT) {
                ptr = ncb_peek(buf, blk.off + off);

                if (ptr + to_copy >= ncb_wrap(buf)) {
                        const ncb_sz_t sz1 = ncb_wrap(buf) - ptr;
                        memcpy(ptr, data, sz1);
                        memcpy(ncb_orig(buf), data + sz1, to_copy - sz1);
                }
                else {
                        memcpy(ptr, data, to_copy);
                }
        }

        return to_copy;
}

/* Fill the gap <blk> starting at <off> with new <data> of length <len>. <off>
 * is relative to <blk> so it cannot be greater than the block size.
 */
static ncb_sz_t ncb_fill_gap_blk(const struct ncbuf *buf,
                                 struct ncb_blk blk, ncb_sz_t off,
                                 const char *data, ncb_sz_t len)
{
        const ncb_sz_t to_copy = MIN(len, blk.sz - off);
        char *ptr;

        BUG_ON_HOT(off > blk.sz);
        /* This can happen due to previous ncb_blk_find() usage. In this
         * case the current fill is a noop.
         */
        if (off == blk.sz)
                return 0;

        /* A new gap must be created if insertion stopped before the gap end. */
        if (off + to_copy < blk.sz) {
                const ncb_sz_t gap_off = blk.off + off + to_copy;
                const ncb_sz_t gap_sz = blk.sz - off - to_copy;

                BUG_ON_HOT(!ncb_off_reduced(buf, gap_off) &&
                           blk.off + blk.sz - gap_off < NCB_GAP_MIN_SZ);

                /* write the new gap header unless this is a reduced gap. */
                if (!ncb_off_reduced(buf, gap_off)) {
                        char *gap_ptr = ncb_peek(buf, gap_off + NCB_GAP_SZ_OFF);
                        char *gap_data_ptr = ncb_peek(buf, gap_off + NCB_GAP_SZ_DATA_OFF);

                        ncb_write_off(buf, gap_ptr, gap_sz);
                        ncb_write_off(buf, gap_data_ptr, blk.sz_data);
                }
        }

        /* fill the gap with new data */
        ptr = ncb_peek(buf, blk.off + off);
        if (ptr + to_copy >= ncb_wrap(buf)) {
                ncb_sz_t sz1 = ncb_wrap(buf) - ptr;
                memcpy(ptr, data, sz1);
                memcpy(ncb_orig(buf), data + sz1, to_copy - sz1);
        }
        else {
                memcpy(ptr, data, to_copy);
        }

        return to_copy;
}

/* ******** public API ******** */

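/* Typical usage sketch (illustrative only; <area>, <payload0>, <payload1> and
 * <len1> are hypothetical caller data, sizes and offsets are arbitrary):
 *
 *     char area[16384];
 *     struct ncbuf buf = ncb_make(area, sizeof(area), 0);
 *     ncb_init(&buf, 0);
 *
 *     // insert out-of-order data, then check what is contiguous at offset 0
 *     ncb_add(&buf, 100, payload1, len1, NCB_ADD_COMPARE);
 *     ncb_add(&buf,   0, payload0, 100,  NCB_ADD_COMPARE);
 *     ncb_sz_t ready = ncb_data(&buf, 0);
 *
 *     // consume <ready> bytes from the front and free room at the end
 *     ncb_advance(&buf, ready);
 */
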
int ncb_is_null(const struct ncbuf *buf)
{
        return buf->size == 0;
}

/* Initialize or reset <buf> by clearing all data. Its size is untouched.
 * Buffer is positioned to <head> offset. Use 0 to realign it. <buf> must not
 * be NCBUF_NULL.
 */
void ncb_init(struct ncbuf *buf, ncb_sz_t head)
{
        BUG_ON_HOT(ncb_is_null(buf));

        BUG_ON_HOT(head >= buf->size);
        buf->head = head;

        ncb_write_off(buf, ncb_reserved(buf), 0);
        ncb_write_off(buf, ncb_head(buf), ncb_size(buf));
        ncb_write_off(buf, ncb_peek(buf, sizeof(ncb_sz_t)), 0);
}

/* Construct a ncbuf with all its parameters. */
struct ncbuf ncb_make(char *area, ncb_sz_t size, ncb_sz_t head)
{
        struct ncbuf buf;

        /* Ensure that there is enough space for the reserved space and data.
         * This is the minimal value to not crash later.
         */
        BUG_ON_HOT(size <= NCB_RESERVED_SZ);

        buf.area = area;
        buf.size = size;
        buf.head = head;

        return buf;
}

/* Returns start of allocated buffer area. */
char *ncb_orig(const struct ncbuf *buf)
{
        return buf->area;
}

/* Returns current head pointer into buffer area. */
char *ncb_head(const struct ncbuf *buf)
{
        return buf->area + buf->head;
}

/* Returns the first byte after the allocated buffer area. */
char *ncb_wrap(const struct ncbuf *buf)
{
        return buf->area + buf->size;
}

/* Returns the usable size of <buf> for data storage. This is the size of the
 * allocated buffer without the reserved header space.
 */
ncb_sz_t ncb_size(const struct ncbuf *buf)
{
        if (ncb_is_null(buf))
                return 0;

        return buf->size - NCB_RESERVED_SZ;
}

/* Returns the total number of bytes stored in whole <buf>. */
ncb_sz_t ncb_total_data(const struct ncbuf *buf)
{
        struct ncb_blk blk;
        int total = 0;

        for (blk = ncb_blk_first(buf); !ncb_blk_is_null(blk); blk = ncb_blk_next(buf, blk)) {
                if (!(blk.flag & NCB_BK_F_GAP))
                        total += blk.sz;
        }

        return total;
}

/* Returns true if there is no data anywhere in <buf>. */
int ncb_is_empty(const struct ncbuf *buf)
{
        if (ncb_is_null(buf))
                return 1;

        /* use ncb_read_off() as stored sizes span several bytes: a raw char
         * dereference would only look at the lowest one.
         */
        BUG_ON_HOT(ncb_read_off(buf, ncb_reserved(buf)) > ncb_size(buf));
        return ncb_read_off(buf, ncb_reserved(buf)) == 0 &&
               ncb_read_off(buf, ncb_head(buf)) == ncb_size(buf);
}

/* Returns true if no more data can be inserted in <buf>. */
int ncb_is_full(const struct ncbuf *buf)
{
        if (ncb_is_null(buf))
                return 0;

        BUG_ON_HOT(ncb_read_off(buf, ncb_reserved(buf)) > ncb_size(buf));
        return ncb_read_off(buf, ncb_reserved(buf)) == ncb_size(buf);
}

/* Returns the number of bytes of data available in <buf> starting at offset
 * <off> until the next gap or the buffer end. The counted data may wrap if
 * the buffer storage is not aligned.
 */
ncb_sz_t ncb_data(const struct ncbuf *buf, ncb_sz_t off)
{
        struct ncb_blk blk = ncb_blk_find(buf, off);
        ncb_sz_t off_blk;

        if (ncb_blk_is_null(blk))
                return 0;

        off_blk = ncb_blk_off(blk, off);

        /* if <off> is at the frontier between two blocks and <blk> is a gap,
         * retrieve the next data block.
         */
        if (blk.flag & NCB_BK_F_GAP && off_blk == blk.sz &&
            !ncb_blk_is_last(buf, blk)) {
                blk = ncb_blk_next(buf, blk);
                off_blk = ncb_blk_off(blk, off);
        }

        if (blk.flag & NCB_BK_F_GAP)
                return 0;

        return blk.sz - off_blk;
}

/* Add a new block at <data> of size <len> in <buf> at offset <off>.
 *
 * Returns NCB_RET_OK on success. On error the following codes are returned :
 * - NCB_RET_GAP_SIZE : cannot add data because the gap formed is too small
 * - NCB_RET_DATA_REJ : old data would be overwritten by different ones in
 *   NCB_ADD_COMPARE mode.
 */
enum ncb_ret ncb_add(struct ncbuf *buf, ncb_sz_t off,
                     const char *data, ncb_sz_t len, enum ncb_add_mode mode)
{
        struct ncb_blk blk;
        ncb_sz_t left = len;
        enum ncb_ret ret;
        char *new_sz;

        if (!len)
                return NCB_RET_OK;

        BUG_ON_HOT(off + len > ncb_size(buf));

        /* Get block where insertion begins. */
        blk = ncb_blk_find(buf, off);

        /* Check if insertion is possible. */
        ret = ncb_check_insert(buf, blk, off, data, len, mode);
        if (ret != NCB_RET_OK)
                return ret;

        if (blk.flag & NCB_BK_F_GAP) {
                /* Reduce gap size if insertion begins in a gap. Gap data size
                 * is reset and will be recalculated during insertion.
                 */
                const ncb_sz_t gap_sz = off - blk.off;
                BUG_ON_HOT(gap_sz < NCB_GAP_MIN_SZ);

                /* pointer to data size to increase. */
                new_sz = ncb_peek(buf, blk.off + NCB_GAP_SZ_DATA_OFF);

                ncb_write_off(buf, blk.sz_ptr, gap_sz);
                ncb_write_off(buf, new_sz, 0);
        }
        else {
                /* pointer to data size to increase. */
                new_sz = blk.sz_ptr;
        }

        /* insert data */
        while (left) {
                struct ncb_blk next;
                const ncb_sz_t off_blk = ncb_blk_off(blk, off);
                ncb_sz_t done;

                /* Retrieve the next block. This must be done before
                 * overwriting a gap.
                 */
                next = ncb_blk_next(buf, blk);

                if (blk.flag & NCB_BK_F_GAP) {
                        done = ncb_fill_gap_blk(buf, blk, off_blk, data, left);

                        /* update the inserted data block size */
                        if (off + done == blk.off + blk.sz) {
                                /* merge next data block if insertion reached gap end */
                                ncb_inc_off(buf, new_sz, done + blk.sz_data);
                        }
                        else {
                                /* insertion stopped before gap end */
                                ncb_inc_off(buf, new_sz, done);
                        }
                }
                else {
                        done = ncb_fill_data_blk(buf, blk, off_blk, data, left, mode);
                }

                BUG_ON_HOT(done > blk.sz || done > left);
                left -= done;
                data += done;
                off += done;

                blk = next;
        }

        return NCB_RET_OK;
}

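/* Mode semantics example (illustrative; <other> is a hypothetical pattern
 * that differs from the data already present in the overlapped region):
 *
 *     ncb_add(buf, 12, other, 16, NCB_ADD_COMPARE);  // NCB_RET_DATA_REJ
 *     ncb_add(buf, 12, other, 16, NCB_ADD_PRESERVE); // OK, old bytes kept
 *     ncb_add(buf, 12, other, 16, NCB_ADD_OVERWRT);  // OK, old bytes replaced
 */
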
/* Advance the head of <buf> to the offset <off>. Data at the start of buffer
 * will be lost while some space will be formed at the end to be able to insert
 * new data.
 *
 * Returns NCB_RET_OK on success.
 */
enum ncb_ret ncb_advance(struct ncbuf *buf, ncb_sz_t off)
{
        struct ncb_blk blk, last;
        ncb_sz_t off_blk;
        ncb_sz_t first_data_sz;

        BUG_ON_HOT(off > ncb_size(buf));
        if (!off)
                return NCB_RET_OK;

        /* Special case if off is full size. This is equivalent to a reset. */
        if (off == ncb_size(buf)) {
                ncb_init(buf, buf->head);
                return NCB_RET_OK;
        }

        last = blk = ncb_blk_find(buf, off);
        while (!ncb_blk_is_last(buf, last))
                last = ncb_blk_next(buf, last);

        off_blk = ncb_blk_off(blk, off);

        /* If the new head points into a GAP, the GAP size must be big enough. */
        if (blk.flag & NCB_BK_F_GAP) {
                if (blk.sz == off_blk) {
                        /* The GAP is completely removed. */
                        first_data_sz = blk.sz_data;
                }
                else if (!ncb_blk_is_last(buf, blk) &&
                         blk.sz - off_blk < NCB_GAP_MIN_SZ) {
                        return NCB_RET_GAP_SIZE;
                }
                else {
                        /* A GAP will be present at the front. */
                        first_data_sz = 0;
                }
        }
        else {
                /* If off_blk is less than blk.sz, the data block becomes the
                 * first block. If equal, the data block is completely removed
                 * and thus the following GAP will be the first block.
                 */
                first_data_sz = blk.sz - off_blk;
        }

        /* Insert a new GAP if:
         * - the last block is DATA
         * - the last block is a GAP but is not the same as <blk>
         *
         * If the last block is a GAP and is the same as <blk>, it means that
         * a single GAP will be formed to cover the whole buffer content.
         */
        if (last.flag & NCB_BK_F_GAP && !ncb_blk_is_last(buf, blk)) {
                /* The last block is a GAP: extend it unless this is a reduced
                 * gap and the new gap size is still not big enough.
                 */
                if (!(last.flag & NCB_BK_F_FIN) || last.sz + off >= NCB_GAP_MIN_SZ) {
                        /* use .st instead of .sz_ptr which can be NULL if reduced gap */
                        ncb_write_off(buf, last.st, last.sz + off);
                        ncb_write_off(buf, ncb_peek(buf, last.off + NCB_GAP_SZ_DATA_OFF), 0);
                }
        }
        else if (!(last.flag & NCB_BK_F_GAP)) {
                /* The last block is DATA: insert a new gap after the deleted
                 * data. If the gap is not big enough, it will be a reduced gap.
                 */
                if (off >= NCB_GAP_MIN_SZ) {
                        ncb_write_off(buf, ncb_peek(buf, last.off + last.sz + NCB_GAP_SZ_OFF), off);
                        ncb_write_off(buf, ncb_peek(buf, last.off + last.sz + NCB_GAP_SZ_DATA_OFF), 0);
                }
        }

        /* Advance head and update the buffer reserved header which contains
         * the first data block size.
         */
        buf->head += off;
        if (buf->head >= buf->size)
                buf->head -= buf->size;
        ncb_write_off(buf, ncb_reserved(buf), first_data_sz);

        /* Update the first block GAP size if needed. */
        if (blk.flag & NCB_BK_F_GAP && !first_data_sz) {
                /* If the first block GAP is also the last one, cover the whole buf. */
                if (ncb_blk_is_last(buf, blk))
                        ncb_write_off(buf, ncb_head(buf), ncb_size(buf));
                else
                        ncb_write_off(buf, ncb_head(buf), blk.sz - off_blk);

                /* Recopy the block sz_data at the new position. */
                ncb_write_off(buf, ncb_peek(buf, NCB_GAP_SZ_DATA_OFF), blk.sz_data);
        }

        return NCB_RET_OK;
}

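/* Advance example (illustrative): with data stored at offsets [0, 10) only:
 *
 *     ncb_advance(buf, 4);
 *     // the byte formerly at offset 4 is now at offset 0,
 *     // ncb_data(buf, 0) == 6 and 4 more bytes of room exist at the end.
 *     // Advancing into a gap so that a too small non-final gap would remain
 *     // returns NCB_RET_GAP_SIZE instead (see the test suite below).
 */
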
/* ******** testing API ******** */
/* To build it:
 *   gcc -Wall -DSTANDALONE -lasan -I./include -o ncbuf src/ncbuf.c
 */
#ifdef STANDALONE

int ncb_print = 0;

static void ncbuf_printf(char *str, ...)
{
        va_list args;

        va_start(args, str);
        if (ncb_print)
                vfprintf(stderr, str, args);
        va_end(args);
}

struct rand_off {
        struct list el;
        ncb_sz_t off;
        ncb_sz_t len;
};

static struct rand_off *ncb_generate_rand_off(const struct ncbuf *buf)
{
        struct rand_off *roff;
        roff = calloc(1, sizeof(struct rand_off));
        BUG_ON(!roff);

        roff->off = rand() % (ncb_size(buf));
        if (roff->off > 0 && roff->off < NCB_GAP_MIN_SZ)
                roff->off = 0;

        roff->len = rand() % (ncb_size(buf) - roff->off + 1);

        return roff;
}

static void ncb_print_blk(const struct ncb_blk blk)
{
        if (ncb_print) {
                fprintf(stderr, "%s(%s): %2u/%u.\n",
                        blk.flag & NCB_BK_F_GAP ? "GAP " : "DATA",
                        blk.flag & NCB_BK_F_FIN ? "F" : "-", blk.off, blk.sz);
        }
}

static inline int ncb_is_null_blk(const struct ncb_blk blk)
{
        return !blk.st;
}

static void ncb_loop(const struct ncbuf *buf)
{
        struct ncb_blk blk;

        blk = ncb_blk_first(buf);
        do {
                ncb_print_blk(blk);
                blk = ncb_blk_next(buf, blk);
        } while (!ncb_is_null_blk(blk));

        ncbuf_printf("\n");
}

static void ncbuf_print_buf(struct ncbuf *b, ncb_sz_t len,
                            unsigned char *area, int line)
{
        int i;

        ncbuf_printf("buffer status at line %d\n", line);
        for (i = 0; i < len; ++i) {
                ncbuf_printf("%02x.", area[i]);
                if (i && i % 32 == 31) ncbuf_printf("\n");
                else if (i && i % 8 == 7) ncbuf_printf(" ");
        }
        ncbuf_printf("\n");

        ncb_loop(b);

        if (ncb_print)
                getchar();
}

static struct ncbuf b;
static unsigned char *bufarea = NULL;
static ncb_sz_t bufsize = 16384;
static ncb_sz_t bufhead = 15;

#define NCB_INIT(buf) \
  if ((reset)) { memset(bufarea, 0xaa, bufsize); } \
  ncb_init(buf, bufhead); \
  ncbuf_print_buf(&b, bufsize, bufarea, __LINE__);

#define NCB_ADD_EQ(buf, off, data, sz, mode, ret) \
  BUG_ON(ncb_add((buf), (off), (data), (sz), (mode)) != (ret)); \
  ncbuf_print_buf(buf, bufsize, bufarea, __LINE__);

#define NCB_ADD_NEQ(buf, off, data, sz, mode, ret) \
  BUG_ON(ncb_add((buf), (off), (data), (sz), (mode)) == (ret)); \
  ncbuf_print_buf(buf, bufsize, bufarea, __LINE__);

#define NCB_ADVANCE_EQ(buf, off, ret) \
  BUG_ON(ncb_advance((buf), (off)) != (ret)); \
  ncbuf_print_buf(buf, bufsize, bufarea, __LINE__);

#define NCB_TOTAL_DATA_EQ(buf, data) \
  BUG_ON(ncb_total_data((buf)) != (data));

#define NCB_DATA_EQ(buf, off, data) \
  BUG_ON(ncb_data((buf), (off)) != (data));

static int ncbuf_test(ncb_sz_t head, int reset, int print_delay)
{
        char *data0, data1[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
        struct list list = LIST_HEAD_INIT(list);
        struct rand_off *roff, *roff_tmp;
        enum ncb_ret ret;

        data0 = malloc(bufsize);
        memset(data0, 0xff, bufsize);

        bufarea = malloc(bufsize);

        fprintf(stderr, "running unit tests\n");

        b = NCBUF_NULL;
        BUG_ON(!ncb_is_null(&b));
        NCB_DATA_EQ(&b, 0, 0);
        NCB_TOTAL_DATA_EQ(&b, 0);
        BUG_ON(ncb_size(&b) != 0);
        BUG_ON(!ncb_is_empty(&b));
        BUG_ON(ncb_is_full(&b));

        b.area = (char *)bufarea;
        b.size = bufsize;
        b.head = head;
        NCB_INIT(&b);

        /* insertion test suite */
        NCB_INIT(&b);
        NCB_DATA_EQ(&b, 0, 0); NCB_DATA_EQ(&b, bufsize - NCB_RESERVED_SZ - 1, 0); /* first and last offset */
        NCB_ADD_EQ(&b, 24, data0, 9, NCB_ADD_PRESERVE, NCB_RET_OK); NCB_DATA_EQ(&b, 24, 9);
        /* insert new data at the same offset as old */
        NCB_ADD_EQ(&b, 24, data0, 16, NCB_ADD_PRESERVE, NCB_RET_OK); NCB_DATA_EQ(&b, 24, 16);

        NCB_INIT(&b); NCB_DATA_EQ(&b, 0, 0);
        NCB_ADD_EQ(&b, 0, data0, 16, NCB_ADD_PRESERVE, NCB_RET_OK); NCB_DATA_EQ(&b, 0, 16);
        NCB_ADD_EQ(&b, 24, data0, 16, NCB_ADD_PRESERVE, NCB_RET_OK); NCB_DATA_EQ(&b, 0, 16);
        /* insert data overlapping two data blocks and a gap */
        NCB_ADD_EQ(&b, 12, data0, 16, NCB_ADD_PRESERVE, NCB_RET_OK); NCB_DATA_EQ(&b, 0, 40);

        NCB_INIT(&b);
        NCB_ADD_EQ(&b, 32, data0, 16, NCB_ADD_PRESERVE, NCB_RET_OK); NCB_DATA_EQ(&b, 0, 0); NCB_DATA_EQ(&b, 16, 0); NCB_DATA_EQ(&b, 32, 16);
        NCB_ADD_EQ(&b, 0, data0, 16, NCB_ADD_PRESERVE, NCB_RET_OK); NCB_DATA_EQ(&b, 0, 16); NCB_DATA_EQ(&b, 16, 0); NCB_DATA_EQ(&b, 32, 16);
        /* insert data to exactly cover a gap between two data blocks */
        NCB_ADD_EQ(&b, 16, data0, 16, NCB_ADD_PRESERVE, NCB_RET_OK); NCB_DATA_EQ(&b, 0, 48); NCB_DATA_EQ(&b, 16, 32); NCB_DATA_EQ(&b, 32, 16);

        NCB_INIT(&b);
        NCB_ADD_EQ(&b, 0, data0, 8, NCB_ADD_PRESERVE, NCB_RET_OK);
        /* this insertion must be rejected because of minimal gap size */
        NCB_ADD_EQ(&b, 10, data0, 8, NCB_ADD_PRESERVE, NCB_RET_GAP_SIZE);

        /* Test reduced gap support */
        NCB_INIT(&b);
        /* this insertion will form a reduced gap */
        NCB_ADD_EQ(&b, 0, data0, bufsize - (NCB_GAP_MIN_SZ - 1), NCB_ADD_COMPARE, NCB_RET_OK);

        /* Test the various insertion modes */
        NCB_INIT(&b);
        NCB_ADD_EQ(&b, 10, data1, 16, NCB_ADD_PRESERVE, NCB_RET_OK);
        NCB_ADD_EQ(&b, 12, data1, 16, NCB_ADD_COMPARE, NCB_RET_DATA_REJ);
        NCB_ADD_EQ(&b, 12, data1, 16, NCB_ADD_PRESERVE, NCB_RET_OK); BUG_ON(*ncb_peek(&b, 12) != data1[2]);
        NCB_ADD_EQ(&b, 12, data1, 16, NCB_ADD_OVERWRT, NCB_RET_OK); BUG_ON(*ncb_peek(&b, 12) == data1[2]);

        /* advance test suite */
        NCB_INIT(&b);
        NCB_ADVANCE_EQ(&b, 10, NCB_RET_OK); /* advance in an empty buffer; this ensures we do not leave an empty DATA in the middle of the buffer */
        NCB_ADVANCE_EQ(&b, ncb_size(&b) - 2, NCB_RET_OK);

        NCB_INIT(&b);
        /* first fill the buffer */
        NCB_ADD_EQ(&b, 0, data0, bufsize - NCB_RESERVED_SZ, NCB_ADD_COMPARE, NCB_RET_OK);
        /* delete 2 bytes : a reduced gap must be created */
        NCB_ADVANCE_EQ(&b, 2, NCB_RET_OK); NCB_TOTAL_DATA_EQ(&b, ncb_size(&b) - 2);
        /* delete 1 byte : extend the reduced gap */
        NCB_ADVANCE_EQ(&b, 1, NCB_RET_OK); NCB_TOTAL_DATA_EQ(&b, ncb_size(&b) - 3);
        /* delete 5 bytes : a full gap must be present */
        NCB_ADVANCE_EQ(&b, 5, NCB_RET_OK); NCB_TOTAL_DATA_EQ(&b, ncb_size(&b) - 8);
        /* completely clear the buffer */
        NCB_ADVANCE_EQ(&b, bufsize - NCB_RESERVED_SZ, NCB_RET_OK); NCB_TOTAL_DATA_EQ(&b, 0);


        NCB_INIT(&b);
        NCB_ADD_EQ(&b, 10, data0, 10, NCB_ADD_PRESERVE, NCB_RET_OK);
        NCB_ADVANCE_EQ(&b, 2, NCB_RET_OK); /* reduce a gap in front of the buffer */
        NCB_ADVANCE_EQ(&b, 1, NCB_RET_GAP_SIZE); /* reject */
        NCB_ADVANCE_EQ(&b, 8, NCB_RET_OK); /* remove completely the gap */
        NCB_ADVANCE_EQ(&b, 8, NCB_RET_OK); /* remove inside the data */
        NCB_ADVANCE_EQ(&b, 10, NCB_RET_OK); /* remove completely the data */

        fprintf(stderr, "first random pass\n");
        NCB_INIT(&b);

        /* generate random data offsets until the buffer is full */
        while (!ncb_is_full(&b)) {
                roff = ncb_generate_rand_off(&b);
                LIST_INSERT(&list, &roff->el);

                ret = ncb_add(&b, roff->off, data0, roff->len, NCB_ADD_COMPARE);
                BUG_ON(ret == NCB_RET_DATA_REJ);
                ncbuf_print_buf(&b, bufsize, bufarea, __LINE__);
                usleep(print_delay);
        }

        fprintf(stderr, "buf full, prepare for reverse random\n");
        ncbuf_print_buf(&b, bufsize, bufarea, __LINE__);

        /* insert the previously generated random offsets in the reverse order.
         * At the end, the buffer should be full.
         */
        NCB_INIT(&b);
        list_for_each_entry_safe(roff, roff_tmp, &list, el) {
                int full = ncb_is_full(&b);
                if (!full) {
                        ret = ncb_add(&b, roff->off, data0, roff->len, NCB_ADD_COMPARE);
                        BUG_ON(ret == NCB_RET_DATA_REJ);
                        ncbuf_print_buf(&b, bufsize, bufarea, __LINE__);
                        usleep(print_delay);
                }

                LIST_DELETE(&roff->el);
                free(roff);
        }

        if (!ncb_is_full(&b))
                abort();

        fprintf(stderr, "done\n");

        free(bufarea);
        free(data0);

        return 1;
}

int main(int argc, char **argv)
{
        int reset = 0;
        int print_delay = 100000;
        int c; /* getopt() returns an int, not a char */

        opterr = 0;
        while ((c = getopt(argc, argv, "h:s:rp::")) != -1) {
                switch (c) {
                case 'h':
                        bufhead = atoi(optarg);
                        break;
                case 's':
                        bufsize = atoi(optarg);
                        if (bufsize < 64) {
                                fprintf(stderr, "bufsize should be at least 64 bytes for unit test suite\n");
                                exit(127);
                        }
                        break;
                case 'r':
                        reset = 1;
                        break;
                case 'p':
                        if (optarg)
                                print_delay = atoi(optarg);
                        ncb_print = 1;
                        break;
                case '?':
                default:
                        fprintf(stderr, "usage: %s [-r] [-s bufsize] [-h bufhead] [-p <delay_msec>]\n", argv[0]);
                        exit(127);
                }
        }

        ncbuf_test(bufhead, reset, print_delay);
        return EXIT_SUCCESS;
}

#endif /* STANDALONE */