#include <haproxy/ncbuf.h>

#include <string.h>

#ifndef MIN
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif

#ifdef STANDALONE
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>

#include <haproxy/list.h>
#endif /* STANDALONE */

#ifdef DEBUG_DEV
# include <haproxy/bug.h>
#else
# include <stdio.h>
# include <stdlib.h>

# undef BUG_ON
# define BUG_ON(x)     if (x) { fprintf(stderr, "CRASH ON %s:%d\n", __func__, __LINE__); abort(); }

# undef BUG_ON_HOT
# define BUG_ON_HOT(x) if (x) { fprintf(stderr, "CRASH ON %s:%d\n", __func__, __LINE__); abort(); }
#endif /* DEBUG_DEV */

/* ******** internal API ******** */

#define NCB_BLK_NULL ((struct ncb_blk){ .st = NULL })

#define NCB_BK_F_GAP  0x01  /* block represents a gap */
#define NCB_BK_F_FIN  0x02  /* special reduced gap present at the end of the buffer */
struct ncb_blk {
        char *st;  /* first byte of the block */
        char *end; /* first byte after this block */

        char *sz_ptr; /* pointer to size element - NULL for reduced gap */
        ncb_sz_t sz; /* size of the block */
        ncb_sz_t sz_data; /* size of the data following the block - invalid for reduced GAP */
        ncb_sz_t off; /* offset of block in buffer */

        char flag;
};

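/* Illustrative sketch (in offset space, relative to the head) of the layout
 * this file assumes; the exact constants (NCB_RESERVED_SZ, NCB_GAP_SZ_OFF,
 * NCB_GAP_SZ_DATA_OFF, NCB_GAP_MIN_SZ) are expected to be defined in
 * <haproxy/ncbuf.h>.
 *
 *   offset 0                                            ncb_size(buf)
 *   | DATA block | GAP | DATA block | ... | GAP | reserved header |
 *
 * Blocks alternate between DATA and GAP, always starting with a DATA block
 * (possibly empty) at the head. The reserved header stores the size of that
 * first DATA block. Each gap stores its own size (at NCB_GAP_SZ_OFF) and the
 * size of the DATA block following it (at NCB_GAP_SZ_DATA_OFF); a gap must
 * therefore be at least NCB_GAP_MIN_SZ bytes, except for a headerless
 * "reduced" gap located at the very end of the buffer (NCB_BK_F_FIN).
 */
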
/* Returns a pointer to <off> relative to <buf> head. Supports buffer wrapping. */
static char *ncb_peek(const struct ncbuf *buf, ncb_sz_t off)
{
        char *ptr = ncb_head(buf) + off;
        if (ptr >= buf->area + buf->size)
                ptr -= buf->size;
        return ptr;
}

/* Returns the reserved space of <buf> which contains the size of the first
 * data block.
 */
static char *ncb_reserved(const struct ncbuf *buf)
{
        return ncb_peek(buf, buf->size - NCB_RESERVED_SZ);
}

/* Encodes <off> at the <st> position in <buf>. Supports wrapping. */
static void ncb_write_off(const struct ncbuf *buf, char *st, ncb_sz_t off)
{
        int i;

        BUG_ON_HOT(st >= buf->area + buf->size);

        for (i = 0; i < sizeof(ncb_sz_t); ++i) {
                (*st) = off >> (8 * i) & 0xff;

                if ((++st) == ncb_wrap(buf))
                        st = ncb_orig(buf);
        }
}

/* Decodes the offset stored at the <st> position in <buf>. Supports wrapping. */
static ncb_sz_t ncb_read_off(const struct ncbuf *buf, char *st)
{
        int i;
        ncb_sz_t off = 0;

        BUG_ON_HOT(st >= buf->area + buf->size);

        for (i = 0; i < sizeof(ncb_sz_t); ++i) {
                off |= (unsigned char)(*st) << (8 * i);

                if ((++st) == ncb_wrap(buf))
                        st = ncb_orig(buf);
        }

        return off;
}

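/* Worked example of the encoding above, assuming ncb_sz_t is a 32-bit type:
 * ncb_write_off(buf, st, 0x0102) stores the little-endian byte sequence
 * 02 01 00 00 starting at <st>, continuing at ncb_orig(buf) whenever <st>
 * reaches ncb_wrap(buf). ncb_read_off() performs the exact reverse and
 * therefore tolerates the same wrapping.
 */
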
/* Adds <off> to the offset stored at <st> in <buf>. Supports wrapping. */
static void ncb_inc_off(const struct ncbuf *buf, char *st, ncb_sz_t off)
{
        const ncb_sz_t old = ncb_read_off(buf, st);
        ncb_write_off(buf, st, old + off);
}

/* Returns true if a gap cannot be inserted at <off>: a reduced gap must be used. */
static int ncb_off_reduced(const struct ncbuf *b, ncb_sz_t off)
{
        return off + NCB_GAP_MIN_SZ > ncb_size(b);
}

/* Returns true if <blk> is the special NULL block. */
static int ncb_blk_is_null(const struct ncb_blk blk)
{
        return !blk.st;
}

/* Returns true if <blk> is the last block of <buf>. */
static int ncb_blk_is_last(const struct ncbuf *buf, const struct ncb_blk blk)
{
        BUG_ON_HOT(blk.off + blk.sz > ncb_size(buf));
        return blk.off + blk.sz == ncb_size(buf);
}

/* Returns the first block of <buf>, which is always a DATA block. */
static struct ncb_blk ncb_blk_first(const struct ncbuf *buf)
{
        struct ncb_blk blk;

        if (ncb_is_null(buf))
                return NCB_BLK_NULL;

        blk.st = ncb_head(buf);

        blk.sz_ptr = ncb_reserved(buf);
        blk.sz = ncb_read_off(buf, ncb_reserved(buf));
        blk.sz_data = 0;
        BUG_ON_HOT(blk.sz > ncb_size(buf));

        blk.end = ncb_peek(buf, blk.sz);
        blk.off = 0;
        blk.flag = 0;

        return blk;
}

/* Returns the block following <prev> in the buffer <buf>. */
static struct ncb_blk ncb_blk_next(const struct ncbuf *buf,
                                   const struct ncb_blk prev)
{
        struct ncb_blk blk;

        BUG_ON_HOT(ncb_blk_is_null(prev));

        if (ncb_blk_is_last(buf, prev))
                return NCB_BLK_NULL;

        blk.st = prev.end;
        blk.off = prev.off + prev.sz;
        blk.flag = ~prev.flag & NCB_BK_F_GAP;

        if (blk.flag & NCB_BK_F_GAP) {
                if (ncb_off_reduced(buf, blk.off)) {
                        blk.flag |= NCB_BK_F_FIN;
                        blk.sz_ptr = NULL;
                        blk.sz = ncb_size(buf) - blk.off;
                        blk.sz_data = 0;

                        /* A reduced gap can only be the last block. */
                        BUG_ON_HOT(!ncb_blk_is_last(buf, blk));
                }
                else {
                        blk.sz_ptr = ncb_peek(buf, blk.off + NCB_GAP_SZ_OFF);
                        blk.sz = ncb_read_off(buf, blk.sz_ptr);
                        blk.sz_data = ncb_read_off(buf, ncb_peek(buf, blk.off + NCB_GAP_SZ_DATA_OFF));
                        BUG_ON_HOT(blk.sz < NCB_GAP_MIN_SZ);
                }
        }
        else {
                blk.sz_ptr = ncb_peek(buf, prev.off + NCB_GAP_SZ_DATA_OFF);
                blk.sz = prev.sz_data;
                blk.sz_data = 0;

                /* Only the first DATA block can be empty. If this happens, a
                 * GAP merge should have been performed.
                 */
                BUG_ON_HOT(!blk.sz);
        }

        BUG_ON_HOT(blk.off + blk.sz > ncb_size(buf));
        blk.end = ncb_peek(buf, blk.off + blk.sz);

        return blk;
}

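/* A whole-buffer walk built on the two helpers above is a simple alternating
 * iteration, as done by ncb_total_data() and the standalone ncb_loop() below.
 * Minimal sketch:
 *
 *   struct ncb_blk blk;
 *   for (blk = ncb_blk_first(buf); !ncb_blk_is_null(blk);
 *        blk = ncb_blk_next(buf, blk)) {
 *           if (!(blk.flag & NCB_BK_F_GAP)) {
 *                   // blk.off/blk.sz describe a contiguous run of data
 *           }
 *   }
 */
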
/* Returns the block containing offset <off>. Note that if <off> is at the
 * frontier between two blocks, this function will return the preceding one.
 * This is done to easily merge blocks on insertion/deletion.
 */
static struct ncb_blk ncb_blk_find(const struct ncbuf *buf, ncb_sz_t off)
{
        struct ncb_blk blk;

        if (ncb_is_null(buf))
                return NCB_BLK_NULL;

        BUG_ON_HOT(off >= ncb_size(buf));

        for (blk = ncb_blk_first(buf); off > blk.off + blk.sz;
             blk = ncb_blk_next(buf, blk)) {
        }

        return blk;
}

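/* Example of the frontier rule above: with a DATA block spanning offsets
 * [0, 10) followed by a gap, ncb_blk_find(buf, 10) returns the DATA block,
 * so the relative offset computed by ncb_blk_off() may be equal to blk.sz.
 * Callers must be prepared for this, as ncb_data() and the fill helpers
 * below are.
 */
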
/* Transforms the absolute offset <off> into an offset relative to the <blk> start. */
static ncb_sz_t ncb_blk_off(const struct ncb_blk blk, ncb_sz_t off)
{
        BUG_ON_HOT(off < blk.off || off > blk.off + blk.sz);
        BUG_ON_HOT(off - blk.off > blk.sz);
        return off - blk.off;
}

/* Simulates insertion in <buf> of <data> of length <len> at offset <off>. This
 * ensures that minimal block sizes are respected for newly formed gaps. <blk>
 * must be the block where the insert operation begins. If <mode> is
 * NCB_ADD_COMPARE, old and new overlapping data are compared to validate the
 * insertion.
 *
 * Returns NCB_RET_OK if insertion can proceed.
 */
static enum ncb_ret ncb_check_insert(const struct ncbuf *buf,
                                     struct ncb_blk blk, ncb_sz_t off,
                                     const char *data, ncb_sz_t len,
                                     enum ncb_add_mode mode)
{
        ncb_sz_t off_blk = ncb_blk_off(blk, off);
        ncb_sz_t to_copy;
        ncb_sz_t left = len;

        /* If insertion starts in a gap, it must leave enough space to keep the
         * gap header.
         */
        if (left && (blk.flag & NCB_BK_F_GAP)) {
                if (off_blk < NCB_GAP_MIN_SZ)
                        return NCB_RET_GAP_SIZE;
        }

        while (left) {
                off_blk = ncb_blk_off(blk, off);
                to_copy = MIN(left, blk.sz - off_blk);

                if (blk.flag & NCB_BK_F_GAP && off_blk + to_copy < blk.sz) {
                        /* Insertion must leave enough space for a new gap
                         * header if stopped in the middle of a gap.
                         */
                        const ncb_sz_t gap_sz = blk.sz - (off_blk + to_copy);
                        if (gap_sz < NCB_GAP_MIN_SZ && !ncb_blk_is_last(buf, blk))
                                return NCB_RET_GAP_SIZE;
                }
                else if (!(blk.flag & NCB_BK_F_GAP) && mode == NCB_ADD_COMPARE) {
                        /* Compare memory of data block in NCB_ADD_COMPARE mode. */
                        const ncb_sz_t off_blk = ncb_blk_off(blk, off);
                        char *st = ncb_peek(buf, off);

                        to_copy = MIN(left, blk.sz - off_blk);
                        if (st + to_copy > ncb_wrap(buf)) {
                                const ncb_sz_t sz1 = ncb_wrap(buf) - st;
                                if (memcmp(st, data, sz1))
                                        return NCB_RET_DATA_REJ;
                                if (memcmp(ncb_orig(buf), data + sz1, to_copy - sz1))
                                        return NCB_RET_DATA_REJ;
                        }
                        else {
                                if (memcmp(st, data, to_copy))
                                        return NCB_RET_DATA_REJ;
                        }
                }

                left -= to_copy;
                data += to_copy;
                off += to_copy;

                blk = ncb_blk_next(buf, blk);
        }

        return NCB_RET_OK;
}

/* Fills new <data> of length <len> inside an already existing data block <blk>
 * at offset <off>. The offset is relative to <blk>, so it cannot be greater
 * than the block size. <mode> specifies whether old data are preserved or
 * overwritten.
 */
static ncb_sz_t ncb_fill_data_blk(const struct ncbuf *buf,
                                  struct ncb_blk blk, ncb_sz_t off,
                                  const char *data, ncb_sz_t len,
                                  enum ncb_add_mode mode)
{
        const ncb_sz_t to_copy = MIN(len, blk.sz - off);
        char *ptr = NULL;

        BUG_ON_HOT(off > blk.sz);
        /* This can happen due to a previous ncb_blk_find() usage. In this
         * case the current fill is a noop.
         */
        if (off == blk.sz)
                return 0;

        if (mode == NCB_ADD_OVERWRT) {
                ptr = ncb_peek(buf, blk.off + off);

                if (ptr + to_copy >= ncb_wrap(buf)) {
                        const ncb_sz_t sz1 = ncb_wrap(buf) - ptr;
                        memcpy(ptr, data, sz1);
                        memcpy(ncb_orig(buf), data + sz1, to_copy - sz1);
                }
                else {
                        memcpy(ptr, data, to_copy);
                }
        }

        return to_copy;
}

/* Fills the gap <blk> starting at <off> with new <data> of length <len>. <off>
 * is relative to <blk>, so it cannot be greater than the block size.
 */
static ncb_sz_t ncb_fill_gap_blk(const struct ncbuf *buf,
                                 struct ncb_blk blk, ncb_sz_t off,
                                 const char *data, ncb_sz_t len)
{
        const ncb_sz_t to_copy = MIN(len, blk.sz - off);
        char *ptr;

        BUG_ON_HOT(off > blk.sz);
        /* This can happen due to a previous ncb_blk_find() usage. In this
         * case the current fill is a noop.
         */
        if (off == blk.sz)
                return 0;

        /* A new gap must be created if insertion stopped before the gap end. */
        if (off + to_copy < blk.sz) {
                const ncb_sz_t gap_off = blk.off + off + to_copy;
                const ncb_sz_t gap_sz = blk.sz - off - to_copy;

                BUG_ON_HOT(!ncb_off_reduced(buf, gap_off) &&
                           blk.off + blk.sz - gap_off < NCB_GAP_MIN_SZ);

                /* write the new gap header unless this is a reduced gap. */
                if (!ncb_off_reduced(buf, gap_off)) {
                        char *gap_ptr = ncb_peek(buf, gap_off + NCB_GAP_SZ_OFF);
                        char *gap_data_ptr = ncb_peek(buf, gap_off + NCB_GAP_SZ_DATA_OFF);

                        ncb_write_off(buf, gap_ptr, gap_sz);
                        ncb_write_off(buf, gap_data_ptr, blk.sz_data);
                }
        }

        /* fill the gap with new data */
        ptr = ncb_peek(buf, blk.off + off);
        if (ptr + to_copy >= ncb_wrap(buf)) {
                ncb_sz_t sz1 = ncb_wrap(buf) - ptr;
                memcpy(ptr, data, sz1);
                memcpy(ncb_orig(buf), data + sz1, to_copy - sz1);
        }
        else {
                memcpy(ptr, data, to_copy);
        }

        return to_copy;
}

/* ******** public API ******** */

int ncb_is_null(const struct ncbuf *buf)
{
        return buf->size == 0;
}

/* Initializes or resets <buf> by clearing all data. Its size is untouched.
 * The buffer is positioned at the <head> offset. Use 0 to realign it. <buf>
 * must not be NCBUF_NULL.
 */
void ncb_init(struct ncbuf *buf, ncb_sz_t head)
{
        BUG_ON_HOT(ncb_is_null(buf));

        BUG_ON_HOT(head >= buf->size);
        buf->head = head;

        ncb_write_off(buf, ncb_reserved(buf), 0);
        ncb_write_off(buf, ncb_head(buf), ncb_size(buf));
        ncb_write_off(buf, ncb_peek(buf, sizeof(ncb_sz_t)), 0);
}

/* Constructs an ncbuf with all its parameters. */
struct ncbuf ncb_make(char *area, ncb_sz_t size, ncb_sz_t head)
{
        struct ncbuf buf;

        /* Ensure that there is enough space for the reserved space and data.
         * This is the minimal value needed to not crash later.
         */
        BUG_ON_HOT(size <= NCB_RESERVED_SZ);

        buf.area = area;
        buf.size = size;
        buf.head = head;

        return buf;
}

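/* Minimal usage sketch of the construction API above (illustrative only; the
 * storage size and head offset are arbitrary). The area must be larger than
 * NCB_RESERVED_SZ for ncb_make() to accept it:
 *
 *   char storage[256];
 *   struct ncbuf buf = ncb_make(storage, sizeof(storage), 0);
 *   ncb_init(&buf, 0);  // empty buffer, head realigned at offset 0
 *   // ncb_size(&buf) == sizeof(storage) - NCB_RESERVED_SZ
 */
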
/* Returns the start of the allocated buffer area. */
char *ncb_orig(const struct ncbuf *buf)
{
        return buf->area;
}

/* Returns the current head pointer into the buffer area. */
char *ncb_head(const struct ncbuf *buf)
{
        return buf->area + buf->head;
}

/* Returns the first byte after the allocated buffer area. */
char *ncb_wrap(const struct ncbuf *buf)
{
        return buf->area + buf->size;
}

/* Returns the usable size of <buf> for data storage. This is the size of the
 * allocated buffer without the reserved header space.
 */
ncb_sz_t ncb_size(const struct ncbuf *buf)
{
        if (ncb_is_null(buf))
                return 0;

        return buf->size - NCB_RESERVED_SZ;
}

/* Returns the total number of bytes stored in the whole <buf>. */
ncb_sz_t ncb_total_data(const struct ncbuf *buf)
{
        struct ncb_blk blk;
        int total = 0;

        for (blk = ncb_blk_first(buf); !ncb_blk_is_null(blk); blk = ncb_blk_next(buf, blk)) {
                if (!(blk.flag & NCB_BK_F_GAP))
                        total += blk.sz;
        }

        return total;
}

/* Returns true if there is no data anywhere in <buf>. */
int ncb_is_empty(const struct ncbuf *buf)
{
        int first_data, first_gap;

        if (ncb_is_null(buf))
                return 1;

        first_data = ncb_read_off(buf, ncb_reserved(buf));
        BUG_ON_HOT(first_data > ncb_size(buf));
        /* The buffer is not empty if the first data block size is not null. */
        if (first_data)
                return 0;

        /* The head contains the first gap size if the first data block is empty. */
        first_gap = ncb_read_off(buf, ncb_head(buf));
        BUG_ON_HOT(first_gap > ncb_size(buf));
        return first_gap == ncb_size(buf);
}

/* Returns true if no more data can be inserted in <buf>. */
int ncb_is_full(const struct ncbuf *buf)
{
        int first_data;

        if (ncb_is_null(buf))
                return 0;

        /* The first data block must cover the whole buffer if full. */
        first_data = ncb_read_off(buf, ncb_reserved(buf));
        BUG_ON_HOT(first_data > ncb_size(buf));
        return first_data == ncb_size(buf);
}

/* Returns true if <buf> contains data fragmented by gaps. */
int ncb_is_fragmented(const struct ncbuf *buf)
{
        struct ncb_blk data, gap;

        if (ncb_is_null(buf))
                return 0;

        /* check if the buffer is empty or full */
        if (ncb_is_empty(buf) || ncb_is_full(buf))
                return 0;

        /* check that the following gap is the last block */
        data = ncb_blk_first(buf);
        gap = ncb_blk_next(buf, data);
        return !ncb_blk_is_last(buf, gap);
}

/* Returns the number of bytes of data available in <buf> starting at offset
 * <off> until the next gap or the buffer end. The counted data may wrap if
 * the buffer storage is not aligned.
 */
ncb_sz_t ncb_data(const struct ncbuf *buf, ncb_sz_t off)
{
        struct ncb_blk blk;
        ncb_sz_t off_blk;

        if (ncb_is_null(buf))
                return 0;

        blk = ncb_blk_find(buf, off);
        off_blk = ncb_blk_off(blk, off);

        /* if <off> is at the frontier between two blocks and <blk> is a gap,
         * retrieve the next data block.
         */
        if (blk.flag & NCB_BK_F_GAP && off_blk == blk.sz &&
            !ncb_blk_is_last(buf, blk)) {
                blk = ncb_blk_next(buf, blk);
                off_blk = ncb_blk_off(blk, off);
        }

        if (blk.flag & NCB_BK_F_GAP)
                return 0;

        return blk.sz - off_blk;
}

/* Adds a new block of data <data> of size <len> in <buf> at offset <off>.
 *
 * Returns NCB_RET_OK on success. On error the following codes are returned:
 * - NCB_RET_GAP_SIZE: cannot add data because the gap formed is too small
 * - NCB_RET_DATA_REJ: old data would be overwritten by different ones in
 *   NCB_ADD_COMPARE mode.
 */
enum ncb_ret ncb_add(struct ncbuf *buf, ncb_sz_t off,
                     const char *data, ncb_sz_t len, enum ncb_add_mode mode)
{
        struct ncb_blk blk;
        ncb_sz_t left = len;
        enum ncb_ret ret;
        char *new_sz;

        if (!len)
                return NCB_RET_OK;

        BUG_ON_HOT(off + len > ncb_size(buf));

        /* Get the block where insertion begins. */
        blk = ncb_blk_find(buf, off);

        /* Check if insertion is possible. */
        ret = ncb_check_insert(buf, blk, off, data, len, mode);
        if (ret != NCB_RET_OK)
                return ret;

        if (blk.flag & NCB_BK_F_GAP) {
                /* Reduce the gap size if insertion begins in a gap. The gap
                 * data size is reset and will be recalculated during insertion.
                 */
                const ncb_sz_t gap_sz = off - blk.off;
                BUG_ON_HOT(gap_sz < NCB_GAP_MIN_SZ);

                /* pointer to the data size to increase. */
                new_sz = ncb_peek(buf, blk.off + NCB_GAP_SZ_DATA_OFF);

                ncb_write_off(buf, blk.sz_ptr, gap_sz);
                ncb_write_off(buf, new_sz, 0);
        }
        else {
                /* pointer to the data size to increase. */
                new_sz = blk.sz_ptr;
        }

        /* insert data */
        while (left) {
                struct ncb_blk next;
                const ncb_sz_t off_blk = ncb_blk_off(blk, off);
                ncb_sz_t done;

                /* Retrieve the next block now: this must be done before
                 * overwriting a gap.
                 */
                next = ncb_blk_next(buf, blk);

                if (blk.flag & NCB_BK_F_GAP) {
                        done = ncb_fill_gap_blk(buf, blk, off_blk, data, left);

                        /* update the inserted data block size */
                        if (off + done == blk.off + blk.sz) {
                                /* merge the next data block if insertion reached the gap end */
                                ncb_inc_off(buf, new_sz, done + blk.sz_data);
                        }
                        else {
                                /* insertion stopped before the gap end */
                                ncb_inc_off(buf, new_sz, done);
                        }
                }
                else {
                        done = ncb_fill_data_blk(buf, blk, off_blk, data, left, mode);
                }

                BUG_ON_HOT(done > blk.sz || done > left);
                left -= done;
                data += done;
                off += done;

                blk = next;
        }

        return NCB_RET_OK;
}

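/* Illustrative sketch of the insertion modes, mirroring the unit tests at the
 * end of this file (<data> and <other> are hypothetical byte arrays; offsets
 * and sizes are arbitrary). Any mode may also return NCB_RET_GAP_SIZE when
 * the insertion would leave a gap smaller than NCB_GAP_MIN_SZ.
 *
 *   ncb_add(&buf, 0, data, 16, NCB_ADD_PRESERVE);  // fill gaps, keep old bytes
 *   ncb_add(&buf, 8, other, 16, NCB_ADD_COMPARE);  // NCB_RET_DATA_REJ if the
 *                                                  // overlapping bytes differ
 *   ncb_add(&buf, 8, other, 16, NCB_ADD_OVERWRT);  // overlapping bytes are
 *                                                  // replaced by <other>
 */
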
/* Advances the head of <buf> by <adv> bytes. Data at the start of the buffer
 * is lost while some space is freed at the end, allowing new data to be
 * inserted.
 *
 * Returns NCB_RET_OK on success. It may return NCB_RET_GAP_SIZE if the
 * operation is rejected due to the formation of a too small gap in front. If
 * the advance is done only inside a data block it is guaranteed to succeed.
 */
enum ncb_ret ncb_advance(struct ncbuf *buf, ncb_sz_t adv)
{
        struct ncb_blk start, last;
        ncb_sz_t off_blk;
        ncb_sz_t first_data_sz;

        BUG_ON_HOT(adv > ncb_size(buf));
        if (!adv)
                return NCB_RET_OK;

        /* Special case if adv is the full size. This is equivalent to a reset. */
        if (adv == ncb_size(buf)) {
                ncb_init(buf, buf->head);
                return NCB_RET_OK;
        }

        start = ncb_blk_find(buf, adv);

        /* Special case if the advance reaches the last block and it is a GAP.
         * The buffer will be left empty, which is thus equivalent to a reset.
         */
        if (ncb_blk_is_last(buf, start) && (start.flag & NCB_BK_F_GAP)) {
                ncb_sz_t new_head = buf->head + adv;
                if (new_head >= buf->size)
                        new_head -= buf->size;

                ncb_init(buf, new_head);
                return NCB_RET_OK;
        }

        last = start;
        while (!ncb_blk_is_last(buf, last))
                last = ncb_blk_next(buf, last);

        off_blk = ncb_blk_off(start, adv);

        if (start.flag & NCB_BK_F_GAP) {
                /* If advancing in a GAP, its new size must be big enough. */
                if (start.sz == off_blk) {
                        /* GAP removed. The buffer will start with the following DATA block. */
                        first_data_sz = start.sz_data;
                }
                else if (start.sz - off_blk < NCB_GAP_MIN_SZ) {
                        return NCB_RET_GAP_SIZE;
                }
                else {
                        /* The buffer will start with this GAP block. */
                        first_data_sz = 0;
                }
        }
        else {
                /* If off_blk is less than start.sz, the data block becomes the
                 * first block. If equal, the data block is completely removed
                 * and thus the following GAP will be the first block.
                 */
                first_data_sz = start.sz - off_blk;
        }

        if (last.flag & NCB_BK_F_GAP) {
                /* Extend the last GAP unless this is a reduced gap. */
                if (!(last.flag & NCB_BK_F_FIN) || last.sz + adv >= NCB_GAP_MIN_SZ) {
                        /* use .st instead of .sz_ptr which can be NULL if reduced gap */
                        ncb_write_off(buf, last.st, last.sz + adv);
                        ncb_write_off(buf, ncb_peek(buf, last.off + NCB_GAP_SZ_DATA_OFF), 0);
                }
        }
        else {
                /* Insert a GAP after the last DATA block. */
                if (adv >= NCB_GAP_MIN_SZ) {
                        ncb_write_off(buf, ncb_peek(buf, last.off + last.sz + NCB_GAP_SZ_OFF), adv);
                        ncb_write_off(buf, ncb_peek(buf, last.off + last.sz + NCB_GAP_SZ_DATA_OFF), 0);
                }
        }

        /* Advance the head and update the reserved header with the new first data size. */
        buf->head += adv;
        if (buf->head >= buf->size)
                buf->head -= buf->size;
        ncb_write_off(buf, ncb_reserved(buf), first_data_sz);

        /* If advancing in a GAP, reduce its size. */
        if (start.flag & NCB_BK_F_GAP && !first_data_sz) {
                ncb_write_off(buf, ncb_head(buf), start.sz - off_blk);
                /* Recopy the block sz_data at the new position. */
                ncb_write_off(buf, ncb_peek(buf, NCB_GAP_SZ_DATA_OFF), start.sz_data);
        }

        return NCB_RET_OK;
}

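/* Minimal consumption sketch combining ncb_data() and ncb_advance(): read
 * whatever is contiguous at the head, then release it. Illustrative only;
 * consume_bytes() stands for any application-defined handler.
 *
 *   ncb_sz_t contig = ncb_data(&buf, 0);
 *   if (contig) {
 *           consume_bytes(ncb_head(&buf), contig); // beware: the data may
 *                                                  // wrap at ncb_wrap()
 *           ncb_advance(&buf, contig);             // cannot fail here (see
 *                                                  // the ncb_advance() doc)
 *   }
 */
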
722/* ******** testing API ******** */
723/* To build it :
Amaury Denoyellef46393a2022-05-16 11:09:05 +0200724 * gcc -Wall -DSTANDALONE -lasan -I./include -o ncbuf src/ncbuf.c
Amaury Denoyelleeeeeed42022-05-04 16:51:19 +0200725 */
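/* Illustrative invocations of the resulting binary, based on the option
 * parsing in main() below:
 *   ./ncbuf                 run the unit tests and the random passes silently
 *   ./ncbuf -s 1024 -h 100  use a 1024-byte buffer with its head at offset 100
 *   ./ncbuf -r -p           memset the area between tests and dump every step
 *                           (an optional delay can be attached to -p)
 */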
#ifdef STANDALONE

int ncb_print = 0;

static void ncbuf_printf(char *str, ...)
{
        va_list args;

        va_start(args, str);
        if (ncb_print)
                vfprintf(stderr, str, args);
        va_end(args);
}

struct rand_off {
        struct list el;
        ncb_sz_t off;
        ncb_sz_t len;
};

static struct rand_off *ncb_generate_rand_off(const struct ncbuf *buf)
{
        struct rand_off *roff;
        roff = calloc(1, sizeof(*roff));
        BUG_ON(!roff);

        roff->off = rand() % (ncb_size(buf));
        if (roff->off > 0 && roff->off < NCB_GAP_MIN_SZ)
                roff->off = 0;

        roff->len = rand() % (ncb_size(buf) - roff->off + 1);

        return roff;
}

static void ncb_print_blk(const struct ncb_blk blk)
{
        if (ncb_print) {
                fprintf(stderr, "%s(%s): %2u/%u.\n",
                        blk.flag & NCB_BK_F_GAP ? "GAP " : "DATA",
                        blk.flag & NCB_BK_F_FIN ? "F" : "-", blk.off, blk.sz);
        }
}

static inline int ncb_is_null_blk(const struct ncb_blk blk)
{
        return !blk.st;
}

static void ncb_loop(const struct ncbuf *buf)
{
        struct ncb_blk blk;

        blk = ncb_blk_first(buf);
        do {
                ncb_print_blk(blk);
                blk = ncb_blk_next(buf, blk);
        } while (!ncb_is_null_blk(blk));

        ncbuf_printf("\n");
}

static void ncbuf_print_buf(struct ncbuf *b, ncb_sz_t len,
                            unsigned char *area, int line)
{
        int i;

        ncbuf_printf("buffer status at line %d\n", line);
        for (i = 0; i < len; ++i) {
                ncbuf_printf("%02x.", area[i]);
                if (i && i % 32 == 31)    ncbuf_printf("\n");
                else if (i && i % 8 == 7) ncbuf_printf(" ");
        }
        ncbuf_printf("\n");

        ncb_loop(b);

        if (ncb_print)
                getchar();
}

static struct ncbuf b;
static unsigned char *bufarea = NULL;
static ncb_sz_t bufsize = 16384;
static ncb_sz_t bufhead = 15;

#define NCB_INIT(buf) \
        if ((reset)) { memset(bufarea, 0xaa, bufsize); } \
        ncb_init(buf, bufhead); \
        ncbuf_print_buf(&b, bufsize, bufarea, __LINE__);

#define NCB_ADD_EQ(buf, off, data, sz, mode, ret) \
        BUG_ON(ncb_add((buf), (off), (data), (sz), (mode)) != (ret)); \
        ncbuf_print_buf(buf, bufsize, bufarea, __LINE__);

#define NCB_ADD_NEQ(buf, off, data, sz, mode, ret) \
        BUG_ON(ncb_add((buf), (off), (data), (sz), (mode)) == (ret)); \
        ncbuf_print_buf(buf, bufsize, bufarea, __LINE__);

#define NCB_ADVANCE_EQ(buf, off, ret) \
        BUG_ON(ncb_advance((buf), (off)) != (ret)); \
        ncbuf_print_buf(buf, bufsize, bufarea, __LINE__);

#define NCB_TOTAL_DATA_EQ(buf, data) \
        BUG_ON(ncb_total_data((buf)) != (data));

#define NCB_DATA_EQ(buf, off, data) \
        BUG_ON(ncb_data((buf), (off)) != (data));

static int ncbuf_test(ncb_sz_t head, int reset, int print_delay)
{
        char *data0, data1[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
        struct list list = LIST_HEAD_INIT(list);
        struct rand_off *roff, *roff_tmp;
        enum ncb_ret ret;

        data0 = malloc(bufsize);
        memset(data0, 0xff, bufsize);

        bufarea = malloc(bufsize);

        fprintf(stderr, "running unit tests\n");

        b = NCBUF_NULL;
        BUG_ON(!ncb_is_null(&b));
        NCB_DATA_EQ(&b, 0, 0);
        NCB_TOTAL_DATA_EQ(&b, 0);
        BUG_ON(ncb_size(&b) != 0);
        BUG_ON(!ncb_is_empty(&b));
        BUG_ON(ncb_is_full(&b));
        BUG_ON(ncb_is_fragmented(&b));

        b.area = (char *)bufarea;
        b.size = bufsize;
        b.head = head;
        NCB_INIT(&b);

        /* insertion test suite */
        NCB_INIT(&b);
        NCB_DATA_EQ(&b, 0, 0); NCB_DATA_EQ(&b, bufsize - NCB_RESERVED_SZ - 1, 0); /* first and last offset */
        NCB_ADD_EQ(&b, 24, data0, 9, NCB_ADD_PRESERVE, NCB_RET_OK); NCB_DATA_EQ(&b, 24, 9);
        /* insert new data at the same offset as old */
        NCB_ADD_EQ(&b, 24, data0, 16, NCB_ADD_PRESERVE, NCB_RET_OK); NCB_DATA_EQ(&b, 24, 16);

        NCB_INIT(&b); NCB_DATA_EQ(&b, 0, 0);
        NCB_ADD_EQ(&b, 0, data0, 16, NCB_ADD_PRESERVE, NCB_RET_OK); NCB_DATA_EQ(&b, 0, 16);
        BUG_ON(ncb_is_fragmented(&b));
        NCB_ADD_EQ(&b, 24, data0, 16, NCB_ADD_PRESERVE, NCB_RET_OK); NCB_DATA_EQ(&b, 0, 16);
        BUG_ON(!ncb_is_fragmented(&b));
        /* insert data overlapping two data blocks and a gap */
        NCB_ADD_EQ(&b, 12, data0, 16, NCB_ADD_PRESERVE, NCB_RET_OK); NCB_DATA_EQ(&b, 0, 40);
        BUG_ON(ncb_is_fragmented(&b));

        NCB_INIT(&b);
        NCB_ADD_EQ(&b, 32, data0, 16, NCB_ADD_PRESERVE, NCB_RET_OK); NCB_DATA_EQ(&b, 0, 0); NCB_DATA_EQ(&b, 16, 0); NCB_DATA_EQ(&b, 32, 16);
        BUG_ON(!ncb_is_fragmented(&b));
        NCB_ADD_EQ(&b, 0, data0, 16, NCB_ADD_PRESERVE, NCB_RET_OK); NCB_DATA_EQ(&b, 0, 16); NCB_DATA_EQ(&b, 16, 0); NCB_DATA_EQ(&b, 32, 16);
        BUG_ON(!ncb_is_fragmented(&b));
        /* insert data to exactly cover a gap between two data blocks */
        NCB_ADD_EQ(&b, 16, data0, 16, NCB_ADD_PRESERVE, NCB_RET_OK); NCB_DATA_EQ(&b, 0, 48); NCB_DATA_EQ(&b, 16, 32); NCB_DATA_EQ(&b, 32, 16);
        BUG_ON(ncb_is_fragmented(&b));

        NCB_INIT(&b);
        NCB_ADD_EQ(&b, 0, data0, 8, NCB_ADD_PRESERVE, NCB_RET_OK);
        /* this insertion must be rejected because of the minimal gap size */
        NCB_ADD_EQ(&b, 10, data0, 8, NCB_ADD_PRESERVE, NCB_RET_GAP_SIZE);

        /* Test reduced gap support */
        NCB_INIT(&b);
        /* this insertion will form a reduced gap */
        NCB_ADD_EQ(&b, 0, data0, bufsize - (NCB_GAP_MIN_SZ - 1), NCB_ADD_COMPARE, NCB_RET_OK);

        /* Test the various insertion modes */
        NCB_INIT(&b);
        NCB_ADD_EQ(&b, 10, data1, 16, NCB_ADD_PRESERVE, NCB_RET_OK);
        NCB_ADD_EQ(&b, 12, data1, 16, NCB_ADD_COMPARE, NCB_RET_DATA_REJ);
        NCB_ADD_EQ(&b, 12, data1, 16, NCB_ADD_PRESERVE, NCB_RET_OK); BUG_ON(*ncb_peek(&b, 12) != data1[2]);
        NCB_ADD_EQ(&b, 12, data1, 16, NCB_ADD_OVERWRT, NCB_RET_OK); BUG_ON(*ncb_peek(&b, 12) == data1[2]);

        /* advance test suite */
        NCB_INIT(&b);
        NCB_ADVANCE_EQ(&b, 10, NCB_RET_OK); /* advance in an empty buffer; this ensures we do not leave an empty DATA in the middle of the buffer */
        NCB_ADVANCE_EQ(&b, ncb_size(&b) - 2, NCB_RET_OK);

        NCB_INIT(&b);
        /* first fill the buffer */
        NCB_ADD_EQ(&b, 0, data0, bufsize - NCB_RESERVED_SZ, NCB_ADD_COMPARE, NCB_RET_OK);
        /* delete 2 bytes: a reduced gap must be created */
        NCB_ADVANCE_EQ(&b, 2, NCB_RET_OK); NCB_TOTAL_DATA_EQ(&b, ncb_size(&b) - 2);
        /* delete 1 byte: extend the reduced gap */
        NCB_ADVANCE_EQ(&b, 1, NCB_RET_OK); NCB_TOTAL_DATA_EQ(&b, ncb_size(&b) - 3);
        /* delete 5 bytes: a full gap must be present */
        NCB_ADVANCE_EQ(&b, 5, NCB_RET_OK); NCB_TOTAL_DATA_EQ(&b, ncb_size(&b) - 8);
        /* completely clear the buffer */
        NCB_ADVANCE_EQ(&b, bufsize - NCB_RESERVED_SZ, NCB_RET_OK); NCB_TOTAL_DATA_EQ(&b, 0);

        NCB_INIT(&b);
        NCB_ADD_EQ(&b, 10, data0, 10, NCB_ADD_PRESERVE, NCB_RET_OK);
        NCB_ADVANCE_EQ(&b, 2, NCB_RET_OK); /* reduce a gap in front of the buffer */
        NCB_ADVANCE_EQ(&b, 1, NCB_RET_GAP_SIZE); /* reject */
        NCB_ADVANCE_EQ(&b, 8, NCB_RET_OK); /* remove completely the gap */
        NCB_ADVANCE_EQ(&b, 8, NCB_RET_OK); /* remove inside the data */
        NCB_ADVANCE_EQ(&b, 10, NCB_RET_OK); /* remove completely the data */

        fprintf(stderr, "first random pass\n");
        NCB_INIT(&b);

        /* generate random data offsets until the buffer is full */
        while (!ncb_is_full(&b)) {
                roff = ncb_generate_rand_off(&b);
                LIST_INSERT(&list, &roff->el);

                ret = ncb_add(&b, roff->off, data0, roff->len, NCB_ADD_COMPARE);
                BUG_ON(ret == NCB_RET_DATA_REJ);
                ncbuf_print_buf(&b, bufsize, bufarea, __LINE__);
                usleep(print_delay);
        }

        fprintf(stderr, "buf full, prepare for reverse random\n");
        ncbuf_print_buf(&b, bufsize, bufarea, __LINE__);

        /* insert the previously generated random offsets in the reverse order.
         * At the end, the buffer should be full.
         */
        NCB_INIT(&b);
        list_for_each_entry_safe(roff, roff_tmp, &list, el) {
                int full = ncb_is_full(&b);
                if (!full) {
                        ret = ncb_add(&b, roff->off, data0, roff->len, NCB_ADD_COMPARE);
                        BUG_ON(ret == NCB_RET_DATA_REJ);
                        ncbuf_print_buf(&b, bufsize, bufarea, __LINE__);
                        usleep(print_delay);
                }

                LIST_DELETE(&roff->el);
                free(roff);
        }

        if (!ncb_is_full(&b))
                abort();

        fprintf(stderr, "done\n");

        free(bufarea);
        free(data0);

        return 1;
}

int main(int argc, char **argv)
{
        int reset = 0;
        int print_delay = 100000;
        int c;

        opterr = 0;
        while ((c = getopt(argc, argv, "h:s:rp::")) != -1) {
                switch (c) {
                case 'h':
                        bufhead = atoi(optarg);
                        break;
                case 's':
                        bufsize = atoi(optarg);
                        if (bufsize < 64) {
                                fprintf(stderr, "bufsize should be at least 64 bytes for the unit test suite\n");
                                exit(127);
                        }
                        break;
                case 'r':
                        reset = 1;
                        break;
                case 'p':
                        if (optarg)
                                print_delay = atoi(optarg);
                        ncb_print = 1;
                        break;
                case '?':
                default:
                        fprintf(stderr, "usage: %s [-r] [-s bufsize] [-h bufhead] [-p <delay_msec>]\n", argv[0]);
                        exit(127);
                }
        }

        ncbuf_test(bufhead, reset, print_delay);
        return EXIT_SUCCESS;
}

#endif /* STANDALONE */