/*
 * internal HTTP message
 *
 * Copyright 2018 HAProxy Technologies, Christopher Faulet <cfaulet@haproxy.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <haproxy/chunk.h>
#include <haproxy/htx.h>

struct htx htx_empty = { .size = 0, .data = 0, .head = -1, .tail = -1, .first = -1 };

/* Defragments an HTX message. It removes unused blocks and unwraps the payloads
 * part. A temporary buffer is used to do so. This function never fails. Most of
 * the time, we need to keep a reference to a specific HTX block. Thus, if <blk>
 * is set, the pointer to its new position, after defrag, is returned. In
 * addition, if the size of the block must be altered, <blkinfo> info must be
 * provided (!= 0). But in this case, it remains the caller's responsibility to
 * update the block content.
 */
/* TODO: merge data blocks into one */
struct htx_blk *htx_defrag(struct htx *htx, struct htx_blk *blk, uint32_t blkinfo)
{
	struct buffer *chunk = get_trash_chunk();
	struct htx *tmp = htxbuf(chunk);
	struct htx_blk *newblk, *oldblk;
	uint32_t new, old, blkpos;
	uint32_t addr, blksz;
	int32_t first = -1;

	if (htx->head == -1)
		return NULL;

	blkpos = -1;

	new = 0;
	addr = 0;
	tmp->size = htx->size;
	tmp->data = 0;

	/* start from the head */
	for (old = htx_get_head(htx); old != -1; old = htx_get_next(htx, old)) {
		oldblk = htx_get_blk(htx, old);
		if (htx_get_blk_type(oldblk) == HTX_BLK_UNUSED)
			continue;

		blksz = htx_get_blksz(oldblk);
		memcpy((void *)tmp->blocks + addr, htx_get_blk_ptr(htx, oldblk), blksz);

		/* update the start-line position */
		if (htx->first == old)
			first = new;

		newblk = htx_get_blk(tmp, new);
		newblk->addr = addr;
		newblk->info = oldblk->info;

		/* if <blk> is defined, save its new position */
		if (blk != NULL && blk == oldblk) {
			if (blkinfo)
				newblk->info = blkinfo;
			blkpos = new;
		}

		blksz = htx_get_blksz(newblk);
		addr += blksz;
		tmp->data += blksz;
		new++;
	}

	htx->data = tmp->data;
	htx->first = first;
	htx->head = 0;
	htx->tail = new - 1;
	htx->head_addr = htx->end_addr = 0;
	htx->tail_addr = addr;
	htx->flags &= ~HTX_FL_FRAGMENTED;
	memcpy((void *)htx->blocks, (void *)tmp->blocks, htx->size);

	return ((blkpos == -1) ? NULL : htx_get_blk(htx, blkpos));
}
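
/* Usage sketch (illustrative, not part of the upstream file): after a defrag,
 * blocks occupy positions 0..tail and their payloads are packed from address
 * zero, so a plain walk over positions sees them in order. The helper name is
 * hypothetical; it only relies on accessors from htx.h already used above.
 */
static uint32_t htx_example_sum_blksz(struct htx *htx)
{
	uint32_t total = 0;
	int32_t pos;

	/* walk the block table from head to tail and add up payload sizes */
	for (pos = htx_get_head(htx); pos != -1; pos = htx_get_next(htx, pos))
		total += htx_get_blksz(htx_get_blk(htx, pos));
	return total; /* equals htx->data right after a defrag, since unused blocks were dropped */
}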

/* Defragments the blocks of an HTX message. The payloads part is kept untouched
 * here. This function will move back all blocks starting at position 0,
 * removing unused blocks. It must never be called with an empty message.
 */
static void htx_defrag_blks(struct htx *htx)
{
	int32_t pos, new;

	new = 0;
	for (pos = htx_get_head(htx); pos != -1; pos = htx_get_next(htx, pos)) {
		struct htx_blk *posblk, *newblk;

		if (pos == new) {
			new++;
			continue;
		}

		posblk = htx_get_blk(htx, pos);
		if (htx_get_blk_type(posblk) == HTX_BLK_UNUSED)
			continue;

		if (htx->first == pos)
			htx->first = new;
		newblk = htx_get_blk(htx, new++);
		newblk->info = posblk->info;
		newblk->addr = posblk->addr;
	}
	BUG_ON(!new);
	htx->head = 0;
	htx->tail = new - 1;
}

/* Reserves a new block in the HTX message <htx> with a content of <blksz>
 * bytes. If there is not enough space, NULL is returned. Otherwise the reserved
 * block is returned and the HTX message is updated. Space for this new block is
 * reserved in the HTX message. But it is the caller's responsibility to set the
 * right info in the block to reflect the stored data.
 */
static struct htx_blk *htx_reserve_nxblk(struct htx *htx, uint32_t blksz)
{
	struct htx_blk *blk;
	uint32_t tail, headroom, tailroom;

	if (blksz > htx_free_data_space(htx))
		return NULL; /* full */

	if (htx->head == -1) {
		/* Empty message */
		htx->head = htx->tail = htx->first = 0;
		blk = htx_get_blk(htx, htx->tail);
		blk->addr = 0;
		htx->data = blksz;
		htx->tail_addr = blksz;
		return blk;
	}

	/* Find the block's position. First, we try to get the next position in
	 * the message, increasing the tail by one. If this position is not
	 * available but there are some holes, we try to defrag the blocks
	 * without touching their payload. If it is impossible, we fully defrag
	 * the message.
	 */
	tail = htx->tail + 1;
	if (htx_pos_to_addr(htx, tail) >= htx->tail_addr)
		;
	else if (htx->head > 0) {
		htx_defrag_blks(htx);
		tail = htx->tail + 1;
		BUG_ON(htx_pos_to_addr(htx, tail) < htx->tail_addr);
	}
	else
		goto defrag;

	/* Now, we have found the block's position. Try to find where to put its
	 * payload. The free space is split in two areas:
	 *
	 *   * The free space in front of the blocks table. This one is used if and
	 *     only if the other one was not used yet.
	 *
	 *   * The free space at the beginning of the message. Once this one is
	 *     used, the other one is never used again, until the next defrag.
	 */
	headroom = (htx->end_addr - htx->head_addr);
	tailroom = (!htx->head_addr ? htx_pos_to_addr(htx, tail) - htx->tail_addr : 0);
	BUG_ON((int32_t)headroom < 0);
	BUG_ON((int32_t)tailroom < 0);

	if (blksz <= tailroom) {
		blk = htx_get_blk(htx, tail);
		blk->addr = htx->tail_addr;
		htx->tail_addr += blksz;
	}
	else if (blksz <= headroom) {
		blk = htx_get_blk(htx, tail);
		blk->addr = htx->head_addr;
		htx->head_addr += blksz;
	}
	else {
	  defrag:
		/* need to defragment the message before inserting upfront */
		htx_defrag(htx, NULL, 0);
		tail = htx->tail + 1;
		blk = htx_get_blk(htx, tail);
		blk->addr = htx->tail_addr;
		htx->tail_addr += blksz;
	}

	htx->tail = tail;
	htx->data += blksz;
	/* Set first position if not already set */
	if (htx->first == -1)
		htx->first = tail;

	BUG_ON((int32_t)htx->tail_addr < 0);
	BUG_ON((int32_t)htx->head_addr < 0);
	BUG_ON(htx->end_addr > htx->tail_addr);
	BUG_ON(htx->head_addr > htx->end_addr);

	return blk;
}

/* Prepares the block for an expansion of its payload. The payload will be
 * expanded by <delta> bytes and we need to find where this expansion will be
 * performed. It can be a compression if <delta> is negative. This function only
 * updates all addresses. The caller has the responsibility to perform the
 * expansion and update the block and the HTX message accordingly. No error must
 * occur. It returns the following values:
 *
 *  0: The expansion cannot be performed, there is not enough space.
 *
 *  1: the expansion must be performed in place, there is enough space after
 *     the block's payload to handle it. This is especially true if it is a
 *     compression and not an expansion.
 *
 *  2: the block's payload must be moved to the new block address before doing
 *     the expansion.
 *
 *  3: the HTX message must be defragmented
 */
static int htx_prepare_blk_expansion(struct htx *htx, struct htx_blk *blk, int32_t delta)
{
	uint32_t sz, tailroom, headroom;
	int ret = 3;

	BUG_ON(htx->head == -1);

	headroom = (htx->end_addr - htx->head_addr);
	tailroom = (htx_pos_to_addr(htx, htx->tail) - htx->tail_addr);
	BUG_ON((int32_t)headroom < 0);
	BUG_ON((int32_t)tailroom < 0);

	sz = htx_get_blksz(blk);
	if (delta <= 0) {
		/* It is a compression, it can be performed in place */
		if (blk->addr+sz == htx->tail_addr)
			htx->tail_addr += delta;
		else if (blk->addr+sz == htx->head_addr)
			htx->head_addr += delta;
		ret = 1;
	}
	else if (delta > htx_free_space(htx)) {
		/* There is not enough space to handle the expansion */
		ret = 0;
	}
	else if (blk->addr+sz == htx->tail_addr) {
		/* The block's payload is just before the tail room */
		if (delta < tailroom) {
			/* Expand the block's payload */
			htx->tail_addr += delta;
			ret = 1;
		}
		else if ((sz + delta) < headroom) {
			uint32_t oldaddr = blk->addr;

			/* Move the block's payload into the headroom */
			blk->addr = htx->head_addr;
			htx->tail_addr -= sz;
			htx->head_addr += sz + delta;
			if (oldaddr == htx->end_addr) {
				if (htx->end_addr == htx->tail_addr) {
					htx->tail_addr = htx->head_addr;
					htx->head_addr = htx->end_addr = 0;
				}
				else
					htx->end_addr += sz;
			}
			ret = 2;
		}
	}
	else if (blk->addr+sz == htx->head_addr) {
		/* The block's payload is just before the head room */
		if (delta < headroom) {
			/* Expand the block's payload */
			htx->head_addr += delta;
			ret = 1;
		}
	}
	else {
		/* The block's payload is not at the rooms edge */
		if (!htx->head_addr && sz+delta < tailroom) {
			/* Move the block's payload into the tailroom */
			if (blk->addr == htx->end_addr)
				htx->end_addr += sz;
			blk->addr = htx->tail_addr;
			htx->tail_addr += sz + delta;
			ret = 2;
		}
		else if (sz+delta < headroom) {
			/* Move the block's payload into the headroom */
			if (blk->addr == htx->end_addr)
				htx->end_addr += sz;
			blk->addr = htx->head_addr;
			htx->head_addr += sz + delta;
			ret = 2;
		}
	}
	/* Otherwise defrag the HTX message */

	BUG_ON((int32_t)htx->tail_addr < 0);
	BUG_ON((int32_t)htx->head_addr < 0);
	BUG_ON(htx->end_addr > htx->tail_addr);
	BUG_ON(htx->head_addr > htx->end_addr);
	return ret;
}

/* Adds a new block of type <type> in the HTX message <htx>. Its content size is
 * passed but it is the caller's responsibility to do the copy.
 */
struct htx_blk *htx_add_blk(struct htx *htx, enum htx_blk_type type, uint32_t blksz)
{
	struct htx_blk *blk;

	BUG_ON(blksz >= 256 << 20);
	blk = htx_reserve_nxblk(htx, blksz);
	if (!blk)
		return NULL;
	BUG_ON(blk->addr > htx->size);

	blk->info = (type << 28);
	return blk;
}
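
/* Usage sketch (illustrative, not part of the upstream file): adding a header
 * block by hand. The block is reserved with htx_add_blk(), then the caller
 * fills the info word and copies the payload, mirroring what
 * htx_replace_header() does below. The helper name is hypothetical; dedicated
 * helpers (htx_add_header() and friends) are assumed to exist in htx.h.
 */
static struct htx_blk *htx_example_add_hdr(struct htx *htx, const struct ist name,
					   const struct ist value)
{
	struct htx_blk *blk;

	blk = htx_add_blk(htx, HTX_BLK_HDR, name.len + value.len);
	if (!blk)
		return NULL;

	/* HDR blocks encode the name length on the low 8 bits and the value
	 * length on the next 20 bits, as in htx_replace_header() below.
	 */
	blk->info = (HTX_BLK_HDR << 28) + (value.len << 8) + name.len;
	ist2bin_lc(htx_get_blk_ptr(htx, blk), name); /* header names are lower cased */
	memcpy(htx_get_blk_ptr(htx, blk) + name.len, value.ptr, value.len);
	return blk;
}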

/* Removes the block <blk> from the HTX message <htx>. The function returns the
 * block following <blk> or NULL if <blk> is the last block or the last inserted
 * one.
 */
struct htx_blk *htx_remove_blk(struct htx *htx, struct htx_blk *blk)
{
	enum htx_blk_type type;
	uint32_t pos, addr, sz;

	BUG_ON(!blk || htx->head == -1);

	/* This is the last block in use */
	if (htx->head == htx->tail) {
		uint32_t flags = (htx->flags & ~HTX_FL_FRAGMENTED); /* Preserve flags except FRAGMENTED */

		htx_reset(htx);
		htx->flags = flags; /* restore flags */
		return NULL;
	}

	type = htx_get_blk_type(blk);
	pos = htx_get_blk_pos(htx, blk);
	sz = htx_get_blksz(blk);
	addr = blk->addr;
	if (type != HTX_BLK_UNUSED) {
		/* Mark the block as unused, decrement allocated size */
		htx->data -= htx_get_blksz(blk);
		blk->info = ((uint32_t)HTX_BLK_UNUSED << 28);
	}

	/* There are at least 2 blocks, so tail is always > 0 */
	if (pos == htx->head) {
		/* move the head forward */
		htx->head++;
	}
	else if (pos == htx->tail) {
		/* remove the tail. this was the last inserted block so
		 * return NULL. */
		htx->tail--;
		blk = NULL;
		goto end;
	}
	else
		htx->flags |= HTX_FL_FRAGMENTED;

	blk = htx_get_blk(htx, pos+1);

  end:
	if (pos == htx->first)
		htx->first = (blk ? htx_get_blk_pos(htx, blk) : -1);

	if (htx->head == htx->tail) {
		/* If there is just one block in the HTX message, free space can
		 * be adjusted. This operation could save some defrags. */
		struct htx_blk *lastblk = htx_get_blk(htx, htx->tail);

		htx->head_addr = 0;
		htx->end_addr = lastblk->addr;
		htx->tail_addr = lastblk->addr+htx->data;
	}
	else {
		if (addr+sz == htx->tail_addr)
			htx->tail_addr = addr;
		else if (addr+sz == htx->head_addr)
			htx->head_addr = addr;
		if (addr == htx->end_addr) {
			if (htx->tail_addr == htx->end_addr) {
				htx->tail_addr = htx->head_addr;
				htx->head_addr = htx->end_addr = 0;
			}
			else
				htx->end_addr += sz;
		}
	}

	BUG_ON((int32_t)htx->tail_addr < 0);
	BUG_ON((int32_t)htx->head_addr < 0);
	BUG_ON(htx->end_addr > htx->tail_addr);
	BUG_ON(htx->head_addr > htx->end_addr);
	return blk;
}
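
/* Usage sketch (illustrative, not part of the upstream file): since
 * htx_remove_blk() returns the following block, a filtering loop advances
 * either through the removal or with htx_get_next_blk(). The helper name and
 * the filtered type are arbitrary.
 */
static void htx_example_remove_type(struct htx *htx, enum htx_blk_type type)
{
	struct htx_blk *blk = htx_get_head_blk(htx);

	while (blk) {
		if (htx_get_blk_type(blk) == type)
			blk = htx_remove_blk(htx, blk); /* returns the next block */
		else
			blk = htx_get_next_blk(htx, blk);
	}
}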

/* Looks for the HTX block containing the offset <offset>, starting at the HTX
 * message's head. The function returns an htx_ret with the found HTX block and
 * the position inside this block where the offset is. If the offset <offset> is
 * outside of the HTX message, htx_ret.blk is set to NULL.
 */
struct htx_ret htx_find_offset(struct htx *htx, uint32_t offset)
{
	struct htx_blk *blk;
	struct htx_ret htxret = { .blk = NULL, .ret = 0 };

	if (offset >= htx->data)
		return htxret;

	for (blk = htx_get_head_blk(htx); blk && offset; blk = htx_get_next_blk(htx, blk)) {
		uint32_t sz = htx_get_blksz(blk);

		if (offset < sz)
			break;
		offset -= sz;
	}
	htxret.blk = blk;
	htxret.ret = offset;
	return htxret;
}
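
/* Usage sketch (illustrative, not part of the upstream file): htx_ret.blk is
 * the block containing <offset> and htx_ret.ret is the relative offset inside
 * that block, so a single payload byte can be read like this. The helper name
 * is hypothetical.
 */
static int htx_example_byte_at(struct htx *htx, uint32_t offset)
{
	struct htx_ret htxret = htx_find_offset(htx, offset);

	if (!htxret.blk)
		return -1; /* offset outside of the message */
	return *((unsigned char *)htx_get_blk_ptr(htx, htxret.blk) + htxret.ret);
}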

/* Removes all blocks after the one containing the offset <offset>. This last
 * one may be truncated if it is a DATA block.
 */
void htx_truncate(struct htx *htx, uint32_t offset)
{
	struct htx_blk *blk;
	struct htx_ret htxret = htx_find_offset(htx, offset);

	blk = htxret.blk;
	if (blk && htxret.ret && htx_get_blk_type(blk) == HTX_BLK_DATA) {
		htx_change_blk_value_len(htx, blk, htxret.ret);
		blk = htx_get_next_blk(htx, blk);
	}
	while (blk)
		blk = htx_remove_blk(htx, blk);
}

Christopher Faulet3b219722019-06-19 13:48:09 +0200454/* Drains <count> bytes from the HTX message <htx>. If the last block is a DATA
455 * block, it will be cut if necessary. Others blocks will be removed at once if
456 * <count> is large enough. The function returns an htx_ret with the first block
Ilya Shipitsin47d17182020-06-21 21:42:57 +0500457 * remaining in the message and the amount of data drained. If everything is
Christopher Faulet3b219722019-06-19 13:48:09 +0200458 * removed, htx_ret.blk is set to NULL.
Christopher Faulet549822f2019-02-25 10:23:19 +0100459 */
struct htx_ret htx_drain(struct htx *htx, uint32_t count)
{
	struct htx_blk *blk;
	struct htx_ret htxret = { .blk = NULL, .ret = 0 };

	if (count == htx->data) {
		uint32_t flags = (htx->flags & ~HTX_FL_FRAGMENTED); /* Preserve flags except FRAGMENTED */

		htx_reset(htx);
		htx->flags = flags; /* restore flags */
		htxret.ret = count;
		return htxret;
	}

	blk = htx_get_head_blk(htx);
	while (count && blk) {
		uint32_t sz = htx_get_blksz(blk);
		enum htx_blk_type type = htx_get_blk_type(blk);

		/* Ignore unused block */
		if (type == HTX_BLK_UNUSED)
			goto next;

		if (sz > count) {
			if (type == HTX_BLK_DATA) {
				htx_cut_data_blk(htx, blk, count);
				htxret.ret += count;
			}
			break;
		}
		count -= sz;
		htxret.ret += sz;
	  next:
		blk = htx_remove_blk(htx, blk);
	}
	htxret.blk = blk;

	return htxret;
}
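
/* Usage sketch (illustrative, not part of the upstream file): consuming a
 * forwarding budget from the start of a message. The helper name is
 * hypothetical.
 */
static uint32_t htx_example_forward(struct htx *htx, uint32_t budget)
{
	struct htx_ret htxret = htx_drain(htx, budget);

	/* htxret.blk is NULL when the whole message was removed, otherwise it
	 * points to the first remaining block.
	 */
	return htxret.ret; /* bytes actually removed, possibly less than <budget> */
}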

/* Tries to append data to the last inserted block, if the type matches and if
 * there is enough space to take it all. If the space wraps, the buffer is
 * defragmented and a new block is inserted. If an error occurred, NULL is
 * returned. Otherwise, on success, the updated block (or the new one) is
 * returned. Due to its nature this function can be expensive and should be
 * avoided whenever possible.
 */
struct htx_blk *htx_add_data_atonce(struct htx *htx, struct ist data)
{
	struct htx_blk *blk, *tailblk;
	void *ptr;
	uint32_t len, sz, tailroom, headroom;

	if (htx->head == -1)
		goto add_new_block;

	/* Not enough space to store data */
	if (data.len > htx_free_data_space(htx))
		return NULL;

	/* get the tail block and its size */
	tailblk = htx_get_tail_blk(htx);
	if (tailblk == NULL)
		goto add_new_block;
	sz = htx_get_blksz(tailblk);

	/* Don't try to append data if the last inserted block is not of the
	 * same type */
	if (htx_get_blk_type(tailblk) != HTX_BLK_DATA)
		goto add_new_block;

	/*
	 * Same type and enough space: append data
	 */
	headroom = (htx->end_addr - htx->head_addr);
	tailroom = (htx_pos_to_addr(htx, htx->tail) - htx->tail_addr);
	BUG_ON((int32_t)headroom < 0);
	BUG_ON((int32_t)tailroom < 0);

	len = data.len;
	if (tailblk->addr+sz == htx->tail_addr) {
		if (data.len <= tailroom)
			goto append_data;
		else if (!htx->head_addr) {
			len = tailroom;
			goto append_data;
		}
	}
	else if (tailblk->addr+sz == htx->head_addr && data.len <= headroom)
		goto append_data;

	goto add_new_block;

  append_data:
	/* Append data and update the block itself */
	ptr = htx_get_blk_ptr(htx, tailblk);
	memcpy(ptr+sz, data.ptr, len);
	htx_change_blk_value_len(htx, tailblk, sz+len);

	if (data.len == len) {
		blk = tailblk;
		goto end;
	}
	data = istadv(data, len);

  add_new_block:
	blk = htx_add_blk(htx, HTX_BLK_DATA, data.len);
	if (!blk)
		return NULL;

	blk->info += data.len;
	memcpy(htx_get_blk_ptr(htx, blk), data.ptr, data.len);

  end:
	BUG_ON((int32_t)htx->tail_addr < 0);
	BUG_ON((int32_t)htx->head_addr < 0);
	BUG_ON(htx->end_addr > htx->tail_addr);
	BUG_ON(htx->head_addr > htx->end_addr);
	return blk;
}
580
581/* Replaces a value part of a block by a new one. The new part can be smaller or
582 * larger than the old one. This function works for any kind of block with
583 * attached data. It returns the new block on success, otherwise it returns
584 * NULL.
585 */
586struct htx_blk *htx_replace_blk_value(struct htx *htx, struct htx_blk *blk,
Christopher Fauletaa75b3d2018-12-05 16:20:40 +0100587 const struct ist old, const struct ist new)
Christopher Fauleta3d2a162018-10-22 08:59:39 +0200588{
Christopher Fauletaa75b3d2018-12-05 16:20:40 +0100589 struct ist n, v;
Christopher Faulete97f3ba2018-12-10 15:39:40 +0100590 int32_t delta;
Christopher Fauletd7884d32019-06-11 10:40:43 +0200591 int ret;
Christopher Fauleta3d2a162018-10-22 08:59:39 +0200592
Christopher Fauletd7884d32019-06-11 10:40:43 +0200593 n = htx_get_blk_name(htx, blk);
594 v = htx_get_blk_value(htx, blk);
Christopher Faulete97f3ba2018-12-10 15:39:40 +0100595 delta = new.len - old.len;
Christopher Fauletd7884d32019-06-11 10:40:43 +0200596 ret = htx_prepare_blk_expansion(htx, blk, delta);
597 if (!ret)
598 return NULL; /* not enough space */
Christopher Faulete97f3ba2018-12-10 15:39:40 +0100599
Christopher Faulet3b219722019-06-19 13:48:09 +0200600 if (ret == 1) { /* Replace in place */
Christopher Fauletd7884d32019-06-11 10:40:43 +0200601 if (delta <= 0) {
Christopher Faulet3b219722019-06-19 13:48:09 +0200602 /* compression: copy new data first then move the end */
Christopher Fauletd7884d32019-06-11 10:40:43 +0200603 memcpy(old.ptr, new.ptr, new.len);
Christopher Fauletaa75b3d2018-12-05 16:20:40 +0100604 memmove(old.ptr + new.len, old.ptr + old.len, (v.ptr + v.len) - (old.ptr + old.len));
Christopher Fauletd7884d32019-06-11 10:40:43 +0200605 }
606 else {
Christopher Faulet3b219722019-06-19 13:48:09 +0200607 /* expansion: move the end first then copy new data */
Christopher Fauletd7884d32019-06-11 10:40:43 +0200608 memmove(old.ptr + new.len, old.ptr + old.len, (v.ptr + v.len) - (old.ptr + old.len));
609 memcpy(old.ptr, new.ptr, new.len);
610 }
Christopher Faulet159873a2021-06-09 17:30:40 +0200611
612 /* set the new block size and update HTX message */
613 htx_set_blk_value_len(blk, v.len + delta);
614 htx->data += delta;
Christopher Fauletaa75b3d2018-12-05 16:20:40 +0100615 }
Christopher Faulet3b219722019-06-19 13:48:09 +0200616 else if (ret == 2) { /* New address but no defrag */
Christopher Fauletd7884d32019-06-11 10:40:43 +0200617 void *ptr = htx_get_blk_ptr(htx, blk);
Christopher Fauleta3d2a162018-10-22 08:59:39 +0200618
Christopher Fauletd7884d32019-06-11 10:40:43 +0200619 /* Copy the name, if any */
620 memcpy(ptr, n.ptr, n.len);
621 ptr += n.len;
Christopher Fauleta3d2a162018-10-22 08:59:39 +0200622
Christopher Fauletd7884d32019-06-11 10:40:43 +0200623 /* Copy value before old part, if any */
624 memcpy(ptr, v.ptr, old.ptr - v.ptr);
625 ptr += old.ptr - v.ptr;
Christopher Fauleta3d2a162018-10-22 08:59:39 +0200626
Christopher Fauletd7884d32019-06-11 10:40:43 +0200627 /* Copy new value */
628 memcpy(ptr, new.ptr, new.len);
629 ptr += new.len;
Christopher Fauleta3d2a162018-10-22 08:59:39 +0200630
Christopher Fauletd7884d32019-06-11 10:40:43 +0200631 /* Copy value after old part, if any */
632 memcpy(ptr, old.ptr + old.len, (v.ptr + v.len) - (old.ptr + old.len));
Christopher Fauleta3d2a162018-10-22 08:59:39 +0200633
Christopher Faulet159873a2021-06-09 17:30:40 +0200634 /* set the new block size and update HTX message */
635 htx_set_blk_value_len(blk, v.len + delta);
636 htx->data += delta;
637 }
638 else { /* Do a degrag first (it is always an expansion) */
		struct htx_blk tmpblk;
		int32_t offset;

		/* use tmpblk to set new block size before defrag and to compute
		 * the offset after defrag
		 */
		tmpblk.addr = blk->addr;
		tmpblk.info = blk->info;
		htx_set_blk_value_len(&tmpblk, v.len + delta);

		/* htx_defrag() will take care to update the block size and the htx message */
		blk = htx_defrag(htx, blk, tmpblk.info);

Christopher Faulet159873a2021-06-09 17:30:40 +0200652 /* newblk is now the new HTX block. Compute the offset to copy/move payload */
		offset = blk->addr - tmpblk.addr;

		/* move the end first and copy new data */
		memmove(old.ptr + offset + new.len, old.ptr + offset + old.len, (v.ptr + v.len) - (old.ptr + old.len));
		memcpy(old.ptr + offset, new.ptr, new.len);
	}
	return blk;
}

/* Transfer HTX blocks from <src> to <dst>, stopping on the first block of the
 * type <mark> (typically EOH or EOT) or when <count> bytes were moved
 * (including payload and meta-data). It returns the number of bytes moved and
 * the last HTX block inserted in <dst>.
 */
struct htx_ret htx_xfer_blks(struct htx *dst, struct htx *src, uint32_t count,
			     enum htx_blk_type mark)
{
	struct htx_blk *blk, *dstblk;
	struct htx_blk *srcref, *dstref;
	enum htx_blk_type type;
	uint32_t info, max, sz, ret;

	ret = htx_used_space(dst);
	srcref = dstref = dstblk = NULL;

	/* blocks are not removed yet from <src> HTX message to be able to
	 * rollback the transfer if all the headers/trailers are not copied.
	 */
	for (blk = htx_get_head_blk(src); blk && count; blk = htx_get_next_blk(src, blk)) {
		type = htx_get_blk_type(blk);

		/* Ignore unused block */
		if (type == HTX_BLK_UNUSED)
			continue;

		max = htx_get_max_blksz(dst, count);
		if (!max)
			break;

		sz = htx_get_blksz(blk);
		info = blk->info;
		if (sz > max) {
			/* Only DATA blocks can be partially xferred */
			if (type != HTX_BLK_DATA)
				break;
			sz = max;
			info = (type << 28) + sz;
		}

		dstblk = htx_reserve_nxblk(dst, sz);
		if (!dstblk)
			break;
		dstblk->info = info;
		memcpy(htx_get_blk_ptr(dst, dstblk), htx_get_blk_ptr(src, blk), sz);

		count -= sizeof(dstblk) + sz;
		if (blk->info != info) {
			/* Partial xfer: don't remove <blk> from <src> but
			 * resize its content */
			htx_cut_data_blk(src, blk, sz);
			break;
		}

		if (type == mark) {
			blk = htx_get_next_blk(src, blk);
			srcref = dstref = NULL;
			break;
		}

		/* Save <blk> to <srcref> and <dstblk> to <dstref> when we start
		 * to xfer headers or trailers. When EOH/EOT block is reached,
		 * both are reset. It is mandatory to be able to rollback a
		 * partial transfer.
		 */
		if (!srcref && !dstref &&
		    (type == HTX_BLK_REQ_SL || type == HTX_BLK_RES_SL || type == HTX_BLK_TLR)) {
			srcref = blk;
			dstref = dstblk;
		}
		else if (type == HTX_BLK_EOH || type == HTX_BLK_EOT)
			srcref = dstref = NULL;
	}

	if (unlikely(dstref)) {
		/* The headers or trailers part was partially xferred, so roll
		 * back the copy by removing all blocks between <dstref> and
		 * <dstblk>, both included. <dstblk> may be NULL.
		 */
		while (dstref && dstref != dstblk)
			dstref = htx_remove_blk(dst, dstref);
		if (dstblk)
			htx_remove_blk(dst, dstblk);

		/* If the <dst> HTX message is now empty, it means the headers
		 * or trailers part is too big to be copied at once.
		 */
		if (htx_is_empty(dst))
			src->flags |= HTX_FL_PARSING_ERROR;
	}

	/* Now, remove xferred blocks from <src> htx message */
	if (!blk && !srcref) {
		/* End of src reached, all blocks were consumed, drain all data */
		htx_drain(src, src->data);
	}
	else {
		/* Remove all blocks from the head up to <blk>, or <srcref> if defined, excluded */
		srcref = (srcref ? srcref : blk);
		for (blk = htx_get_head_blk(src); blk && blk != srcref; blk = htx_remove_blk(src, blk));
	}

  end:
	ret = htx_used_space(dst) - ret;
	return (struct htx_ret){.ret = ret, .blk = dstblk};
}
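
/* Usage sketch (illustrative, not part of the upstream file): moving at most
 * <count> bytes from <src> to <dst> and stopping once the end of the headers
 * (HTX_BLK_EOH) has been transferred. The helper name is hypothetical.
 */
static uint32_t htx_example_xfer_headers(struct htx *dst, struct htx *src, uint32_t count)
{
	struct htx_ret htxret;

	htxret = htx_xfer_blks(dst, src, count, HTX_BLK_EOH);
	/* htxret.blk is the last block inserted in <dst>, htxret.ret the number
	 * of bytes moved (payload and meta-data included).
	 */
	return htxret.ret;
}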
770
Christopher Fauleta3d2a162018-10-22 08:59:39 +0200771/* Replaces an header by a new one. The new header can be smaller or larger than
772 * the old one. It returns the new block on success, otherwise it returns NULL.
Willy Tarreaued00e342018-12-07 08:47:45 +0100773 * The header name is always lower cased.
Christopher Fauleta3d2a162018-10-22 08:59:39 +0200774 */
struct htx_blk *htx_replace_header(struct htx *htx, struct htx_blk *blk,
				   const struct ist name, const struct ist value)
{
	enum htx_blk_type type;
	void *ptr;
	int32_t delta;
	int ret;

	type = htx_get_blk_type(blk);
	if (type != HTX_BLK_HDR)
		return NULL;

	delta = name.len + value.len - htx_get_blksz(blk);
	ret = htx_prepare_blk_expansion(htx, blk, delta);
	if (!ret)
		return NULL; /* not enough space */

	/* Replacing in place or at a new address is the same here. We replace
	 * the whole header (name+value). Only take care to defrag the message
	 * if necessary. */
	if (ret == 3)
		blk = htx_defrag(htx, blk, (type << 28) + (value.len << 8) + name.len);
	else {
		/* Set the new block size and update HTX message */
		blk->info = (type << 28) + (value.len << 8) + name.len;
		htx->data += delta;
	}

	/* Finally, copy data. */
	ptr = htx_get_blk_ptr(htx, blk);
	ist2bin_lc(ptr, name);
	memcpy(ptr + name.len, value.ptr, value.len);
	return blk;
}
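
/* Usage sketch (illustrative, not part of the upstream file): locating a
 * header block by name and replacing it. isteqi() is assumed to be the
 * case-insensitive ist comparison available through the includes above; the
 * helper name is hypothetical.
 */
static struct htx_blk *htx_example_set_header(struct htx *htx, const struct ist name,
					      const struct ist value)
{
	struct htx_blk *blk;

	for (blk = htx_get_head_blk(htx); blk; blk = htx_get_next_blk(htx, blk)) {
		enum htx_blk_type type = htx_get_blk_type(blk);

		if (type == HTX_BLK_EOH)
			break; /* end of the headers, nothing found */
		if (type == HTX_BLK_HDR && isteqi(htx_get_blk_name(htx, blk), name))
			return htx_replace_header(htx, blk, name, value);
	}
	return NULL;
}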

/* Replaces the parts of the start-line. It returns the new start-line on
 * success, otherwise it returns NULL. It is the caller's responsibility to
 * update sl->info, if necessary.
 */
struct htx_sl *htx_replace_stline(struct htx *htx, struct htx_blk *blk, const struct ist p1,
				  const struct ist p2, const struct ist p3)
{
	enum htx_blk_type type;
	struct htx_sl *sl;
	struct htx_sl tmp; /* used to save sl->info and sl->flags */
	uint32_t sz;
	int32_t delta;
	int ret;

	type = htx_get_blk_type(blk);
	if (type != HTX_BLK_REQ_SL && type != HTX_BLK_RES_SL)
		return NULL;

	/* Save start-line info and flags */
	sl = htx_get_blk_ptr(htx, blk);
	tmp.info = sl->info;
	tmp.flags = sl->flags;

	sz = htx_get_blksz(blk);
	delta = sizeof(*sl) + p1.len + p2.len + p3.len - sz;
	ret = htx_prepare_blk_expansion(htx, blk, delta);
	if (!ret)
		return NULL; /* not enough space */

	/* Replacing in place or at a new address is the same here. We replace
	 * the whole start-line. Only take care to defrag the message if necessary. */
	if (ret == 3) {
		blk = htx_defrag(htx, blk, (type << 28) + sz + delta);
	}
	else {
		/* Set the new block size and update HTX message */
		blk->info = (type << 28) + sz + delta;
		htx->data += delta;
	}

	/* Restore start-line info and flags and copy parts of the start-line */
	sl = htx_get_blk_ptr(htx, blk);
	sl->info = tmp.info;
	sl->flags = tmp.flags;

	HTX_SL_P1_LEN(sl) = p1.len;
	HTX_SL_P2_LEN(sl) = p2.len;
	HTX_SL_P3_LEN(sl) = p3.len;

	memcpy(HTX_SL_P1_PTR(sl), p1.ptr, p1.len);
	memcpy(HTX_SL_P2_PTR(sl), p2.ptr, p2.len);
	memcpy(HTX_SL_P3_PTR(sl), p3.ptr, p3.len);

	return sl;
}

/* Reserves the maximum possible size for an HTX data block, by extending an
 * existing one or by creating a new one. It returns a compound result with the
 * HTX block and the position where new data must be inserted (0 for a new
 * block). If an error occurs or if there is no space left, NULL is returned
 * instead of a pointer on an HTX block.
 */
struct htx_ret htx_reserve_max_data(struct htx *htx)
{
	struct htx_blk *blk, *tailblk;
	uint32_t sz, room;
	int32_t len = htx_free_data_space(htx);

	if (htx->head == -1)
		goto rsv_new_block;

	if (!len)
		return (struct htx_ret){.ret = 0, .blk = NULL};

	/* get the tail and head block */
	tailblk = htx_get_tail_blk(htx);
	if (tailblk == NULL)
		goto rsv_new_block;
	sz = htx_get_blksz(tailblk);

	/* Don't try to append data if the last inserted block is not of the
	 * same type */
	if (htx_get_blk_type(tailblk) != HTX_BLK_DATA)
		goto rsv_new_block;

	/*
	 * Same type and enough space: append data
	 */
	if (!htx->head_addr) {
		if (tailblk->addr+sz != htx->tail_addr)
			goto rsv_new_block;
		room = (htx_pos_to_addr(htx, htx->tail) - htx->tail_addr);
	}
	else {
		if (tailblk->addr+sz != htx->head_addr)
			goto rsv_new_block;
		room = (htx->end_addr - htx->head_addr);
	}
	BUG_ON((int32_t)room < 0);
	if (room < len)
		len = room;

  append_data:
	htx_change_blk_value_len(htx, tailblk, sz+len);

	BUG_ON((int32_t)htx->tail_addr < 0);
	BUG_ON((int32_t)htx->head_addr < 0);
	BUG_ON(htx->end_addr > htx->tail_addr);
	BUG_ON(htx->head_addr > htx->end_addr);
	return (struct htx_ret){.ret = sz, .blk = tailblk};

  rsv_new_block:
	blk = htx_add_blk(htx, HTX_BLK_DATA, len);
	if (!blk)
		return (struct htx_ret){.ret = 0, .blk = NULL};
	blk->info += len;
	return (struct htx_ret){.ret = 0, .blk = blk};
}
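
/* Usage sketch (illustrative, not part of the upstream file): the returned
 * .blk is the DATA block to fill and .ret is the position inside it where new
 * bytes must be written. A caller writing less than the reservation can shrink
 * the block back with htx_change_blk_value_len(), the same helper
 * htx_truncate() uses above. The helper name is hypothetical.
 */
static size_t htx_example_fill_max(struct htx *htx, const struct ist data)
{
	struct htx_ret htxret = htx_reserve_max_data(htx);
	size_t len;

	if (!htxret.blk)
		return 0;
	len = htx_get_blksz(htxret.blk) - htxret.ret; /* room actually reserved */
	if (len > data.len)
		len = data.len;
	memcpy(htx_get_blk_ptr(htx, htxret.blk) + htxret.ret, data.ptr, len);
	/* give back the unused part of the reservation */
	htx_change_blk_value_len(htx, htxret.blk, htxret.ret + len);
	return len;
}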

/* Adds an HTX block of type DATA in <htx>. It first tries to append data if
 * possible. It returns the number of bytes consumed from <data>, which may be
 * zero if nothing could be copied.
 */
size_t htx_add_data(struct htx *htx, const struct ist data)
{
	struct htx_blk *blk, *tailblk;
	void *ptr;
	uint32_t sz, room;
	int32_t len = data.len;

	/* Not enough space to store data */
	if (len > htx_free_data_space(htx))
		len = htx_free_data_space(htx);

	if (!len)
		return 0;

	if (htx->head == -1)
		goto add_new_block;

	/* get the tail and head block */
	tailblk = htx_get_tail_blk(htx);
	if (tailblk == NULL)
		goto add_new_block;
	sz = htx_get_blksz(tailblk);

	/* Don't try to append data if the last inserted block is not of the
	 * same type */
	if (htx_get_blk_type(tailblk) != HTX_BLK_DATA)
		goto add_new_block;

	/*
	 * Same type and enough space: append data
	 */
	if (!htx->head_addr) {
		if (tailblk->addr+sz != htx->tail_addr)
			goto add_new_block;
		room = (htx_pos_to_addr(htx, htx->tail) - htx->tail_addr);
	}
	else {
		if (tailblk->addr+sz != htx->head_addr)
			goto add_new_block;
		room = (htx->end_addr - htx->head_addr);
	}
	BUG_ON((int32_t)room < 0);
	if (room < len)
		len = room;

  append_data:
	/* Append data and update the block itself */
	ptr = htx_get_blk_ptr(htx, tailblk);
	memcpy(ptr + sz, data.ptr, len);
	htx_change_blk_value_len(htx, tailblk, sz+len);

	BUG_ON((int32_t)htx->tail_addr < 0);
	BUG_ON((int32_t)htx->head_addr < 0);
	BUG_ON(htx->end_addr > htx->tail_addr);
	BUG_ON(htx->head_addr > htx->end_addr);
	return len;

  add_new_block:
	blk = htx_add_blk(htx, HTX_BLK_DATA, len);
	if (!blk)
		return 0;

	blk->info += len;
	memcpy(htx_get_blk_ptr(htx, blk), data.ptr, len);
	return len;
}
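
/* Usage sketch (illustrative, not part of the upstream file): htx_add_data()
 * may copy only part of <data>, so callers typically loop or remember how much
 * is left for a later call. The helper name is hypothetical.
 */
static size_t htx_example_push_data(struct htx *htx, struct ist data)
{
	size_t sent = 0;

	while (data.len) {
		size_t ret = htx_add_data(htx, data);

		if (!ret)
			break; /* message full, retry later with the remaining bytes */
		sent += ret;
		data = istadv(data, ret);
	}
	return sent;
}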

/* Adds an HTX block of type DATA in <htx> just after all other DATA
 * blocks. Because it relies on htx_add_data_atonce(), the data may be appended
 * to an existing DATA block if possible. But, if the function succeeds, it will
 * be the last DATA block in all cases. If an error occurred, NULL is returned.
 * Otherwise, on success, the updated block (or the new one) is returned.
 */
struct htx_blk *htx_add_last_data(struct htx *htx, struct ist data)
{
	struct htx_blk *blk, *pblk;

	blk = htx_add_data_atonce(htx, data);
	if (!blk)
		return NULL;

	for (pblk = htx_get_prev_blk(htx, blk); pblk; pblk = htx_get_prev_blk(htx, pblk)) {
		if (htx_get_blk_type(pblk) <= HTX_BLK_DATA)
			break;

		/* Swap .addr and .info fields */
		blk->addr ^= pblk->addr; pblk->addr ^= blk->addr; blk->addr ^= pblk->addr;
		blk->info ^= pblk->info; pblk->info ^= blk->info; blk->info ^= pblk->info;

		if (blk->addr == pblk->addr)
			blk->addr += htx_get_blksz(pblk);
		blk = pblk;
	}

	return blk;
}

/* Moves the block <blk> just before the block <ref>. Both blocks must be in the
 * HTX message <htx> and <blk> must be placed after <ref>. Pointers to these
 * blocks are updated to remain valid after the move. */
void htx_move_blk_before(struct htx *htx, struct htx_blk **blk, struct htx_blk **ref)
{
	struct htx_blk *cblk, *pblk;

	cblk = *blk;
	for (pblk = htx_get_prev_blk(htx, cblk); pblk; pblk = htx_get_prev_blk(htx, pblk)) {
		/* Swap .addr and .info fields */
		cblk->addr ^= pblk->addr; pblk->addr ^= cblk->addr; cblk->addr ^= pblk->addr;
		cblk->info ^= pblk->info; pblk->info ^= cblk->info; cblk->info ^= pblk->info;

		if (cblk->addr == pblk->addr)
			cblk->addr += htx_get_blksz(pblk);
		if (pblk == *ref)
			break;
		cblk = pblk;
	}
	*blk = cblk;
	*ref = pblk;
}

/* Appends the HTX message <src> to the HTX message <dst>. It returns 1 on
 * success and 0 on error. Either the whole message is copied or nothing. If an
 * error occurred, all blocks already appended to <dst> are removed by truncating
 * <dst> back to its original length.
 */
int htx_append_msg(struct htx *dst, const struct htx *src)
{
	struct htx_blk *blk, *newblk;
	enum htx_blk_type type;
	uint32_t blksz, offset = dst->data;

	for (blk = htx_get_head_blk(src); blk; blk = htx_get_next_blk(src, blk)) {
		type = htx_get_blk_type(blk);

		if (type == HTX_BLK_UNUSED)
			continue;

		blksz = htx_get_blksz(blk);
		newblk = htx_add_blk(dst, type, blksz);
		if (!newblk)
			goto error;
		newblk->info = blk->info;
		memcpy(htx_get_blk_ptr(dst, newblk), htx_get_blk_ptr(src, blk), blksz);
	}

	return 1;

  error:
	htx_truncate(dst, offset);
	return 0;
}
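
/* Usage sketch (illustrative, not part of the upstream file): appending a
 * message built in a temporary buffer into a destination message; on failure
 * <dst> is restored to its previous length by htx_append_msg() itself. The
 * helper name is hypothetical.
 */
static int htx_example_concat(struct htx *dst, struct buffer *tmpbuf)
{
	struct htx *src = htxbuf(tmpbuf);

	if (htx_is_empty(src))
		return 1; /* nothing to do */
	return htx_append_msg(dst, src);
}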