/*
 * internal HTTP message
 *
 * Copyright 2018 HAProxy Technologies, Christopher Faulet <cfaulet@haproxy.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <haproxy/chunk.h>
#include <haproxy/htx.h>

struct htx htx_empty = { .size = 0, .data = 0, .head = -1, .tail = -1, .first = -1 };

/* Defragments an HTX message. It removes unused blocks and unwraps the payloads
 * part. A temporary buffer is used to do so. This function never fails. Most of
 * the time, we need to keep a reference on a specific HTX block. Thus, if <blk>
 * is set, the pointer to its new position, after defrag, is returned. In
 * addition, if the size of the block must be altered, <blkinfo> must be
 * provided (!= 0). But in this case, it remains the caller's responsibility to
 * update the block content.
 */
/* TODO: merge data blocks into one */
struct htx_blk *htx_defrag(struct htx *htx, struct htx_blk *blk, uint32_t blkinfo)
{
        struct buffer *chunk = get_trash_chunk();
        struct htx *tmp = htxbuf(chunk);
        struct htx_blk *newblk, *oldblk;
        uint32_t new, old, blkpos;
        uint32_t addr, blksz;
        int32_t first = -1;

        if (htx->head == -1)
                return NULL;

        blkpos = -1;

        new  = 0;
        addr = 0;
        tmp->size = htx->size;
        tmp->data = 0;

        /* start from the head */
        for (old = htx_get_head(htx); old != -1; old = htx_get_next(htx, old)) {
                oldblk = htx_get_blk(htx, old);
                if (htx_get_blk_type(oldblk) == HTX_BLK_UNUSED)
                        continue;

                blksz = htx_get_blksz(oldblk);
                memcpy((void *)tmp->blocks + addr, htx_get_blk_ptr(htx, oldblk), blksz);

                /* update the start-line position */
                if (htx->first == old)
                        first = new;

                newblk = htx_get_blk(tmp, new);
                newblk->addr = addr;
                newblk->info = oldblk->info;

                /* if <blk> is defined, save its new position */
                if (blk != NULL && blk == oldblk) {
                        if (blkinfo)
                                newblk->info = blkinfo;
                        blkpos = new;
                }

                blksz = htx_get_blksz(newblk);
                addr += blksz;
                tmp->data += blksz;
                new++;
        }

        htx->data = tmp->data;
        htx->first = first;
        htx->head = 0;
        htx->tail = new - 1;
        htx->head_addr = htx->end_addr = 0;
        htx->tail_addr = addr;
        htx->flags &= ~HTX_FL_FRAGMENTED;
        memcpy((void *)htx->blocks, (void *)tmp->blocks, htx->size);

        return ((blkpos == -1) ? NULL : htx_get_blk(htx, blkpos));
}

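/* Usage sketch: htx_defrag() is typically called when a block must grow while a
 * valid pointer on that block must be preserved, as htx_replace_header() does
 * below. The info word passed here is only an illustration of the HDR layout
 * used elsewhere in this file (<vlen> and <nlen> are assumed lengths):
 *
 *      blk = htx_defrag(htx, blk, (HTX_BLK_HDR << 28) + (vlen << 8) + nlen);
 *      if (blk)
 *              ; // <blk> now points to the block at its new, unwrapped position
 */
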
/* Defragments the blocks of an HTX message. The payloads part is kept untouched
 * here. This function moves all used blocks back, starting at position 0, and
 * removes unused blocks. It must never be called with an empty message.
 */
static void htx_defrag_blks(struct htx *htx)
{
        int32_t pos, new;

        new = 0;
        for (pos = htx_get_head(htx); pos != -1; pos = htx_get_next(htx, pos)) {
                struct htx_blk *posblk, *newblk;

                if (pos == new) {
                        new++;
                        continue;
                }

                posblk = htx_get_blk(htx, pos);
                if (htx_get_blk_type(posblk) == HTX_BLK_UNUSED)
                        continue;

                if (htx->first == pos)
                        htx->first = new;
                newblk = htx_get_blk(htx, new++);
                newblk->info = posblk->info;
                newblk->addr = posblk->addr;
        }
        BUG_ON(!new);
        htx->head = 0;
        htx->tail = new - 1;
}

/* Reserves a new block in the HTX message <htx> with a content of <blksz>
 * bytes. If there is not enough space, NULL is returned. Otherwise the reserved
 * block is returned and the HTX message is updated. Space for this new block is
 * reserved in the HTX message. But it is the caller's responsibility to set the
 * right info in the block to reflect the stored data.
 */
static struct htx_blk *htx_reserve_nxblk(struct htx *htx, uint32_t blksz)
{
        struct htx_blk *blk;
        uint32_t tail, headroom, tailroom;

        if (blksz > htx_free_data_space(htx))
                return NULL; /* full */

        if (htx->head == -1) {
                /* Empty message */
                htx->head = htx->tail = htx->first = 0;
                blk = htx_get_blk(htx, htx->tail);
                blk->addr = 0;
                htx->data = blksz;
                htx->tail_addr = blksz;
                return blk;
        }

        /* Find the block's position. First, we try to get the next position in
         * the message, increasing the tail by one. If this position is not
         * available because of holes, we try to defrag the blocks without
         * touching their payload. If it is impossible, we fully defrag the
         * message.
         */
        tail = htx->tail + 1;
        if (htx_pos_to_addr(htx, tail) >= htx->tail_addr)
                ;
        else if (htx->head > 0) {
                htx_defrag_blks(htx);
                tail = htx->tail + 1;
                BUG_ON(htx_pos_to_addr(htx, tail) < htx->tail_addr);
        }
        else
                goto defrag;

        /* Now, we have found the block's position. Try to find where to put its
         * payload. The free space is split in two areas:
         *
         *   * The free space in front of the blocks table. This one is used if and
         *     only if the other one was not used yet.
         *
         *   * The free space at the beginning of the message. Once this one is
         *     used, the other one is never used again, until the next defrag.
         */
        headroom = (htx->end_addr - htx->head_addr);
        tailroom = (!htx->head_addr ? htx_pos_to_addr(htx, tail) - htx->tail_addr : 0);
        BUG_ON((int32_t)headroom < 0);
        BUG_ON((int32_t)tailroom < 0);

        if (blksz <= tailroom) {
                blk = htx_get_blk(htx, tail);
                blk->addr = htx->tail_addr;
                htx->tail_addr += blksz;
        }
        else if (blksz <= headroom) {
                blk = htx_get_blk(htx, tail);
                blk->addr = htx->head_addr;
                htx->head_addr += blksz;
        }
        else {
          defrag:
                /* need to defragment the message before inserting upfront */
                htx_defrag(htx, NULL, 0);
                tail = htx->tail + 1;
                blk = htx_get_blk(htx, tail);
                blk->addr = htx->tail_addr;
                htx->tail_addr += blksz;
        }

        htx->tail = tail;
        htx->data += blksz;
        /* Set first position if not already set */
        if (htx->first == -1)
                htx->first = tail;

        BUG_ON((int32_t)htx->tail_addr < 0);
        BUG_ON((int32_t)htx->head_addr < 0);
        BUG_ON(htx->end_addr > htx->tail_addr);
        BUG_ON(htx->head_addr > htx->end_addr);

        return blk;
}

/* Prepares the block for an expansion of its payload. The payload will be
 * expanded by <delta> bytes and we need to find where this expansion will be
 * performed. It can be a compression if <delta> is negative. This function only
 * updates the addresses. The caller has the responsibility to perform the
 * expansion and to update the block and the HTX message accordingly. No error
 * must occur. It returns the following values:
 *
 *  0: the expansion cannot be performed, there is not enough space.
 *
 *  1: the expansion must be performed in place, there is enough space after
 *     the block's payload to handle it. This is especially true if it is a
 *     compression and not an expansion.
 *
 *  2: the block's payload must be moved to the new block address before doing
 *     the expansion.
 *
 *  3: the HTX message must be defragmented.
 */
static int htx_prepare_blk_expansion(struct htx *htx, struct htx_blk *blk, int32_t delta)
{
        uint32_t sz, tailroom, headroom;
        int ret = 3;

        BUG_ON(htx->head == -1);

        headroom = (htx->end_addr - htx->head_addr);
        tailroom = (htx_pos_to_addr(htx, htx->tail) - htx->tail_addr);
        BUG_ON((int32_t)headroom < 0);
        BUG_ON((int32_t)tailroom < 0);

        sz = htx_get_blksz(blk);
        if (delta <= 0) {
                /* It is a compression, it can be performed in place */
                if (blk->addr+sz == htx->tail_addr)
                        htx->tail_addr += delta;
                else if (blk->addr+sz == htx->head_addr)
                        htx->head_addr += delta;
                ret = 1;
        }
        else if (delta > htx_free_space(htx)) {
                /* There is not enough space to handle the expansion */
                ret = 0;
        }
        else if (blk->addr+sz == htx->tail_addr) {
                /* The block's payload is just before the tail room */
                if (delta < tailroom) {
                        /* Expand the block's payload */
                        htx->tail_addr += delta;
                        ret = 1;
                }
                else if ((sz + delta) < headroom) {
                        uint32_t oldaddr = blk->addr;

                        /* Move the block's payload into the headroom */
                        blk->addr = htx->head_addr;
                        htx->tail_addr -= sz;
                        htx->head_addr += sz + delta;
                        if (oldaddr == htx->end_addr) {
                                if (htx->end_addr == htx->tail_addr) {
                                        htx->tail_addr = htx->head_addr;
                                        htx->head_addr = htx->end_addr = 0;
                                }
                                else
                                        htx->end_addr += sz;
                        }
                        ret = 2;
                }
        }
        else if (blk->addr+sz == htx->head_addr) {
                /* The block's payload is just before the head room */
                if (delta < headroom) {
                        /* Expand the block's payload */
                        htx->head_addr += delta;
                        ret = 1;
                }
        }
        else {
                /* The block's payload is not at the edge of either room */
                if (!htx->head_addr && sz+delta < tailroom) {
                        /* Move the block's payload into the tailroom */
                        if (blk->addr == htx->end_addr)
                                htx->end_addr += sz;
                        blk->addr = htx->tail_addr;
                        htx->tail_addr += sz + delta;
                        ret = 2;
                }
                else if (sz+delta < headroom) {
                        /* Move the block's payload into the headroom */
                        if (blk->addr == htx->end_addr)
                                htx->end_addr += sz;
                        blk->addr = htx->head_addr;
                        htx->head_addr += sz + delta;
                        ret = 2;
                }
        }
        /* Otherwise defrag the HTX message */

        BUG_ON((int32_t)htx->tail_addr < 0);
        BUG_ON((int32_t)htx->head_addr < 0);
        BUG_ON(htx->end_addr > htx->tail_addr);
        BUG_ON(htx->head_addr > htx->end_addr);
        return ret;
}

/* Adds a new block of type <type> in the HTX message <htx>. Its content size is
 * passed but it is the caller's responsibility to do the copy.
 */
struct htx_blk *htx_add_blk(struct htx *htx, enum htx_blk_type type, uint32_t blksz)
{
        struct htx_blk *blk;

        BUG_ON(blksz >= 256 << 20);
        blk = htx_reserve_nxblk(htx, blksz);
        if (!blk)
                return NULL;
        BUG_ON(blk->addr > htx->size);

        blk->info = (type << 28);
        return blk;
}

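/* Usage sketch: a caller reserves a block and then fills both the info word and
 * the payload itself. The HDR info layout shown here is the one used by
 * htx_replace_header() below; <name> and <value> are assumed to be struct ist:
 *
 *      blk = htx_add_blk(htx, HTX_BLK_HDR, name.len + value.len);
 *      if (!blk)
 *              return NULL;
 *      blk->info += (value.len << 8) + name.len;
 *      ist2bin_lc(htx_get_blk_ptr(htx, blk), name);
 *      memcpy(htx_get_blk_ptr(htx, blk) + name.len, value.ptr, value.len);
 */
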
/* Removes the block <blk> from the HTX message <htx>. The function returns the
 * block following <blk> or NULL if <blk> is the last block or the last inserted
 * one.
 */
struct htx_blk *htx_remove_blk(struct htx *htx, struct htx_blk *blk)
{
        enum htx_blk_type type;
        uint32_t pos, addr, sz;

        BUG_ON(htx->head == -1);

        /* This is the last block in use */
        if (htx->head == htx->tail) {
                uint32_t flags = (htx->flags & ~HTX_FL_FRAGMENTED); /* Preserve flags except FRAGMENTED */

                htx_reset(htx);
                htx->flags = flags; /* restore flags */
                return NULL;
        }

        type = htx_get_blk_type(blk);
        pos = htx_get_blk_pos(htx, blk);
        sz = htx_get_blksz(blk);
        addr = blk->addr;
        if (type != HTX_BLK_UNUSED) {
                /* Mark the block as unused, decrement allocated size */
                htx->data -= htx_get_blksz(blk);
                blk->info = ((uint32_t)HTX_BLK_UNUSED << 28);
        }

        /* There are at least 2 blocks, so the tail is always > 0 */
        if (pos == htx->head) {
                /* move the head forward */
                htx->head++;
        }
        else if (pos == htx->tail) {
                /* remove the tail. This was the last inserted block so
                 * return NULL. */
                htx->tail--;
                blk = NULL;
                goto end;
        }
        else
                htx->flags |= HTX_FL_FRAGMENTED;

        blk = htx_get_blk(htx, pos+1);

  end:
        if (pos == htx->first)
                htx->first = (blk ? htx_get_blk_pos(htx, blk) : -1);

        if (htx->head == htx->tail) {
                /* If there is just one block in the HTX message, free space can
                 * be adjusted. This operation could save some defrags. */
                struct htx_blk *lastblk = htx_get_blk(htx, htx->tail);

                htx->head_addr = 0;
                htx->end_addr = lastblk->addr;
                htx->tail_addr = lastblk->addr+htx->data;
        }
        else {
                if (addr+sz == htx->tail_addr)
                        htx->tail_addr = addr;
                else if (addr+sz == htx->head_addr)
                        htx->head_addr = addr;
                if (addr == htx->end_addr) {
                        if (htx->tail_addr == htx->end_addr) {
                                htx->tail_addr = htx->head_addr;
                                htx->head_addr = htx->end_addr = 0;
                        }
                        else
                                htx->end_addr += sz;
                }
        }

        BUG_ON((int32_t)htx->tail_addr < 0);
        BUG_ON((int32_t)htx->head_addr < 0);
        BUG_ON(htx->end_addr > htx->tail_addr);
        BUG_ON(htx->head_addr > htx->end_addr);
        return blk;
}

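/* Usage sketch: since htx_remove_blk() returns the following block, removal
 * while walking the message is simply written as below (a similar pattern is
 * used by htx_truncate() and htx_drain() further down):
 *
 *      for (blk = htx_get_head_blk(htx); blk; ) {
 *              if (htx_get_blk_type(blk) == HTX_BLK_UNUSED)
 *                      blk = htx_remove_blk(htx, blk);
 *              else
 *                      blk = htx_get_next_blk(htx, blk);
 *      }
 */
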
/* Looks for the HTX block containing the offset <offset>, starting at the HTX
 * message's head. The function returns an htx_ret with the found HTX block and
 * the position inside this block where the offset is. If the offset <offset> is
 * outside of the HTX message, htx_ret.blk is set to NULL.
 */
struct htx_ret htx_find_offset(struct htx *htx, uint32_t offset)
{
        struct htx_blk *blk;
        struct htx_ret htxret = { .blk = NULL, .ret = 0 };

        if (offset >= htx->data)
                return htxret;

        for (blk = htx_get_head_blk(htx); blk && offset; blk = htx_get_next_blk(htx, blk)) {
                uint32_t sz = htx_get_blksz(blk);

                if (offset < sz)
                        break;
                offset -= sz;
        }
        htxret.blk = blk;
        htxret.ret = offset;
        return htxret;
}

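/* Usage sketch: locating the byte at a given offset, for instance to start
 * processing from it. <htxret.ret> is the offset inside the returned block:
 *
 *      struct htx_ret htxret = htx_find_offset(htx, offset);
 *
 *      if (htxret.blk) {
 *              struct ist v = htx_get_blk_value(htx, htxret.blk);
 *
 *              ; // the byte is at v.ptr + htxret.ret
 *      }
 */
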
/* Removes all blocks after the one containing the offset <offset>. This last
 * one may be truncated if it is a DATA block.
 */
void htx_truncate(struct htx *htx, uint32_t offset)
{
        struct htx_blk *blk;
        struct htx_ret htxret = htx_find_offset(htx, offset);

        blk = htxret.blk;
        if (blk && htxret.ret && htx_get_blk_type(blk) == HTX_BLK_DATA) {
                htx_change_blk_value_len(htx, blk, htxret.ret);
                blk = htx_get_next_blk(htx, blk);
        }
        while (blk)
                blk = htx_remove_blk(htx, blk);
}

/* Drains <count> bytes from the HTX message <htx>. If the last block is a DATA
 * block, it will be cut if necessary. Other blocks will be removed at once if
 * <count> is large enough. The function returns an htx_ret with the first block
 * remaining in the message and the amount of data drained. If everything is
 * removed, htx_ret.blk is set to NULL.
 */
struct htx_ret htx_drain(struct htx *htx, uint32_t count)
{
        struct htx_blk *blk;
        struct htx_ret htxret = { .blk = NULL, .ret = 0 };

        if (count == htx->data) {
                uint32_t flags = (htx->flags & ~HTX_FL_FRAGMENTED); /* Preserve flags except FRAGMENTED */

                htx_reset(htx);
                htx->flags = flags; /* restore flags */
                htxret.ret = count;
                return htxret;
        }

        blk = htx_get_head_blk(htx);
        while (count && blk) {
                uint32_t sz = htx_get_blksz(blk);
                enum htx_blk_type type = htx_get_blk_type(blk);

                /* Ignore unused block */
                if (type == HTX_BLK_UNUSED)
                        goto next;

                if (sz > count) {
                        if (type == HTX_BLK_DATA) {
                                htx_cut_data_blk(htx, blk, count);
                                htxret.ret += count;
                        }
                        break;
                }
                count -= sz;
                htxret.ret += sz;
          next:
                blk = htx_remove_blk(htx, blk);
        }
        htxret.blk = blk;

        return htxret;
}

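/* Usage sketch: consuming what was just forwarded, as a mux would do once
 * <sent> bytes have left the buffer (<sent> is an assumed variable name):
 *
 *      struct htx_ret htxret = htx_drain(htx, sent);
 *
 *      if (!htxret.blk)
 *              ; // everything was drained, the message is now empty
 *      else
 *              ; // htxret.ret bytes were drained, htxret.blk is the new head
 */
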
/* Tries to append data to the last inserted block, if the type matches and if
 * there is enough space to take it all. If the space wraps, the buffer is
 * defragmented and a new block is inserted. If an error occurred, NULL is
 * returned. Otherwise, on success, the updated block (or the new one) is
 * returned. Due to its nature this function can be expensive and should be
 * avoided whenever possible.
 */
struct htx_blk *htx_add_data_atonce(struct htx *htx, struct ist data)
{
        struct htx_blk *blk, *tailblk;
        void *ptr;
        uint32_t len, sz, tailroom, headroom;

        if (htx->head == -1)
                goto add_new_block;

        /* Not enough space to store data */
        if (data.len > htx_free_data_space(htx))
                return NULL;

        /* get the tail block and its size */
        tailblk = htx_get_tail_blk(htx);
        if (tailblk == NULL)
                goto add_new_block;
        sz = htx_get_blksz(tailblk);

        /* Don't try to append data if the last inserted block is not of the
         * same type */
        if (htx_get_blk_type(tailblk) != HTX_BLK_DATA)
                goto add_new_block;

        /*
         * Same type and enough space: append data
         */
        headroom = (htx->end_addr - htx->head_addr);
        tailroom = (htx_pos_to_addr(htx, htx->tail) - htx->tail_addr);
        BUG_ON((int32_t)headroom < 0);
        BUG_ON((int32_t)tailroom < 0);

        len = data.len;
        if (tailblk->addr+sz == htx->tail_addr) {
                if (data.len <= tailroom)
                        goto append_data;
                else if (!htx->head_addr) {
                        len = tailroom;
                        goto append_data;
                }
        }
        else if (tailblk->addr+sz == htx->head_addr && data.len <= headroom)
                goto append_data;

        goto add_new_block;

  append_data:
        /* Append data and update the block itself */
        ptr = htx_get_blk_ptr(htx, tailblk);
        memcpy(ptr+sz, data.ptr, len);
        htx_change_blk_value_len(htx, tailblk, sz+len);

        if (data.len == len) {
                blk = tailblk;
                goto end;
        }
        data = istadv(data, len);

  add_new_block:
        blk = htx_add_blk(htx, HTX_BLK_DATA, data.len);
        if (!blk)
                return NULL;

        blk->info += data.len;
        memcpy(htx_get_blk_ptr(htx, blk), data.ptr, data.len);

  end:
        BUG_ON((int32_t)htx->tail_addr < 0);
        BUG_ON((int32_t)htx->head_addr < 0);
        BUG_ON(htx->end_addr > htx->tail_addr);
        BUG_ON(htx->head_addr > htx->end_addr);
        return blk;
}

/* Replaces a value part of a block by a new one. The new part can be smaller or
 * larger than the old one. This function works for any kind of block with
 * attached data. It returns the new block on success, otherwise it returns
 * NULL.
 */
struct htx_blk *htx_replace_blk_value(struct htx *htx, struct htx_blk *blk,
                                      const struct ist old, const struct ist new)
{
        struct ist n, v;
        int32_t delta;
        int ret;

        n = htx_get_blk_name(htx, blk);
        v = htx_get_blk_value(htx, blk);
        delta = new.len - old.len;
        ret = htx_prepare_blk_expansion(htx, blk, delta);
        if (!ret)
                return NULL; /* not enough space */

        if (ret == 1) { /* Replace in place */
                if (delta <= 0) {
                        /* compression: copy new data first then move the end */
                        memcpy(old.ptr, new.ptr, new.len);
                        memmove(old.ptr + new.len, istend(old),
                                istend(v) - istend(old));
                }
                else {
                        /* expansion: move the end first then copy new data */
                        memmove(old.ptr + new.len, istend(old),
                                istend(v) - istend(old));
                        memcpy(old.ptr, new.ptr, new.len);
                }

                /* set the new block size and update the HTX message */
                htx_set_blk_value_len(blk, v.len + delta);
                htx->data += delta;
        }
        else if (ret == 2) { /* New address but no defrag */
                void *ptr = htx_get_blk_ptr(htx, blk);

                /* Copy the name, if any */
                memcpy(ptr, n.ptr, n.len);
                ptr += n.len;

                /* Copy value before old part, if any */
                memcpy(ptr, v.ptr, old.ptr - v.ptr);
                ptr += old.ptr - v.ptr;

                /* Copy new value */
                memcpy(ptr, new.ptr, new.len);
                ptr += new.len;

                /* Copy value after old part, if any */
                memcpy(ptr, istend(old), istend(v) - istend(old));

                /* set the new block size and update the HTX message */
                htx_set_blk_value_len(blk, v.len + delta);
                htx->data += delta;
        }
        else { /* Do a defrag first (it is always an expansion) */
                struct htx_blk tmpblk;
                int32_t offset;

                /* use tmpblk to set the new block size before defrag and to
                 * compute the offset after defrag
                 */
                tmpblk.addr = blk->addr;
                tmpblk.info = blk->info;
                htx_set_blk_value_len(&tmpblk, v.len + delta);

                /* htx_defrag() will take care to update the block size and the htx message */
                blk = htx_defrag(htx, blk, tmpblk.info);

                /* <blk> is now the new HTX block. Compute the offset to copy/move the payload */
                offset = blk->addr - tmpblk.addr;

                /* move the end first, then copy the new data */
                memmove(old.ptr + offset + new.len, old.ptr + offset + old.len,
                        istend(v) - istend(old));
                memcpy(old.ptr + offset, new.ptr, new.len);
        }
        return blk;
}

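/* Usage sketch: rewriting a piece of a block's value, e.g. one element inside a
 * header value. <old> must point inside the block's current value; <off>,
 * <oldlen> and the replacement string are purely illustrative:
 *
 *      struct ist v = htx_get_blk_value(htx, blk);
 *      struct ist old = ist2(v.ptr + off, oldlen);   // slice inside <v>
 *
 *      blk = htx_replace_blk_value(htx, blk, old, ist("close"));
 *      if (!blk)
 *              ; // not enough space in the message
 */
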
/* Transfers HTX blocks from <src> to <dst>, stopping on the first block of the
 * type <mark> (typically EOH or EOT) or when <count> bytes were moved
 * (including payload and meta-data). It returns the number of bytes moved and
 * the last HTX block inserted in <dst>.
 */
struct htx_ret htx_xfer_blks(struct htx *dst, struct htx *src, uint32_t count,
                             enum htx_blk_type mark)
{
        struct htx_blk *blk, *dstblk;
        struct htx_blk *srcref, *dstref;
        enum htx_blk_type type;
        uint32_t info, max, sz, ret;

        ret = htx_used_space(dst);
        srcref = dstref = dstblk = NULL;

        /* blocks are not removed yet from the <src> HTX message, to be able to
         * roll back the transfer if all the headers/trailers are not copied.
         */
        for (blk = htx_get_head_blk(src); blk && count; blk = htx_get_next_blk(src, blk)) {
                type = htx_get_blk_type(blk);

                /* Ignore unused block */
                if (type == HTX_BLK_UNUSED)
                        continue;


                max = htx_get_max_blksz(dst, count);
                if (!max)
                        break;

                sz = htx_get_blksz(blk);
                info = blk->info;
                if (sz > max) {
                        /* Only DATA blocks can be partially xferred */
                        if (type != HTX_BLK_DATA)
                                break;
                        sz = max;
                        info = (type << 28) + sz;
                }

                dstblk = htx_reserve_nxblk(dst, sz);
                if (!dstblk)
                        break;
                dstblk->info = info;
                memcpy(htx_get_blk_ptr(dst, dstblk), htx_get_blk_ptr(src, blk), sz);

                count -= sizeof(dstblk) + sz;
                if (blk->info != info) {
                        /* Partial xfer: don't remove <blk> from <src> but
                         * resize its content */
                        htx_cut_data_blk(src, blk, sz);
                        break;
                }

                if (type == mark) {
                        blk = htx_get_next_blk(src, blk);
                        srcref = dstref = NULL;
                        break;
                }

                /* Save <blk> to <srcref> and <dstblk> to <dstref> when we start
                 * to xfer headers or trailers. When the EOH/EOT block is reached,
                 * both are reset. It is mandatory to be able to roll back a
                 * partial transfer.
                 */
                if (!srcref && !dstref &&
                    (type == HTX_BLK_REQ_SL || type == HTX_BLK_RES_SL || type == HTX_BLK_TLR)) {
                        srcref = blk;
                        dstref = dstblk;
                }
                else if (type == HTX_BLK_EOH || type == HTX_BLK_EOT)
                        srcref = dstref = NULL;
        }

        if (unlikely(dstref)) {
                /* The headers or trailers part was partially xferred, so roll back
                 * the copy by removing all blocks between <dstref> and <dstblk>,
                 * both included.
                 */
                while (dstref && dstref != dstblk)
                        dstref = htx_remove_blk(dst, dstref);
                htx_remove_blk(dst, dstblk);

                /* If the <dst> HTX message is empty, it means the headers or
                 * trailers part is too big to be copied at once.
                 */
                if (htx_is_empty(dst))
                        src->flags |= HTX_FL_PARSING_ERROR;
        }

        /* Now, remove xferred blocks from the <src> htx message */
        if (!blk && !srcref) {
                /* End of src reached, all blocks were consumed, drain all data */
                htx_drain(src, src->data);
        }
        else {
                /* Remove all blocks from the head to <blk>, or <srcref> if defined, excluded */
                srcref = (srcref ? srcref : blk);
                for (blk = htx_get_head_blk(src); blk && blk != srcref; blk = htx_remove_blk(src, blk));
        }

  end:
        ret = htx_used_space(dst) - ret;
        return (struct htx_ret){.ret = ret, .blk = dstblk};
}

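/* Usage sketch: copying all the headers (up to and including the EOH block)
 * from one message to another; passing the free space of <dst> as <count> is
 * just one possible way to bound the transfer:
 *
 *      struct htx_ret htxret = htx_xfer_blks(dst, src, htx_free_space(dst),
 *                                            HTX_BLK_EOH);
 *
 * On return, htxret.ret is the amount of space consumed in <dst> and
 * htxret.blk the last block inserted there (NULL if nothing was copied).
 */
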
/* Replaces a header by a new one. The new header can be smaller or larger than
 * the old one. It returns the new block on success, otherwise it returns NULL.
 * The header name is always lower cased.
 */
struct htx_blk *htx_replace_header(struct htx *htx, struct htx_blk *blk,
                                   const struct ist name, const struct ist value)
{
        enum htx_blk_type type;
        void *ptr;
        int32_t delta;
        int ret;

        type = htx_get_blk_type(blk);
        if (type != HTX_BLK_HDR)
                return NULL;

        delta = name.len + value.len - htx_get_blksz(blk);
        ret = htx_prepare_blk_expansion(htx, blk, delta);
        if (!ret)
                return NULL; /* not enough space */


        /* Replacing in place or at a new address is the same here: the whole
         * header (name+value) is replaced. Just take care to defrag the message
         * if necessary. */
        if (ret == 3)
                blk = htx_defrag(htx, blk, (type << 28) + (value.len << 8) + name.len);
        else {
                /* Set the new block size and update the HTX message */
                blk->info = (type << 28) + (value.len << 8) + name.len;
                htx->data += delta;
        }

        /* Finally, copy data. */
        ptr = htx_get_blk_ptr(htx, blk);
        ist2bin_lc(ptr, name);
        memcpy(ptr + name.len, value.ptr, value.len);
        return blk;
}

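/* Usage sketch: once the HDR block carrying the header to rewrite has been
 * found (the lookup itself is outside the scope of this file), replacing it is
 * a single call; the header shown is only an example:
 *
 *      blk = htx_replace_header(htx, blk, ist("content-length"), ist("0"));
 *      if (!blk)
 *              ; // not enough space in the message
 */
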
/* Replaces the parts of the start-line. It returns the new start-line on
 * success, otherwise it returns NULL. It is the caller's responsibility to
 * update sl->info, if necessary.
 */
struct htx_sl *htx_replace_stline(struct htx *htx, struct htx_blk *blk, const struct ist p1,
                                  const struct ist p2, const struct ist p3)
{
        enum htx_blk_type type;
        struct htx_sl *sl;
        struct htx_sl tmp; /* used to save sl->info and sl->flags */
        uint32_t sz;
        int32_t delta;
        int ret;

        type = htx_get_blk_type(blk);
        if (type != HTX_BLK_REQ_SL && type != HTX_BLK_RES_SL)
                return NULL;

        /* Save start-line info and flags */
        sl = htx_get_blk_ptr(htx, blk);
        tmp.info = sl->info;
        tmp.flags = sl->flags;

        sz = htx_get_blksz(blk);
        delta = sizeof(*sl) + p1.len + p2.len + p3.len - sz;
        ret = htx_prepare_blk_expansion(htx, blk, delta);
        if (!ret)
                return NULL; /* not enough space */

        /* Replacing in place or at a new address is the same here: the whole
         * start-line is replaced. Just take care to defrag the message if
         * necessary. */
        if (ret == 3) {
                blk = htx_defrag(htx, blk, (type << 28) + sz + delta);
        }
        else {
                /* Set the new block size and update the HTX message */
                blk->info = (type << 28) + sz + delta;
                htx->data += delta;
        }

        /* Restore start-line info and flags and copy parts of the start-line */
        sl = htx_get_blk_ptr(htx, blk);
        sl->info = tmp.info;
        sl->flags = tmp.flags;

        HTX_SL_P1_LEN(sl) = p1.len;
        HTX_SL_P2_LEN(sl) = p2.len;
        HTX_SL_P3_LEN(sl) = p3.len;

        memcpy(HTX_SL_P1_PTR(sl), p1.ptr, p1.len);
        memcpy(HTX_SL_P2_PTR(sl), p2.ptr, p2.len);
        memcpy(HTX_SL_P3_PTR(sl), p3.ptr, p3.len);

        return sl;
}

/* Reserves the maximum possible size for an HTX data block, by extending an
 * existing one or by creating a new one. It returns a compound result with the
 * HTX block and the position where new data must be inserted (0 for a new
 * block). If an error occurs or if there is no space left, NULL is returned
 * instead of a pointer to an HTX block.
 */
struct htx_ret htx_reserve_max_data(struct htx *htx)
{
        struct htx_blk *blk, *tailblk;
        uint32_t sz, room;
        int32_t len = htx_free_data_space(htx);

        if (htx->head == -1)
                goto rsv_new_block;

        if (!len)
                return (struct htx_ret){.ret = 0, .blk = NULL};

        /* get the tail and head block */
        tailblk = htx_get_tail_blk(htx);
        if (tailblk == NULL)
                goto rsv_new_block;
        sz = htx_get_blksz(tailblk);

        /* Don't try to append data if the last inserted block is not of the
         * same type */
        if (htx_get_blk_type(tailblk) != HTX_BLK_DATA)
                goto rsv_new_block;

        /*
         * Same type and enough space: append data
         */
        if (!htx->head_addr) {
                if (tailblk->addr+sz != htx->tail_addr)
                        goto rsv_new_block;
                room = (htx_pos_to_addr(htx, htx->tail) - htx->tail_addr);
        }
        else {
                if (tailblk->addr+sz != htx->head_addr)
                        goto rsv_new_block;
                room = (htx->end_addr - htx->head_addr);
        }
        BUG_ON((int32_t)room < 0);
        if (room < len)
                len = room;

  append_data:
        htx_change_blk_value_len(htx, tailblk, sz+len);

        BUG_ON((int32_t)htx->tail_addr < 0);
        BUG_ON((int32_t)htx->head_addr < 0);
        BUG_ON(htx->end_addr > htx->tail_addr);
        BUG_ON(htx->head_addr > htx->end_addr);
        return (struct htx_ret){.ret = sz, .blk = tailblk};

  rsv_new_block:
        blk = htx_add_blk(htx, HTX_BLK_DATA, len);
        if (!blk)
                return (struct htx_ret){.ret = 0, .blk = NULL};
        blk->info += len;
        return (struct htx_ret){.ret = 0, .blk = blk};
}

/* Adds an HTX block of type DATA in <htx>. It first tries to append data if
 * possible. It returns the number of bytes consumed from <data>, which may be
 * zero if nothing could be copied.
 */
size_t htx_add_data(struct htx *htx, const struct ist data)
{
        struct htx_blk *blk, *tailblk;
        void *ptr;
        uint32_t sz, room;
        int32_t len = data.len;

        if (htx->head == -1)
                goto add_new_block;

        /* Not enough space to store data */
        if (len > htx_free_data_space(htx))
                len = htx_free_data_space(htx);

        if (!len)
                return 0;

        /* get the tail and head block */
        tailblk = htx_get_tail_blk(htx);
        if (tailblk == NULL)
                goto add_new_block;
        sz = htx_get_blksz(tailblk);

        /* Don't try to append data if the last inserted block is not of the
         * same type */
        if (htx_get_blk_type(tailblk) != HTX_BLK_DATA)
                goto add_new_block;

        /*
         * Same type and enough space: append data
         */
        if (!htx->head_addr) {
                if (tailblk->addr+sz != htx->tail_addr)
                        goto add_new_block;
                room = (htx_pos_to_addr(htx, htx->tail) - htx->tail_addr);
        }
        else {
                if (tailblk->addr+sz != htx->head_addr)
                        goto add_new_block;
                room = (htx->end_addr - htx->head_addr);
        }
        BUG_ON((int32_t)room < 0);
        if (room < len)
                len = room;

  append_data:
        /* Append data and update the block itself */
        ptr = htx_get_blk_ptr(htx, tailblk);
        memcpy(ptr + sz, data.ptr, len);
        htx_change_blk_value_len(htx, tailblk, sz+len);

        BUG_ON((int32_t)htx->tail_addr < 0);
        BUG_ON((int32_t)htx->head_addr < 0);
        BUG_ON(htx->end_addr > htx->tail_addr);
        BUG_ON(htx->head_addr > htx->end_addr);
        return len;

  add_new_block:
        blk = htx_add_blk(htx, HTX_BLK_DATA, len);
        if (!blk)
                return 0;

        blk->info += len;
        memcpy(htx_get_blk_ptr(htx, blk), data.ptr, len);
        return len;
}


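/* Usage sketch: feeding a payload chunk and handling partial consumption, e.g.
 * when filling the message from a demuxer (<input> is an assumed struct ist):
 *
 *      size_t sent = htx_add_data(htx, input);
 *
 *      if (sent < input.len)
 *              input = istadv(input, sent); // retry later with what is left
 */
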
1003/* Adds an HTX block of type DATA in <htx> just after all other DATA
1004 * blocks. Because it relies on htx_add_data_atonce(), It may be happened to a
1005 * DATA block if possible. But, if the function succeeds, it will be the last
1006 * DATA block in all cases. If an error occurred, NULL is returned. Otherwise,
1007 * on success, the updated block (or the new one) is returned.
1008 */
struct htx_blk *htx_add_last_data(struct htx *htx, struct ist data)
{
        struct htx_blk *blk, *pblk;

        blk = htx_add_data_atonce(htx, data);
        if (!blk)
                return NULL;

        for (pblk = htx_get_prev_blk(htx, blk); pblk; pblk = htx_get_prev_blk(htx, pblk)) {
                if (htx_get_blk_type(pblk) <= HTX_BLK_DATA)
                        break;

                /* Swap .addr and .info fields */
                blk->addr ^= pblk->addr; pblk->addr ^= blk->addr; blk->addr ^= pblk->addr;
                blk->info ^= pblk->info; pblk->info ^= blk->info; blk->info ^= pblk->info;

                if (blk->addr == pblk->addr)
                        blk->addr += htx_get_blksz(pblk);
                blk = pblk;
        }

        return blk;
}

/* Moves the block <blk> just before the block <ref>. Both blocks must be in the
 * HTX message <htx> and <blk> must be placed after <ref>. Pointers to these
 * blocks are updated to remain valid after the move. */
void htx_move_blk_before(struct htx *htx, struct htx_blk **blk, struct htx_blk **ref)
{
        struct htx_blk *cblk, *pblk;

        cblk = *blk;
        for (pblk = htx_get_prev_blk(htx, cblk); pblk; pblk = htx_get_prev_blk(htx, pblk)) {
                /* Swap .addr and .info fields */
                cblk->addr ^= pblk->addr; pblk->addr ^= cblk->addr; cblk->addr ^= pblk->addr;
                cblk->info ^= pblk->info; pblk->info ^= cblk->info; cblk->info ^= pblk->info;

                if (cblk->addr == pblk->addr)
                        cblk->addr += htx_get_blksz(pblk);
                if (pblk == *ref)
                        break;
                cblk = pblk;
        }
        *blk = cblk;
        *ref = pblk;
}

/* Appends the HTX message <src> to the HTX message <dst>. It returns 1 on
 * success and 0 on error. Either the whole message is copied or nothing is. If
 * an error occurs, all blocks from <src> already appended to <dst> are
 * truncated.
 */
int htx_append_msg(struct htx *dst, const struct htx *src)
{
        struct htx_blk *blk, *newblk;
        enum htx_blk_type type;
        uint32_t blksz, offset = dst->data;

        for (blk = htx_get_head_blk(src); blk; blk = htx_get_next_blk(src, blk)) {
                type = htx_get_blk_type(blk);

                if (type == HTX_BLK_UNUSED)
                        continue;

                blksz = htx_get_blksz(blk);
                newblk = htx_add_blk(dst, type, blksz);
                if (!newblk)
                        goto error;
                newblk->info = blk->info;
                memcpy(htx_get_blk_ptr(dst, newblk), htx_get_blk_ptr(src, blk), blksz);
        }

        return 1;

  error:
        htx_truncate(dst, offset);
        return 0;
}
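
/* Usage sketch: merging a pre-built message (for instance a canned response)
 * into the current one, with automatic rollback on failure:
 *
 *      if (!htx_append_msg(dst, src)) {
 *              ; // the blocks copied so far were removed again by htx_truncate()
 *      }
 */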