/*
 * internal HTTP message
 *
 * Copyright 2018 HAProxy Technologies, Christopher Faulet <cfaulet@haproxy.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <haproxy/chunk.h>
#include <haproxy/htx.h>
#include <haproxy/net_helper.h>

struct htx htx_empty = { .size = 0, .data = 0, .head = -1, .tail = -1, .first = -1 };

/* tests show that 63% of these calls are for 64-bit chunks, so better avoid calling
 * memcpy() for that!
 */
static inline __attribute__((always_inline)) void htx_memcpy(void *dst, void *src, size_t len)
{
	if (likely(len == 8))
		write_u64(dst, read_u64(src));
	else
		memcpy(dst, src, len);
}

/* Defragments an HTX message. It removes unused blocks and unwraps the payload
 * part. A temporary buffer is used to do so. This function never fails. Most of
 * the time, we need to keep a reference on a specific HTX block. Thus, if <blk>
 * is set, the pointer to its new position, after defrag, is returned. In
 * addition, if the size of the block must be altered, <blkinfo> must be
 * provided (!= 0). But in this case, it remains the caller's responsibility to
 * update the block content.
 */
/* TODO: merge data blocks into one */
struct htx_blk *htx_defrag(struct htx *htx, struct htx_blk *blk, uint32_t blkinfo)
{
	struct buffer *chunk = get_trash_chunk();
	struct htx *tmp = htxbuf(chunk);
	struct htx_blk *newblk, *oldblk;
	uint32_t new, old, blkpos;
	uint32_t addr, blksz;
	int32_t first = -1;

	if (htx->head == -1)
		return NULL;

	blkpos = -1;

	new = 0;
	addr = 0;
	tmp->size = htx->size;
	tmp->data = 0;

	/* start from the head */
	for (old = htx_get_head(htx); old != -1; old = htx_get_next(htx, old)) {
		oldblk = htx_get_blk(htx, old);
		if (htx_get_blk_type(oldblk) == HTX_BLK_UNUSED)
			continue;

		blksz = htx_get_blksz(oldblk);
		htx_memcpy((void *)tmp->blocks + addr, htx_get_blk_ptr(htx, oldblk), blksz);

		/* update the start-line position */
		if (htx->first == old)
			first = new;

		newblk = htx_get_blk(tmp, new);
		newblk->addr = addr;
		newblk->info = oldblk->info;

		/* if <blk> is defined, save its new position */
		if (blk != NULL && blk == oldblk) {
			if (blkinfo)
				newblk->info = blkinfo;
			blkpos = new;
		}

		blksz = htx_get_blksz(newblk);
		addr += blksz;
		tmp->data += blksz;
		new++;
	}

	htx->data = tmp->data;
	htx->first = first;
	htx->head = 0;
	htx->tail = new - 1;
	htx->head_addr = htx->end_addr = 0;
	htx->tail_addr = addr;
	htx->flags &= ~HTX_FL_FRAGMENTED;
	htx_memcpy((void *)htx->blocks, (void *)tmp->blocks, htx->size);

	return ((blkpos == -1) ? NULL : htx_get_blk(htx, blkpos));
}
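
/* Note: a caller that must keep working on a given block across a defrag passes
 * its current pointer (and optionally the new <info>) and continues with the
 * returned one, as htx_replace_blk_value() does below:
 *
 *	blk = htx_defrag(htx, blk, tmpblk.info);
 */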

/* Defragments the blocks of an HTX message. The payload part is left untouched
 * here. This function moves all used blocks back so that they start at position
 * 0, removing unused blocks. It must never be called with an empty message.
 */
static void htx_defrag_blks(struct htx *htx)
{
	int32_t pos, new;

	new = 0;
	for (pos = htx_get_head(htx); pos != -1; pos = htx_get_next(htx, pos)) {
		struct htx_blk *posblk, *newblk;

		if (pos == new) {
			new++;
			continue;
		}

		posblk = htx_get_blk(htx, pos);
		if (htx_get_blk_type(posblk) == HTX_BLK_UNUSED)
			continue;

		if (htx->first == pos)
			htx->first = new;
		newblk = htx_get_blk(htx, new++);
		newblk->info = posblk->info;
		newblk->addr = posblk->addr;
	}
	BUG_ON(!new);
	htx->head = 0;
	htx->tail = new - 1;
}

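/* Rough sketch of the storage layout assumed by the functions below (the
 * authoritative definitions live in haproxy/htx.h). Payloads are stored at the
 * beginning of the storage area while the table of block descriptors grows
 * downward from the end (htx_pos_to_addr() converts a position into the lowest
 * address used by its descriptor). <tail_addr> marks the end of the most
 * recently written payloads, so the "tailroom" is the gap between <tail_addr>
 * and the descriptors table. Once payloads start wrapping to the front of the
 * area, they are placed between <head_addr> and <end_addr> (the "headroom"),
 * and the tailroom is not used anymore until the next defrag:
 *
 *   0       head_addr     end_addr        tail_addr
 *   | wrapped |   free      |  payloads     |   free     | descriptors table |
 *   | payloads| (headroom)  |               | (tailroom) |  (tail ... head)  |
 */
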
/* Reserves a new block in the HTX message <htx> with a content of <blksz>
 * bytes. If there is not enough space, NULL is returned. Otherwise the reserved
 * block is returned and the HTX message is updated. Space for this new block is
 * reserved in the HTX message, but it is the caller's responsibility to set the
 * right info in the block to reflect the stored data.
 */
static struct htx_blk *htx_reserve_nxblk(struct htx *htx, uint32_t blksz)
{
	struct htx_blk *blk;
	uint32_t tail, headroom, tailroom;

	if (blksz > htx_free_data_space(htx))
		return NULL; /* full */

	if (htx->head == -1) {
		/* Empty message */
		htx->head = htx->tail = htx->first = 0;
		blk = htx_get_blk(htx, htx->tail);
		blk->addr = 0;
		htx->data = blksz;
		htx->tail_addr = blksz;
		return blk;
	}

	/* Find the block's position. First, we try to get the next position in
	 * the message, increasing the tail by one. If this position is not
	 * available, we try to defrag the blocks without touching their
	 * payload, provided there are holes to reclaim. If it is impossible,
	 * we fully defrag the message.
	 */
	tail = htx->tail + 1;
	if (htx_pos_to_addr(htx, tail) >= htx->tail_addr)
		;
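		/* nothing to do above: the descriptor slot for <tail> lies
		 * above <tail_addr>, so it does not overlap the stored
		 * payloads and the position can be used as-is */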
	else if (htx->head > 0) {
		htx_defrag_blks(htx);
		tail = htx->tail + 1;
		BUG_ON(htx_pos_to_addr(htx, tail) < htx->tail_addr);
	}
	else
		goto defrag;

	/* Now, we have found the block's position. Try to find where to put its
	 * payload. The free space is split in two areas:
	 *
	 *  * The free space in front of the blocks table. This one is used if
	 *    and only if the other one was not used yet.
	 *
	 *  * The free space at the beginning of the message. Once this one is
	 *    used, the other one is never used again, until the next defrag.
	 */
	headroom = (htx->end_addr - htx->head_addr);
	tailroom = (!htx->head_addr ? htx_pos_to_addr(htx, tail) - htx->tail_addr : 0);
	BUG_ON((int32_t)headroom < 0);
	BUG_ON((int32_t)tailroom < 0);

	if (blksz <= tailroom) {
		blk = htx_get_blk(htx, tail);
		blk->addr = htx->tail_addr;
		htx->tail_addr += blksz;
	}
	else if (blksz <= headroom) {
		blk = htx_get_blk(htx, tail);
		blk->addr = htx->head_addr;
		htx->head_addr += blksz;
	}
	else {
	  defrag:
		/* need to defragment the message before inserting upfront */
		htx_defrag(htx, NULL, 0);
		tail = htx->tail + 1;
		blk = htx_get_blk(htx, tail);
		blk->addr = htx->tail_addr;
		htx->tail_addr += blksz;
	}

	htx->tail = tail;
	htx->data += blksz;
	/* Set first position if not already set */
	if (htx->first == -1)
		htx->first = tail;

	BUG_ON((int32_t)htx->tail_addr < 0);
	BUG_ON((int32_t)htx->head_addr < 0);
	BUG_ON(htx->end_addr > htx->tail_addr);
	BUG_ON(htx->head_addr > htx->end_addr);

	return blk;
}

/* Prepares the block for an expansion of its payload. The payload will be
 * expanded by <delta> bytes and we need to find where this expansion will be
 * performed. It can be a compression if <delta> is negative. This function only
 * updates all addresses. The caller has the responsibility to perform the
 * expansion and update the block and the HTX message accordingly. No error must
 * occur. It returns the following values:
 *
 *  0: the expansion cannot be performed, there is not enough space.
 *
 *  1: the expansion must be performed in place, there is enough space after
 *     the block's payload to handle it. This is especially true if it is a
 *     compression and not an expansion.
 *
 *  2: the block's payload must be moved to the new block address before doing
 *     the expansion.
 *
 *  3: the HTX message must be defragmented
 */
static int htx_prepare_blk_expansion(struct htx *htx, struct htx_blk *blk, int32_t delta)
{
	uint32_t sz, tailroom, headroom;
	int ret = 3;

	BUG_ON(htx->head == -1);

	headroom = (htx->end_addr - htx->head_addr);
	tailroom = (htx_pos_to_addr(htx, htx->tail) - htx->tail_addr);
	BUG_ON((int32_t)headroom < 0);
	BUG_ON((int32_t)tailroom < 0);

	sz = htx_get_blksz(blk);
	if (delta <= 0) {
		/* It is a compression, it can be performed in place */
		if (blk->addr+sz == htx->tail_addr)
			htx->tail_addr += delta;
		else if (blk->addr+sz == htx->head_addr)
			htx->head_addr += delta;
		ret = 1;
	}
	else if (delta > htx_free_space(htx)) {
		/* There is not enough space to handle the expansion */
		ret = 0;
	}
	else if (blk->addr+sz == htx->tail_addr) {
		/* The block's payload is just before the tail room */
		if (delta < tailroom) {
			/* Expand the block's payload */
			htx->tail_addr += delta;
			ret = 1;
		}
		else if ((sz + delta) < headroom) {
			uint32_t oldaddr = blk->addr;

			/* Move the block's payload into the headroom */
			blk->addr = htx->head_addr;
			htx->tail_addr -= sz;
			htx->head_addr += sz + delta;
			if (oldaddr == htx->end_addr) {
				if (htx->end_addr == htx->tail_addr) {
					htx->tail_addr = htx->head_addr;
					htx->head_addr = htx->end_addr = 0;
				}
				else
					htx->end_addr += sz;
			}
			ret = 2;
		}
	}
	else if (blk->addr+sz == htx->head_addr) {
		/* The block's payload is just before the head room */
		if (delta < headroom) {
			/* Expand the block's payload */
			htx->head_addr += delta;
			ret = 1;
		}
	}
	else {
		/* The block's payload is not at the edge of either room */
		if (!htx->head_addr && sz+delta < tailroom) {
			/* Move the block's payload into the tailroom */
			if (blk->addr == htx->end_addr)
				htx->end_addr += sz;
			blk->addr = htx->tail_addr;
			htx->tail_addr += sz + delta;
			ret = 2;
		}
		else if (sz+delta < headroom) {
			/* Move the block's payload into the headroom */
			if (blk->addr == htx->end_addr)
				htx->end_addr += sz;
			blk->addr = htx->head_addr;
			htx->head_addr += sz + delta;
			ret = 2;
		}
	}
	/* Otherwise defrag the HTX message */

	BUG_ON((int32_t)htx->tail_addr < 0);
	BUG_ON((int32_t)htx->head_addr < 0);
	BUG_ON(htx->end_addr > htx->tail_addr);
	BUG_ON(htx->head_addr > htx->end_addr);
	return ret;
}
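
/* Note on usage (as done by htx_replace_blk_value() and htx_replace_header()
 * below): 0 aborts the operation, 1 and 2 let the caller rewrite the payload at
 * blk->addr (already updated when 2 is returned), and 3 requires a call to
 * htx_defrag() first.
 */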

/* Adds a new block of type <type> in the HTX message <htx>. Its content size is
 * passed but it is the caller's responsibility to do the copy.
 */
struct htx_blk *htx_add_blk(struct htx *htx, enum htx_blk_type type, uint32_t blksz)
{
	struct htx_blk *blk;

	BUG_ON(blksz >= 256 << 20);
	blk = htx_reserve_nxblk(htx, blksz);
	if (!blk)
		return NULL;
	BUG_ON(blk->addr > htx->size);

	blk->info = (type << 28);
	return blk;
}
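
/* Illustrative use (sketch, mirroring htx_add_data_atonce() below): the caller
 * reserves the block, then completes its <info> and copies the payload itself:
 *
 *	blk = htx_add_blk(htx, HTX_BLK_DATA, data.len);
 *	if (blk) {
 *		blk->info += data.len;
 *		htx_memcpy(htx_get_blk_ptr(htx, blk), data.ptr, data.len);
 *	}
 */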

/* Removes the block <blk> from the HTX message <htx>. The function returns the
 * block following <blk> or NULL if <blk> is the last block or the last inserted
 * one.
 */
struct htx_blk *htx_remove_blk(struct htx *htx, struct htx_blk *blk)
{
	enum htx_blk_type type;
	uint32_t pos, addr, sz;

	BUG_ON(!blk || htx->head == -1);

	/* This is the last block in use */
	if (htx->head == htx->tail) {
		uint32_t flags = (htx->flags & ~HTX_FL_FRAGMENTED); /* Preserve flags except FRAGMENTED */

		htx_reset(htx);
		htx->flags = flags; /* restore flags */
		return NULL;
	}

	type = htx_get_blk_type(blk);
	pos = htx_get_blk_pos(htx, blk);
	sz = htx_get_blksz(blk);
	addr = blk->addr;
	if (type != HTX_BLK_UNUSED) {
		/* Mark the block as unused, decrement allocated size */
		htx->data -= htx_get_blksz(blk);
		blk->info = ((uint32_t)HTX_BLK_UNUSED << 28);
	}

	/* There are at least 2 blocks, so tail is always > 0 */
	if (pos == htx->head) {
		/* move the head forward */
		htx->head++;
	}
	else if (pos == htx->tail) {
		/* remove the tail. This was the last inserted block so
		 * return NULL. */
		htx->tail--;
		blk = NULL;
		goto end;
	}
	else
		htx->flags |= HTX_FL_FRAGMENTED;

	blk = htx_get_blk(htx, pos+1);

  end:
	if (pos == htx->first)
		htx->first = (blk ? htx_get_blk_pos(htx, blk) : -1);

	if (htx->head == htx->tail) {
		/* If there is just one block in the HTX message, free space can
		 * be adjusted. This operation could save some defrags. */
		struct htx_blk *lastblk = htx_get_blk(htx, htx->tail);

		htx->head_addr = 0;
		htx->end_addr = lastblk->addr;
		htx->tail_addr = lastblk->addr+htx->data;
	}
	else {
		if (addr+sz == htx->tail_addr)
			htx->tail_addr = addr;
		else if (addr+sz == htx->head_addr)
			htx->head_addr = addr;
		if (addr == htx->end_addr) {
			if (htx->tail_addr == htx->end_addr) {
				htx->tail_addr = htx->head_addr;
				htx->head_addr = htx->end_addr = 0;
			}
			else
				htx->end_addr += sz;
		}
	}

	BUG_ON((int32_t)htx->tail_addr < 0);
	BUG_ON((int32_t)htx->head_addr < 0);
	BUG_ON(htx->end_addr > htx->tail_addr);
	BUG_ON(htx->head_addr > htx->end_addr);
	return blk;
}
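
/* Note: since htx_remove_blk() returns the block following the removed one (or
 * NULL), it can be used to delete blocks while walking the message, as
 * htx_truncate() and htx_xfer_blks() do below, e.g. to empty it entirely:
 *
 *	for (blk = htx_get_head_blk(htx); blk; blk = htx_remove_blk(htx, blk));
 */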

/* Looks for the HTX block containing the offset <offset>, starting at the HTX
 * message's head. The function returns an htx_ret with the found HTX block and
 * the position inside this block where the offset is. If the offset <offset> is
 * outside of the HTX message, htx_ret.blk is set to NULL.
 */
struct htx_ret htx_find_offset(struct htx *htx, uint32_t offset)
{
	struct htx_blk *blk;
	struct htx_ret htxret = { .blk = NULL, .ret = 0 };

	if (offset >= htx->data)
		return htxret;

	for (blk = htx_get_head_blk(htx); blk && offset; blk = htx_get_next_blk(htx, blk)) {
		uint32_t sz = htx_get_blksz(blk);

		if (offset < sz)
			break;
		offset -= sz;
	}
	htxret.blk = blk;
	htxret.ret = offset;
	return htxret;
}

/* Removes all blocks after the one containing the offset <offset>. This last
 * one may be truncated if it is a DATA block.
 */
void htx_truncate(struct htx *htx, uint32_t offset)
{
	struct htx_blk *blk;
	struct htx_ret htxret = htx_find_offset(htx, offset);

	blk = htxret.blk;
	if (blk && htxret.ret && htx_get_blk_type(blk) == HTX_BLK_DATA) {
		htx_change_blk_value_len(htx, blk, htxret.ret);
		blk = htx_get_next_blk(htx, blk);
	}
	while (blk)
		blk = htx_remove_blk(htx, blk);
}

/* Drains <count> bytes from the HTX message <htx>. If draining stops in the
 * middle of a DATA block, that block is cut accordingly. Other blocks are
 * removed at once if <count> is large enough. The function returns an htx_ret
 * with the first block remaining in the message and the amount of data drained.
 * If everything is removed, htx_ret.blk is set to NULL.
 */
struct htx_ret htx_drain(struct htx *htx, uint32_t count)
{
	struct htx_blk *blk;
	struct htx_ret htxret = { .blk = NULL, .ret = 0 };

	if (count == htx->data) {
		uint32_t flags = (htx->flags & ~HTX_FL_FRAGMENTED); /* Preserve flags except FRAGMENTED */

		htx_reset(htx);
		htx->flags = flags; /* restore flags */
		htxret.ret = count;
		return htxret;
	}

	blk = htx_get_head_blk(htx);
	while (count && blk) {
		uint32_t sz = htx_get_blksz(blk);
		enum htx_blk_type type = htx_get_blk_type(blk);

		/* Ignore unused blocks */
		if (type == HTX_BLK_UNUSED)
			goto next;

		if (sz > count) {
			if (type == HTX_BLK_DATA) {
				htx_cut_data_blk(htx, blk, count);
				htxret.ret += count;
			}
			break;
		}
		count -= sz;
		htxret.ret += sz;
	  next:
		blk = htx_remove_blk(htx, blk);
	}
	htxret.blk = blk;

	return htxret;
}
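
/* Note: draining all the data is a reset that keeps the message flags (except
 * HTX_FL_FRAGMENTED), which is how htx_xfer_blks() below releases a fully
 * consumed <src>:
 *
 *	htx_drain(src, src->data);
 */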

/* Tries to append data to the last inserted block, if the type matches and if
 * there is enough space to take it all. If the space wraps, the buffer is
 * defragmented and a new block is inserted. If an error occurs, NULL is
 * returned. Otherwise, on success, the updated block (or the new one) is
 * returned. Due to its nature this function can be expensive and should be
 * avoided whenever possible.
 */
struct htx_blk *htx_add_data_atonce(struct htx *htx, struct ist data)
{
	struct htx_blk *blk, *tailblk;
	void *ptr;
	uint32_t len, sz, tailroom, headroom;

	if (htx->head == -1)
		goto add_new_block;

	/* Not enough space to store data */
	if (data.len > htx_free_data_space(htx))
		return NULL;

	/* get the tail block and its size */
	tailblk = htx_get_tail_blk(htx);
	if (tailblk == NULL)
		goto add_new_block;
	sz = htx_get_blksz(tailblk);

	/* Don't try to append data if the last inserted block is not of the
	 * same type */
	if (htx_get_blk_type(tailblk) != HTX_BLK_DATA)
		goto add_new_block;

	/*
	 * Same type and enough space: append data
	 */
	headroom = (htx->end_addr - htx->head_addr);
	tailroom = (htx_pos_to_addr(htx, htx->tail) - htx->tail_addr);
	BUG_ON((int32_t)headroom < 0);
	BUG_ON((int32_t)tailroom < 0);

	len = data.len;
	if (tailblk->addr+sz == htx->tail_addr) {
		if (data.len <= tailroom)
			goto append_data;
		else if (!htx->head_addr) {
			len = tailroom;
			goto append_data;
		}
	}
	else if (tailblk->addr+sz == htx->head_addr && data.len <= headroom)
		goto append_data;

	goto add_new_block;

  append_data:
	/* Append data and update the block itself */
	ptr = htx_get_blk_ptr(htx, tailblk);
	htx_memcpy(ptr+sz, data.ptr, len);
	htx_change_blk_value_len(htx, tailblk, sz+len);

	if (data.len == len) {
		blk = tailblk;
		goto end;
	}
	data = istadv(data, len);

  add_new_block:
	blk = htx_add_blk(htx, HTX_BLK_DATA, data.len);
	if (!blk)
		return NULL;

	blk->info += data.len;
	htx_memcpy(htx_get_blk_ptr(htx, blk), data.ptr, data.len);

  end:
	BUG_ON((int32_t)htx->tail_addr < 0);
	BUG_ON((int32_t)htx->head_addr < 0);
	BUG_ON(htx->end_addr > htx->tail_addr);
	BUG_ON(htx->head_addr > htx->end_addr);
	return blk;
}
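
/* Illustrative use (sketch): append a chunk of body data, coalescing it with
 * the tail DATA block when possible, assuming <htx> already carries a message:
 *
 *	if (!htx_add_data_atonce(htx, ist("hello")))
 *		return 0;  // not enough room
 */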

/* Replaces the part <old> of a block's value with the new one <new>. The new
 * part can be smaller or larger than the old one. This function works for any
 * kind of block with attached data. It returns the new block on success,
 * otherwise it returns NULL.
 */
struct htx_blk *htx_replace_blk_value(struct htx *htx, struct htx_blk *blk,
				      const struct ist old, const struct ist new)
{
	struct ist n, v;
	int32_t delta;
	int ret;

	n = htx_get_blk_name(htx, blk);
	v = htx_get_blk_value(htx, blk);
	delta = new.len - old.len;
	ret = htx_prepare_blk_expansion(htx, blk, delta);
	if (!ret)
		return NULL; /* not enough space */

	if (ret == 1) { /* Replace in place */
		if (delta <= 0) {
			/* compression: copy new data first then move the end */
			htx_memcpy(old.ptr, new.ptr, new.len);
			memmove(old.ptr + new.len, istend(old),
				istend(v) - istend(old));
		}
		else {
			/* expansion: move the end first then copy new data */
			memmove(old.ptr + new.len, istend(old),
				istend(v) - istend(old));
			htx_memcpy(old.ptr, new.ptr, new.len);
		}

		/* set the new block size and update HTX message */
		htx_set_blk_value_len(blk, v.len + delta);
		htx->data += delta;
	}
	else if (ret == 2) { /* New address but no defrag */
		void *ptr = htx_get_blk_ptr(htx, blk);

		/* Copy the name, if any */
		htx_memcpy(ptr, n.ptr, n.len);
		ptr += n.len;

		/* Copy value before old part, if any */
		htx_memcpy(ptr, v.ptr, old.ptr - v.ptr);
		ptr += old.ptr - v.ptr;

		/* Copy new value */
		htx_memcpy(ptr, new.ptr, new.len);
		ptr += new.len;

		/* Copy value after old part, if any */
		htx_memcpy(ptr, istend(old), istend(v) - istend(old));

		/* set the new block size and update HTX message */
		htx_set_blk_value_len(blk, v.len + delta);
		htx->data += delta;
	}
	else { /* Do a defrag first (it is always an expansion) */
		struct htx_blk tmpblk;
		int32_t offset;

		/* use tmpblk to set the new block size before the defrag and to
		 * compute the offset after it
		 */
		tmpblk.addr = blk->addr;
		tmpblk.info = blk->info;
		htx_set_blk_value_len(&tmpblk, v.len + delta);

		/* htx_defrag() will take care to update the block size and the htx message */
		blk = htx_defrag(htx, blk, tmpblk.info);

		/* <blk> now points to the new HTX block. Compute the offset to copy/move the payload */
		offset = blk->addr - tmpblk.addr;

		/* move the end first and copy new data */
		memmove(old.ptr + offset + new.len, old.ptr + offset + old.len,
			istend(v) - istend(old));
		htx_memcpy(old.ptr + offset, new.ptr, new.len);
	}
	return blk;
}
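
/* Note: <old> is expected to point inside the block's current value (as
 * returned by htx_get_blk_value()), since the copies above are performed
 * relative to it. A hypothetical caller would look like:
 *
 *	struct ist v = htx_get_blk_value(htx, blk);
 *	struct ist old = ist2(v.ptr + off, oldlen);  // located by the caller
 *	blk = htx_replace_blk_value(htx, blk, old, ist("new-part"));
 */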

/* Transfers HTX blocks from <src> to <dst>, stopping on the first block of the
 * type <mark> (typically EOH or EOT) or when <count> bytes were moved
 * (including payload and meta-data). It returns the number of bytes moved and
 * the last HTX block inserted in <dst>.
 */
struct htx_ret htx_xfer_blks(struct htx *dst, struct htx *src, uint32_t count,
			     enum htx_blk_type mark)
{
	struct htx_blk *blk, *dstblk;
	struct htx_blk *srcref, *dstref;
	enum htx_blk_type type;
	uint32_t info, max, sz, ret;

	ret = htx_used_space(dst);
	srcref = dstref = dstblk = NULL;

	/* blocks are not removed yet from <src> HTX message to be able to
	 * rollback the transfer if all the headers/trailers are not copied.
	 */
	for (blk = htx_get_head_blk(src); blk && count; blk = htx_get_next_blk(src, blk)) {
		type = htx_get_blk_type(blk);

		/* Ignore unused blocks */
		if (type == HTX_BLK_UNUSED)
			continue;

		max = htx_get_max_blksz(dst, count);
		if (!max)
			break;

		sz = htx_get_blksz(blk);
		info = blk->info;
		if (sz > max) {
			/* Only DATA blocks can be partially xferred */
			if (type != HTX_BLK_DATA)
				break;
			sz = max;
			info = (type << 28) + sz;
		}

		dstblk = htx_reserve_nxblk(dst, sz);
		if (!dstblk)
			break;
		dstblk->info = info;
		htx_memcpy(htx_get_blk_ptr(dst, dstblk), htx_get_blk_ptr(src, blk), sz);

		count -= sizeof(dstblk) + sz;
		if (blk->info != info) {
			/* Partial xfer: don't remove <blk> from <src> but
			 * resize its content */
			htx_cut_data_blk(src, blk, sz);
			break;
		}

		if (type == mark) {
			blk = htx_get_next_blk(src, blk);
			srcref = dstref = NULL;
			break;
		}

		/* Save <blk> to <srcref> and <dstblk> to <dstref> when we start
		 * to xfer headers or trailers. When EOH/EOT block is reached,
		 * both are reset. It is mandatory to be able to rollback a
		 * partial transfer.
		 */
		if (!srcref && !dstref &&
		    (type == HTX_BLK_REQ_SL || type == HTX_BLK_RES_SL || type == HTX_BLK_TLR)) {
			srcref = blk;
			dstref = dstblk;
		}
		else if (type == HTX_BLK_EOH || type == HTX_BLK_EOT)
			srcref = dstref = NULL;
	}

	if (unlikely(dstref)) {
		/* Headers or trailers part was partially xferred, so rollback
		 * the copy by removing all blocks between <dstref> and
		 * <dstblk>, both included. <dstblk> may be NULL.
		 */
		while (dstref && dstref != dstblk)
			dstref = htx_remove_blk(dst, dstref);
		if (dstblk)
			htx_remove_blk(dst, dstblk);

		/* If the <dst> HTX message is empty, it means the headers or
		 * trailers part is too big to be copied at once.
		 */
		if (htx_is_empty(dst))
			src->flags |= HTX_FL_PARSING_ERROR;
	}

	/* Now, remove xferred blocks from the <src> htx message */
	if (!blk && !srcref) {
		/* End of src reached, all blocks were consumed, drain all data */
		htx_drain(src, src->data);
	}
	else {
		/* Remove all blocks from the head to <blk>, or <srcref> if defined, excluded */
		srcref = (srcref ? srcref : blk);
		for (blk = htx_get_head_blk(src); blk && blk != srcref; blk = htx_remove_blk(src, blk));
	}

  end:
	ret = htx_used_space(dst) - ret;
	return (struct htx_ret){.ret = ret, .blk = dstblk};
}
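
/* Note: <count> is accounted in "used space" units, i.e. each transferred block
 * consumes its payload size plus a per-block descriptor overhead, which is also
 * how the returned byte count (the htx_used_space() delta) is measured.
 */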

/* Replaces a header by a new one. The new header can be smaller or larger than
 * the old one. It returns the new block on success, otherwise it returns NULL.
 * The header name is always lower cased.
 */
| 790 | struct htx_blk *htx_replace_header(struct htx *htx, struct htx_blk *blk, |
Christopher Faulet | aa75b3d | 2018-12-05 16:20:40 +0100 | [diff] [blame] | 791 | const struct ist name, const struct ist value) |
Christopher Faulet | a3d2a16 | 2018-10-22 08:59:39 +0200 | [diff] [blame] | 792 | { |
Christopher Faulet | aa75b3d | 2018-12-05 16:20:40 +0100 | [diff] [blame] | 793 | enum htx_blk_type type; |
Christopher Faulet | d7884d3 | 2019-06-11 10:40:43 +0200 | [diff] [blame] | 794 | void *ptr; |
Christopher Faulet | e97f3ba | 2018-12-10 15:39:40 +0100 | [diff] [blame] | 795 | int32_t delta; |
Christopher Faulet | d7884d3 | 2019-06-11 10:40:43 +0200 | [diff] [blame] | 796 | int ret; |
Christopher Faulet | a3d2a16 | 2018-10-22 08:59:39 +0200 | [diff] [blame] | 797 | |
Christopher Faulet | aa75b3d | 2018-12-05 16:20:40 +0100 | [diff] [blame] | 798 | type = htx_get_blk_type(blk); |
| 799 | if (type != HTX_BLK_HDR) |
| 800 | return NULL; |
Christopher Faulet | a3d2a16 | 2018-10-22 08:59:39 +0200 | [diff] [blame] | 801 | |
Christopher Faulet | e97f3ba | 2018-12-10 15:39:40 +0100 | [diff] [blame] | 802 | delta = name.len + value.len - htx_get_blksz(blk); |
Christopher Faulet | d7884d3 | 2019-06-11 10:40:43 +0200 | [diff] [blame] | 803 | ret = htx_prepare_blk_expansion(htx, blk, delta); |
| 804 | if (!ret) |
Christopher Faulet | e97f3ba | 2018-12-10 15:39:40 +0100 | [diff] [blame] | 805 | return NULL; /* not enough space */ |
Christopher Faulet | a3d2a16 | 2018-10-22 08:59:39 +0200 | [diff] [blame] | 806 | |
Christopher Faulet | e97f3ba | 2018-12-10 15:39:40 +0100 | [diff] [blame] | 807 | |
Christopher Faulet | 3b21972 | 2019-06-19 13:48:09 +0200 | [diff] [blame] | 808 | /* Replace in place or at a new address is the same. We replace all the |
| 809 | * header (name+value). Only take care to defrag the message if |
| 810 | * necessary. */ |
Christopher Faulet | d7884d3 | 2019-06-11 10:40:43 +0200 | [diff] [blame] | 811 | if (ret == 3) |
Christopher Faulet | 1cf414b | 2021-06-09 17:30:40 +0200 | [diff] [blame] | 812 | blk = htx_defrag(htx, blk, (type << 28) + (value.len << 8) + name.len); |
| 813 | else { |
| 814 | /* Set the new block size and update HTX message */ |
| 815 | blk->info = (type << 28) + (value.len << 8) + name.len; |
| 816 | htx->data += delta; |
| 817 | } |
Christopher Faulet | d7884d3 | 2019-06-11 10:40:43 +0200 | [diff] [blame] | 818 | |
Ilya Shipitsin | 47d1718 | 2020-06-21 21:42:57 +0500 | [diff] [blame] | 819 | /* Finally, copy data. */ |
Christopher Faulet | d7884d3 | 2019-06-11 10:40:43 +0200 | [diff] [blame] | 820 | ptr = htx_get_blk_ptr(htx, blk); |
| 821 | ist2bin_lc(ptr, name); |
Willy Tarreau | 23aa79d | 2023-02-02 15:32:20 +0100 | [diff] [blame] | 822 | htx_memcpy(ptr + name.len, value.ptr, value.len); |
Christopher Faulet | aa75b3d | 2018-12-05 16:20:40 +0100 | [diff] [blame] | 823 | return blk; |
Christopher Faulet | a3d2a16 | 2018-10-22 08:59:39 +0200 | [diff] [blame] | 824 | } |
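
/* Illustrative usage sketch only: rewrite an existing header block in place.
 * <htx> is assumed to be a valid message and <blk> an HTX_BLK_HDR block; the
 * header name/value and the helper name are hypothetical.
 */
static inline struct htx_blk *example_set_server_hdr(struct htx *htx, struct htx_blk *blk)
{
        /* on success the (possibly moved) block is returned; on lack of
         * space NULL is returned and the message is left untouched
         */
        return htx_replace_header(htx, blk, ist("server"), ist("haproxy"));
}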
| 825 | |
Christopher Faulet | f1ba18d | 2018-11-26 21:37:08 +0100 | [diff] [blame] | 826 | /* Replaces the parts of the start-line. It returns the new start-line on |
| 827 | * success, otherwise it returns NULL. It is the caller's responsibility to update |
| 828 | * sl->info, if necessary. |
Christopher Faulet | a3d2a16 | 2018-10-22 08:59:39 +0200 | [diff] [blame] | 829 | */ |
Christopher Faulet | f1ba18d | 2018-11-26 21:37:08 +0100 | [diff] [blame] | 830 | struct htx_sl *htx_replace_stline(struct htx *htx, struct htx_blk *blk, const struct ist p1, |
| 831 | const struct ist p2, const struct ist p3) |
Christopher Faulet | a3d2a16 | 2018-10-22 08:59:39 +0200 | [diff] [blame] | 832 | { |
Christopher Faulet | d7884d3 | 2019-06-11 10:40:43 +0200 | [diff] [blame] | 833 | enum htx_blk_type type; |
Christopher Faulet | f1ba18d | 2018-11-26 21:37:08 +0100 | [diff] [blame] | 834 | struct htx_sl *sl; |
| 835 | struct htx_sl tmp; /* used to save sl->info and sl->flags */ |
Christopher Faulet | d7884d3 | 2019-06-11 10:40:43 +0200 | [diff] [blame] | 836 | uint32_t sz; |
Christopher Faulet | e97f3ba | 2018-12-10 15:39:40 +0100 | [diff] [blame] | 837 | int32_t delta; |
Christopher Faulet | d7884d3 | 2019-06-11 10:40:43 +0200 | [diff] [blame] | 838 | int ret; |
Christopher Faulet | a3d2a16 | 2018-10-22 08:59:39 +0200 | [diff] [blame] | 839 | |
Christopher Faulet | aa75b3d | 2018-12-05 16:20:40 +0100 | [diff] [blame] | 840 | type = htx_get_blk_type(blk); |
Willy Tarreau | c706cd7 | 2018-12-07 17:12:22 +0100 | [diff] [blame] | 841 | if (type != HTX_BLK_REQ_SL && type != HTX_BLK_RES_SL) |
Christopher Faulet | aa75b3d | 2018-12-05 16:20:40 +0100 | [diff] [blame] | 842 | return NULL; |
Christopher Faulet | a3d2a16 | 2018-10-22 08:59:39 +0200 | [diff] [blame] | 843 | |
Christopher Faulet | f1ba18d | 2018-11-26 21:37:08 +0100 | [diff] [blame] | 844 | /* Save start-line info and flags */ |
| 845 | sl = htx_get_blk_ptr(htx, blk); |
| 846 | tmp.info = sl->info; |
| 847 | tmp.flags = sl->flags; |
Christopher Faulet | f1ba18d | 2018-11-26 21:37:08 +0100 | [diff] [blame] | 848 | |
Christopher Faulet | d7884d3 | 2019-06-11 10:40:43 +0200 | [diff] [blame] | 849 | sz = htx_get_blksz(blk); |
| 850 | delta = sizeof(*sl) + p1.len + p2.len + p3.len - sz; |
| 851 | ret = htx_prepare_blk_expansion(htx, blk, delta); |
| 852 | if (!ret) |
Christopher Faulet | e97f3ba | 2018-12-10 15:39:40 +0100 | [diff] [blame] | 853 | return NULL; /* not enough space */ |
| 854 | |
Christopher Faulet | 3b21972 | 2019-06-19 13:48:09 +0200 | [diff] [blame] | 855 | /* Replacing in place or at a new address works the same way: the whole |
| 856 | * start-line is rewritten. We only need to take care to defrag the message if necessary. */ |
Christopher Faulet | 1cf414b | 2021-06-09 17:30:40 +0200 | [diff] [blame] | 857 | if (ret == 3) { |
| 858 | blk = htx_defrag(htx, blk, (type << 28) + sz + delta); |
| 859 | } |
| 860 | else { |
| 861 | /* Set the new block size and update HTX message */ |
| 862 | blk->info = (type << 28) + sz + delta; |
| 863 | htx->data += delta; |
| 864 | } |
Christopher Faulet | d7884d3 | 2019-06-11 10:40:43 +0200 | [diff] [blame] | 865 | |
Christopher Faulet | e97f3ba | 2018-12-10 15:39:40 +0100 | [diff] [blame] | 866 | /* Restore start-line info and flags and copy parts of the start-line */ |
Christopher Faulet | f1ba18d | 2018-11-26 21:37:08 +0100 | [diff] [blame] | 867 | sl = htx_get_blk_ptr(htx, blk); |
| 868 | sl->info = tmp.info; |
| 869 | sl->flags = tmp.flags; |
Christopher Faulet | a3d2a16 | 2018-10-22 08:59:39 +0200 | [diff] [blame] | 870 | |
Christopher Faulet | f1ba18d | 2018-11-26 21:37:08 +0100 | [diff] [blame] | 871 | HTX_SL_P1_LEN(sl) = p1.len; |
| 872 | HTX_SL_P2_LEN(sl) = p2.len; |
| 873 | HTX_SL_P3_LEN(sl) = p3.len; |
Christopher Faulet | 54483df | 2018-11-26 15:05:52 +0100 | [diff] [blame] | 874 | |
Willy Tarreau | 23aa79d | 2023-02-02 15:32:20 +0100 | [diff] [blame] | 875 | htx_memcpy(HTX_SL_P1_PTR(sl), p1.ptr, p1.len); |
| 876 | htx_memcpy(HTX_SL_P2_PTR(sl), p2.ptr, p2.len); |
| 877 | htx_memcpy(HTX_SL_P3_PTR(sl), p3.ptr, p3.len); |
Christopher Faulet | a3d2a16 | 2018-10-22 08:59:39 +0200 | [diff] [blame] | 878 | |
Christopher Faulet | f1ba18d | 2018-11-26 21:37:08 +0100 | [diff] [blame] | 879 | return sl; |
Christopher Faulet | a3d2a16 | 2018-10-22 08:59:39 +0200 | [diff] [blame] | 880 | } |
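
/* Illustrative usage sketch only: change the URI of a request start-line while
 * keeping its method and version. The old parts are first saved into a trash
 * chunk because the start-line storage may move or be overwritten during the
 * replacement. <htx> is assumed valid and <blk> is assumed to be an
 * HTX_BLK_REQ_SL block; the new URI and the helper name are hypothetical.
 */
static inline struct htx_sl *example_set_req_uri(struct htx *htx, struct htx_blk *blk)
{
        struct buffer *temp = get_trash_chunk();
        struct htx_sl *sl = htx_get_blk_ptr(htx, blk);
        struct ist meth, vsn;

        /* save the current method and version outside of the HTX storage */
        chunk_memcat(temp, HTX_SL_REQ_MPTR(sl), HTX_SL_REQ_MLEN(sl));
        meth = ist2(temp->area, HTX_SL_REQ_MLEN(sl));
        chunk_memcat(temp, HTX_SL_REQ_VPTR(sl), HTX_SL_REQ_VLEN(sl));
        vsn = ist2(temp->area + meth.len, HTX_SL_REQ_VLEN(sl));

        /* sl->info and sl->flags are preserved by htx_replace_stline() */
        return htx_replace_stline(htx, blk, meth, ist("/new/path"), vsn);
}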
| 881 | |
Christopher Faulet | e071f0e | 2021-02-03 12:11:31 +0100 | [diff] [blame] | 882 | /* Reserves the maximum possible size for an HTX data block, by extending an |
| 883 | * existing one or by creating a new one. It returns a compound result with the |
| 884 | * HTX block and the position where new data must be inserted (0 for a new |
| 885 | * block). If an error occurs or if there is no space left, NULL is returned |
| 886 | * instead of a pointer to an HTX block. |
| 887 | */ |
| 888 | struct htx_ret htx_reserve_max_data(struct htx *htx) |
| 889 | { |
| 890 | struct htx_blk *blk, *tailblk; |
| 891 | uint32_t sz, room; |
| 892 | int32_t len = htx_free_data_space(htx); |
| 893 | |
| 894 | if (htx->head == -1) |
| 895 | goto rsv_new_block; |
| 896 | |
| 897 | if (!len) |
| 898 | return (struct htx_ret){.ret = 0, .blk = NULL}; |
| 899 | |
| 900 | /* get the tail block */ |
| 901 | tailblk = htx_get_tail_blk(htx); |
| 902 | if (tailblk == NULL) |
| 903 | goto rsv_new_block; |
| 904 | sz = htx_get_blksz(tailblk); |
| 905 | |
| 906 | /* Don't try to append data if the last inserted block is not of the |
| 907 | * same type */ |
| 908 | if (htx_get_blk_type(tailblk) != HTX_BLK_DATA) |
| 909 | goto rsv_new_block; |
| 910 | |
| 911 | /* |
| 912 | * Same type and enough space: append data |
| 913 | */ |
| 914 | if (!htx->head_addr) { |
| 915 | if (tailblk->addr+sz != htx->tail_addr) |
| 916 | goto rsv_new_block; |
| 917 | room = (htx_pos_to_addr(htx, htx->tail) - htx->tail_addr); |
| 918 | } |
| 919 | else { |
| 920 | if (tailblk->addr+sz != htx->head_addr) |
| 921 | goto rsv_new_block; |
| 922 | room = (htx->end_addr - htx->head_addr); |
| 923 | } |
| 924 | BUG_ON((int32_t)room < 0); |
| 925 | if (room < len) |
| 926 | len = room; |
| 927 | |
| 928 | append_data: |
| 929 | htx_change_blk_value_len(htx, tailblk, sz+len); |
| 930 | |
| 931 | BUG_ON((int32_t)htx->tail_addr < 0); |
| 932 | BUG_ON((int32_t)htx->head_addr < 0); |
| 933 | BUG_ON(htx->end_addr > htx->tail_addr); |
| 934 | BUG_ON(htx->head_addr > htx->end_addr); |
| 935 | return (struct htx_ret){.ret = sz, .blk = tailblk}; |
| 936 | |
| 937 | rsv_new_block: |
Christopher Faulet | e071f0e | 2021-02-03 12:11:31 +0100 | [diff] [blame] | 938 | blk = htx_add_blk(htx, HTX_BLK_DATA, len); |
| 939 | if (!blk) |
| 940 | return (struct htx_ret){.ret = 0, .blk = NULL}; |
| 941 | blk->info += len; |
| 942 | return (struct htx_ret){.ret = 0, .blk = blk}; |
| 943 | } |
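
/* Illustrative usage sketch only: reserve as much DATA room as possible, let a
 * producer fill it, then shrink the block to what was really written.
 * <produce> is a hypothetical callback returning the number of bytes written;
 * the helper name is hypothetical too.
 */
static inline size_t example_fill_data(struct htx *htx,
                                       size_t (*produce)(char *dst, size_t room))
{
        struct htx_ret htxret = htx_reserve_max_data(htx);
        size_t written;

        if (!htxret.blk)
                return 0;

        /* <ret> is the offset where the new payload starts inside the block */
        written = produce((char *)htx_get_blk_ptr(htx, htxret.blk) + htxret.ret,
                          htx_get_blksz(htxret.blk) - htxret.ret);

        /* give back the reserved space which was not used */
        htx_change_blk_value_len(htx, htxret.blk, htxret.ret + written);
        return written;
}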
Christopher Faulet | a3d2a16 | 2018-10-22 08:59:39 +0200 | [diff] [blame] | 944 | |
| 945 | /* Adds an HTX block of type DATA in <htx>. It first tries to append data if |
Willy Tarreau | 0a7ef02 | 2019-05-28 10:30:11 +0200 | [diff] [blame] | 946 | * possible. It returns the number of bytes consumed from <data>, which may be |
| 947 | * zero if nothing could be copied. |
Christopher Faulet | a3d2a16 | 2018-10-22 08:59:39 +0200 | [diff] [blame] | 948 | */ |
Willy Tarreau | 0a7ef02 | 2019-05-28 10:30:11 +0200 | [diff] [blame] | 949 | size_t htx_add_data(struct htx *htx, const struct ist data) |
Christopher Faulet | a3d2a16 | 2018-10-22 08:59:39 +0200 | [diff] [blame] | 950 | { |
Christopher Faulet | d7884d3 | 2019-06-11 10:40:43 +0200 | [diff] [blame] | 951 | struct htx_blk *blk, *tailblk; |
| 952 | void *ptr; |
| 953 | uint32_t sz, room; |
Willy Tarreau | 0350b90 | 2019-05-28 10:58:50 +0200 | [diff] [blame] | 954 | int32_t len = data.len; |
Willy Tarreau | 0a7ef02 | 2019-05-28 10:30:11 +0200 | [diff] [blame] | 955 | |
Willy Tarreau | 0350b90 | 2019-05-28 10:58:50 +0200 | [diff] [blame] | 956 | /* Clamp the length to the free data space */ |
| 957 | if (len > htx_free_data_space(htx)) |
| 958 | len = htx_free_data_space(htx); |
| 959 | |
| 960 | if (!len) |
| 961 | return 0; |
| 962 | |
Christopher Faulet | 28e7ba8 | 2022-01-12 14:03:42 +0100 | [diff] [blame] | 963 | if (htx->head == -1) |
| 964 | goto add_new_block; |
| 965 | |
Willy Tarreau | 0350b90 | 2019-05-28 10:58:50 +0200 | [diff] [blame] | 966 | /* get the tail block */ |
| 967 | tailblk = htx_get_tail_blk(htx); |
Christopher Faulet | d7884d3 | 2019-06-11 10:40:43 +0200 | [diff] [blame] | 968 | if (tailblk == NULL) |
Willy Tarreau | 0350b90 | 2019-05-28 10:58:50 +0200 | [diff] [blame] | 969 | goto add_new_block; |
Christopher Faulet | d7884d3 | 2019-06-11 10:40:43 +0200 | [diff] [blame] | 970 | sz = htx_get_blksz(tailblk); |
Willy Tarreau | 0350b90 | 2019-05-28 10:58:50 +0200 | [diff] [blame] | 971 | |
| 972 | /* Don't try to append data if the last inserted block is not of the |
| 973 | * same type */ |
| 974 | if (htx_get_blk_type(tailblk) != HTX_BLK_DATA) |
| 975 | goto add_new_block; |
| 976 | |
| 977 | /* |
| 978 | * Same type and enough space: append data |
| 979 | */ |
Christopher Faulet | d7884d3 | 2019-06-11 10:40:43 +0200 | [diff] [blame] | 980 | if (!htx->head_addr) { |
| 981 | if (tailblk->addr+sz != htx->tail_addr) |
Willy Tarreau | 0350b90 | 2019-05-28 10:58:50 +0200 | [diff] [blame] | 982 | goto add_new_block; |
Christopher Faulet | 2bf43f0 | 2019-06-12 11:28:11 +0200 | [diff] [blame] | 983 | room = (htx_pos_to_addr(htx, htx->tail) - htx->tail_addr); |
Willy Tarreau | 0350b90 | 2019-05-28 10:58:50 +0200 | [diff] [blame] | 984 | } |
Christopher Faulet | d7884d3 | 2019-06-11 10:40:43 +0200 | [diff] [blame] | 985 | else { |
| 986 | if (tailblk->addr+sz != htx->head_addr) |
| 987 | goto add_new_block; |
| 988 | room = (htx->end_addr - htx->head_addr); |
| 989 | } |
| 990 | BUG_ON((int32_t)room < 0); |
Willy Tarreau | 0350b90 | 2019-05-28 10:58:50 +0200 | [diff] [blame] | 991 | if (room < len) |
| 992 | len = room; |
| 993 | |
| 994 | append_data: |
Willy Tarreau | 0350b90 | 2019-05-28 10:58:50 +0200 | [diff] [blame] | 995 | /* Append data and update the block itself */ |
Christopher Faulet | d7884d3 | 2019-06-11 10:40:43 +0200 | [diff] [blame] | 996 | ptr = htx_get_blk_ptr(htx, tailblk); |
Willy Tarreau | 23aa79d | 2023-02-02 15:32:20 +0100 | [diff] [blame] | 997 | htx_memcpy(ptr + sz, data.ptr, len); |
Christopher Faulet | 3e2638e | 2019-06-18 09:49:16 +0200 | [diff] [blame] | 998 | htx_change_blk_value_len(htx, tailblk, sz+len); |
Christopher Faulet | d7884d3 | 2019-06-11 10:40:43 +0200 | [diff] [blame] | 999 | |
| 1000 | BUG_ON((int32_t)htx->tail_addr < 0); |
| 1001 | BUG_ON((int32_t)htx->head_addr < 0); |
| 1002 | BUG_ON(htx->end_addr > htx->tail_addr); |
| 1003 | BUG_ON(htx->head_addr > htx->end_addr); |
Willy Tarreau | 0350b90 | 2019-05-28 10:58:50 +0200 | [diff] [blame] | 1004 | return len; |
| 1005 | |
| 1006 | add_new_block: |
Willy Tarreau | 0350b90 | 2019-05-28 10:58:50 +0200 | [diff] [blame] | 1007 | blk = htx_add_blk(htx, HTX_BLK_DATA, len); |
| 1008 | if (!blk) |
Willy Tarreau | 0a7ef02 | 2019-05-28 10:30:11 +0200 | [diff] [blame] | 1009 | return 0; |
Willy Tarreau | 0350b90 | 2019-05-28 10:58:50 +0200 | [diff] [blame] | 1010 | |
| 1011 | blk->info += len; |
Willy Tarreau | 23aa79d | 2023-02-02 15:32:20 +0100 | [diff] [blame] | 1012 | htx_memcpy(htx_get_blk_ptr(htx, blk), data.ptr, len); |
Willy Tarreau | 0350b90 | 2019-05-28 10:58:50 +0200 | [diff] [blame] | 1013 | return len; |
Christopher Faulet | a3d2a16 | 2018-10-22 08:59:39 +0200 | [diff] [blame] | 1014 | } |
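
/* Illustrative usage sketch only: append a payload as DATA, keeping in mind
 * that htx_add_data() may perform a partial copy. It returns the number of
 * bytes which could not be copied and must be retried later once some room
 * was freed; the helper name is hypothetical.
 */
static inline size_t example_append_body(struct htx *htx, const struct ist body)
{
        size_t sent = htx_add_data(htx, body);

        return body.len - sent;
}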
| 1015 | |
Christopher Faulet | 86bc8df | 2019-06-11 10:38:38 +0200 | [diff] [blame] | 1016 | |
| 1017 | /* Adds an HTX block of type DATA in <htx> just after all other DATA |
| 1018 | * blocks. Because it relies on htx_add_data_atonce(), the data may be appended |
| 1019 | * to an existing DATA block if possible. But, if the function succeeds, it will |
| 1020 | * be the last DATA block in all cases. If an error occurs, NULL is returned. |
| 1021 | * Otherwise, on success, the updated block (or the new one) is returned. |
| 1022 | */ |
| 1023 | struct htx_blk *htx_add_last_data(struct htx *htx, struct ist data) |
Christopher Faulet | 24ed835 | 2018-11-22 11:20:43 +0100 | [diff] [blame] | 1024 | { |
Christopher Faulet | 86bc8df | 2019-06-11 10:38:38 +0200 | [diff] [blame] | 1025 | struct htx_blk *blk, *pblk; |
Christopher Faulet | 24ed835 | 2018-11-22 11:20:43 +0100 | [diff] [blame] | 1026 | |
Christopher Faulet | 86bc8df | 2019-06-11 10:38:38 +0200 | [diff] [blame] | 1027 | blk = htx_add_data_atonce(htx, data); |
Christopher Faulet | aa75b3d | 2018-12-05 16:20:40 +0100 | [diff] [blame] | 1028 | if (!blk) |
| 1029 | return NULL; |
Christopher Faulet | 24ed835 | 2018-11-22 11:20:43 +0100 | [diff] [blame] | 1030 | |
Christopher Faulet | 86bc8df | 2019-06-11 10:38:38 +0200 | [diff] [blame] | 1031 | for (pblk = htx_get_prev_blk(htx, blk); pblk; pblk = htx_get_prev_blk(htx, pblk)) { |
Christopher Faulet | 86bc8df | 2019-06-11 10:38:38 +0200 | [diff] [blame] | 1032 | if (htx_get_blk_type(pblk) <= HTX_BLK_DATA) |
| 1033 | break; |
Christopher Faulet | 24ed835 | 2018-11-22 11:20:43 +0100 | [diff] [blame] | 1034 | |
Christopher Faulet | 24ed835 | 2018-11-22 11:20:43 +0100 | [diff] [blame] | 1035 | /* Swap .addr and .info fields */ |
| 1036 | blk->addr ^= pblk->addr; pblk->addr ^= blk->addr; blk->addr ^= pblk->addr; |
| 1037 | blk->info ^= pblk->info; pblk->info ^= blk->info; blk->info ^= pblk->info; |
| 1038 | |
| 1039 | if (blk->addr == pblk->addr) |
| 1040 | blk->addr += htx_get_blksz(pblk); |
Christopher Faulet | 24ed835 | 2018-11-22 11:20:43 +0100 | [diff] [blame] | 1041 | blk = pblk; |
| 1042 | } |
Christopher Faulet | 05aab64 | 2019-04-11 13:43:57 +0200 | [diff] [blame] | 1043 | |
Christopher Faulet | 24ed835 | 2018-11-22 11:20:43 +0100 | [diff] [blame] | 1044 | return blk; |
| 1045 | } |
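
/* Illustrative usage sketch only: append a final piece of payload so that it
 * lands after all existing DATA blocks but before any trailer or end block.
 * The payload and the helper name are hypothetical.
 */
static inline struct htx_blk *example_add_final_part(struct htx *htx)
{
        return htx_add_last_data(htx, ist("final part"));
}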
Christopher Faulet | a3d2a16 | 2018-10-22 08:59:39 +0200 | [diff] [blame] | 1046 | |
Christopher Faulet | 86fcf6d | 2019-06-11 10:41:19 +0200 | [diff] [blame] | 1047 | /* Moves the block <blk> just before the block <ref>. Both blocks must be in the |
| 1048 | * HTX message <htx> and <blk> must be placed after <ref>. Pointers to these |
| 1049 | * blocks are updated to remain valid after the move. */ |
| 1050 | void htx_move_blk_before(struct htx *htx, struct htx_blk **blk, struct htx_blk **ref) |
| 1051 | { |
| 1052 | struct htx_blk *cblk, *pblk; |
| 1053 | |
| 1054 | cblk = *blk; |
| 1055 | for (pblk = htx_get_prev_blk(htx, cblk); pblk; pblk = htx_get_prev_blk(htx, pblk)) { |
| 1056 | /* Swap .addr and .info fields */ |
| 1057 | cblk->addr ^= pblk->addr; pblk->addr ^= cblk->addr; cblk->addr ^= pblk->addr; |
| 1058 | cblk->info ^= pblk->info; pblk->info ^= cblk->info; cblk->info ^= pblk->info; |
| 1059 | |
| 1060 | if (cblk->addr == pblk->addr) |
| 1061 | cblk->addr += htx_get_blksz(pblk); |
| 1062 | if (pblk == *ref) |
| 1063 | break; |
| 1064 | cblk = pblk; |
| 1065 | } |
| 1066 | *blk = cblk; |
| 1067 | *ref = pblk; |
| 1068 | } |
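
/* Illustrative usage sketch only: add a new block at the tail of the message
 * (so it is placed after <ref>) and move it just before <ref>. The header
 * name/value and the helper name are hypothetical.
 */
static inline struct htx_blk *example_insert_before(struct htx *htx, struct htx_blk *ref)
{
        struct htx_blk *blk = htx_add_header(htx, ist("x-example"), ist("1"));

        if (!blk)
                return NULL;
        htx_move_blk_before(htx, &blk, &ref);
        return blk;
}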
Christopher Faulet | 0ea0c86 | 2020-01-23 11:47:53 +0100 | [diff] [blame] | 1069 | |
| 1070 | /* Appends the HTX message <src> to the HTX message <dst>. It returns 1 on |
| 1071 | * success and 0 on error. Either the whole message is copied or nothing at |
| 1072 | * all. If an error occurs, the blocks already appended to <dst> are removed. |
| 1073 | */ |
| 1074 | int htx_append_msg(struct htx *dst, const struct htx *src) |
| 1075 | { |
| 1076 | struct htx_blk *blk, *newblk; |
| 1077 | enum htx_blk_type type; |
| 1078 | uint32_t blksz, offset = dst->data; |
| 1079 | |
| 1080 | for (blk = htx_get_head_blk(src); blk; blk = htx_get_next_blk(src, blk)) { |
| 1081 | type = htx_get_blk_type(blk); |
| 1082 | |
| 1083 | if (type == HTX_BLK_UNUSED) |
| 1084 | continue; |
| 1085 | |
| 1086 | blksz = htx_get_blksz(blk); |
| 1087 | newblk = htx_add_blk(dst, type, blksz); |
| 1088 | if (!newblk) |
| 1089 | goto error; |
| 1090 | newblk->info = blk->info; |
Willy Tarreau | 23aa79d | 2023-02-02 15:32:20 +0100 | [diff] [blame] | 1091 | htx_memcpy(htx_get_blk_ptr(dst, newblk), htx_get_blk_ptr(src, blk), blksz); |
Christopher Faulet | 0ea0c86 | 2020-01-23 11:47:53 +0100 | [diff] [blame] | 1092 | } |
| 1093 | |
| 1094 | return 1; |
| 1095 | |
| 1096 | error: |
| 1097 | htx_truncate(dst, offset); |
| 1098 | return 0; |
| 1099 | } |
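
/* Illustrative usage sketch only: copy a pre-built HTX message stored in a
 * buffer into <dst>, all or nothing. <msgbuf> is a hypothetical buffer already
 * containing a complete HTX message; the helper name is hypothetical too.
 */
static inline int example_copy_msg(struct htx *dst, const struct buffer *msgbuf)
{
        const struct htx *src = htxbuf(msgbuf);

        return htx_append_msg(dst, src);
}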