/*
 * include/common/net_helper.h
 * This file contains miscellaneous network helper functions.
 *
 * Copyright (C) 2017 Olivier Houchard
 * Copyright (C) 2017 Willy Tarreau
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef _COMMON_NET_HELPER_H
#define _COMMON_NET_HELPER_H

#include <haproxy/api.h>
#include <common/standard.h>
#include <arpa/inet.h>

/* Functions to read/write various integers that may be unaligned */

/* Read a uint16_t in native host order */
static inline uint16_t read_u16(const void *p)
{
	const union { uint16_t u16; } __attribute__((packed))*u = p;
	return u->u16;
}

/* Write a uint16_t in native host order */
static inline void write_u16(void *p, const uint16_t u16)
{
	union { uint16_t u16; } __attribute__((packed))*u = p;
	u->u16 = u16;
}

/* Read a uint32_t in native host order */
static inline uint32_t read_u32(const void *p)
{
	const union { uint32_t u32; } __attribute__((packed))*u = p;
	return u->u32;
}

/* Write a uint32_t in native host order */
static inline void write_u32(void *p, const uint32_t u32)
{
	union { uint32_t u32; } __attribute__((packed))*u = p;
	u->u32 = u32;
}

/* Read a uint64_t in native host order */
static inline uint64_t read_u64(const void *p)
{
	const union { uint64_t u64; } __attribute__((packed))*u = p;
	return u->u64;
}

/* Write a uint64_t in native host order */
static inline void write_u64(void *p, const uint64_t u64)
{
	union { uint64_t u64; } __attribute__((packed))*u = p;
	u->u64 = u64;
}
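
/* Usage sketch (illustrative): the helpers above make it safe to pull
 * integers out of arbitrary, possibly unaligned byte offsets. A minimal
 * sketch, assuming a hypothetical record layout made of a 1-byte tag,
 * a 2-byte id and a 4-byte host-order length:
 */
static inline uint32_t example_read_record_len(const void *rec)
{
	/* rec + 3 is generally not 4-byte aligned; dereferencing a plain
	 * uint32_t pointer there may fault on strict-alignment targets,
	 * while read_u32() lets the compiler emit a safe access.
	 */
	return read_u32((const uint8_t *)rec + 3);
}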

/* Read a possibly wrapping number of bytes <bytes> into destination <dst>. The
 * first segment is composed of <s1> bytes at p1. The remaining byte(s), if any,
 * are read from <p2>. <s1> may be zero and may also be larger than <bytes>. The
 * caller is always responsible for providing enough bytes. Note: the function
 * is purposely *not* marked inline to let the compiler decide what to do with
 * it, because it's around 34 bytes long, placed on the critical path but rarely
 * called, and uses a lot of arguments if not inlined. The compiler will thus
 * decide what's best to do with it depending on the context.
 */
static void readv_bytes(void *dst, const size_t bytes, const void *p1, size_t s1, const void *p2)
{
	size_t idx;

	p2 -= s1;
	for (idx = 0; idx < bytes; idx++) {
		if (idx == s1)
			p1 = p2;
		((uint8_t *)dst)[idx] = ((const uint8_t *)p1)[idx];
	}
	/* this memory barrier is critical otherwise gcc may over-optimize this
	 * code, completely removing it as well as any surrounding boundary
	 * check (4.7.1..6.4.0)!
	 */
	__asm__ volatile("" ::: "memory");
}

/* Write a possibly wrapping number of bytes <bytes> from location <src>. The
 * first segment is composed of <s1> bytes at p1. The remaining byte(s), if any,
 * are written to <p2>. <s1> may be zero and may also be larger than <bytes>.
 * The caller is always responsible for providing enough room. Note: the
 * function is purposely *not* marked inline to let the compiler decide what to
 * do with it, because it's around 34 bytes long, placed on the critical path
 * but rarely called, and uses a lot of arguments if not inlined. The compiler
 * will thus decide what's best to do with it depending on the context.
 */
static void writev_bytes(const void *src, const size_t bytes, void *p1, size_t s1, void *p2)
{
	size_t idx;

	p2 -= s1;
	for (idx = 0; idx < bytes; idx++) {
		if (idx == s1)
			p1 = p2;
		((uint8_t *)p1)[idx] = ((const uint8_t *)src)[idx];
	}
}
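
/* Usage sketch (illustrative): readv_bytes()/writev_bytes() copy values which
 * may wrap at the end of a circular buffer. <buf>, <size> and <ofs> below are
 * hypothetical ring-buffer fields of the caller; when s1 >= <bytes> the second
 * pointer is never dereferenced, so the buffer's start can always be passed
 * as <p2>.
 */
static inline uint64_t example_ring_peek_u64(const uint8_t *buf, size_t size, size_t ofs)
{
	uint64_t v;
	/* contiguous bytes available before the wrap point */
	size_t s1 = size - ofs;

	readv_bytes(&v, sizeof(v), buf + ofs, s1, buf);
	return v;
}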

/* Read a possibly wrapping uint16_t in native host order. The first segment is
 * composed of <s1> bytes at p1. The remaining byte(s), if any, are read from
 * <p2>. <s1> may be zero and may be larger than the type. The caller is always
 * responsible for providing enough bytes.
 */
static inline uint16_t readv_u16(const void *p1, size_t s1, const void *p2)
{
	if (unlikely(s1 == 1)) {
		volatile uint16_t u16;

		((uint8_t *)&u16)[0] = *(uint8_t *)p1;
		((uint8_t *)&u16)[1] = *(uint8_t *)p2;
		return u16;
	}
	else {
		const union { uint16_t u16; } __attribute__((packed)) *u;

		u = (s1 == 0) ? p2 : p1;
		return u->u16;
	}
}

/* Write a possibly wrapping uint16_t in native host order. The first segment is
 * composed of <s1> bytes at p1. The remaining byte(s), if any, are written to
 * <p2>. <s1> may be zero and may be larger than the type. The caller is always
 * responsible for providing enough room.
 */
static inline void writev_u16(void *p1, size_t s1, void *p2, const uint16_t u16)
{
	union { uint16_t u16; } __attribute__((packed)) *u;

	if (unlikely(s1 == 1)) {
		*(uint8_t *)p1 = ((const uint8_t *)&u16)[0];
		*(uint8_t *)p2 = ((const uint8_t *)&u16)[1];
	}
	else {
		u = (s1 == 0) ? p2 : p1;
		u->u16 = u16;
	}
}

/* Read a possibly wrapping uint32_t in native host order. The first segment is
 * composed of <s1> bytes at p1. The remaining byte(s), if any, are read from
 * <p2>. <s1> may be zero and may be larger than the type. The caller is always
 * responsible for providing enough bytes.
 */
static inline uint32_t readv_u32(const void *p1, size_t s1, const void *p2)
{
	uint32_t u32;

	if (likely(s1 >= sizeof(u32)))
		u32 = read_u32(p1);
	else
		readv_bytes(&u32, sizeof(u32), p1, s1, p2);
	return u32;
}

/* Write a possibly wrapping uint32_t in native host order. The first segment is
 * composed of <s1> bytes at p1. The remaining byte(s), if any, are written to
 * <p2>. <s1> may be zero and may be larger than the type. The caller is always
 * responsible for providing enough room.
 */
static inline void writev_u32(void *p1, size_t s1, void *p2, const uint32_t u32)
{
	if (likely(s1 >= sizeof(u32)))
		write_u32(p1, u32);
	else
		writev_bytes(&u32, sizeof(u32), p1, s1, p2);
}

/* Read a possibly wrapping uint64_t in native host order. The first segment is
 * composed of <s1> bytes at p1. The remaining byte(s), if any, are read from
 * <p2>. <s1> may be zero and may be larger than the type. The caller is always
 * responsible for providing enough bytes.
 */
static inline uint64_t readv_u64(const void *p1, size_t s1, const void *p2)
{
	uint64_t u64;

	if (likely(s1 >= sizeof(u64)))
		u64 = read_u64(p1);
	else
		readv_bytes(&u64, sizeof(u64), p1, s1, p2);
	return u64;
}

/* Write a possibly wrapping uint64_t in native host order. The first segment is
 * composed of <s1> bytes at p1. The remaining byte(s), if any, are written to
 * <p2>. <s1> may be zero and may be larger than the type. The caller is always
 * responsible for providing enough room.
 */
static inline void writev_u64(void *p1, size_t s1, void *p2, const uint64_t u64)
{
	if (likely(s1 >= sizeof(u64)))
		write_u64(p1, u64);
	else
		writev_bytes(&u64, sizeof(u64), p1, s1, p2);
}
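
/* Usage sketch (illustrative): bumping a 32-bit host-order counter stored at
 * offset <ofs> of a circular buffer <buf> of <size> bytes (all three names
 * are hypothetical caller-side variables). The fast path (s1 >= sizeof(u32))
 * is a plain unaligned access, the slow path falls back to
 * readv_bytes()/writev_bytes().
 */
static inline void example_ring_inc_u32(uint8_t *buf, size_t size, size_t ofs)
{
	size_t s1 = size - ofs;
	uint32_t v = readv_u32(buf + ofs, s1, buf);

	writev_u32(buf + ofs, s1, buf, v + 1);
}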

/* Signed integer versions: return the same data but signed */

/* Read an int16_t in native host order */
static inline int16_t read_i16(const void *p)
{
	return read_u16(p);
}

/* Read an int32_t in native host order */
static inline int32_t read_i32(const void *p)
{
	return read_u32(p);
}

/* Read an int64_t in native host order */
static inline int64_t read_i64(const void *p)
{
	return read_u64(p);
}

/* Read a possibly wrapping int16_t in native host order */
static inline int16_t readv_i16(const void *p1, size_t s1, const void *p2)
{
	return readv_u16(p1, s1, p2);
}

/* Read a possibly wrapping int32_t in native host order */
static inline int32_t readv_i32(const void *p1, size_t s1, const void *p2)
{
	return readv_u32(p1, s1, p2);
}

/* Read a possibly wrapping int64_t in native host order */
static inline int64_t readv_i64(const void *p1, size_t s1, const void *p2)
{
	return readv_u64(p1, s1, p2);
}

/* Read a uint16_t, and convert from network order to host order */
static inline uint16_t read_n16(const void *p)
{
	return ntohs(read_u16(p));
}

/* Write a uint16_t after converting it from host order to network order */
static inline void write_n16(void *p, const uint16_t u16)
{
	write_u16(p, htons(u16));
}

/* Read a uint32_t, and convert from network order to host order */
static inline uint32_t read_n32(const void *p)
{
	return ntohl(read_u32(p));
}

/* Write a uint32_t after converting it from host order to network order */
static inline void write_n32(void *p, const uint32_t u32)
{
	write_u32(p, htonl(u32));
}

/* Read a uint64_t, and convert from network order to host order */
static inline uint64_t read_n64(const void *p)
{
	return my_ntohll(read_u64(p));
}

/* Write a uint64_t after converting it from host order to network order */
static inline void write_n64(void *p, const uint64_t u64)
{
	write_u64(p, my_htonll(u64));
}
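
/* Usage sketch (illustrative): the _n variants combine the unaligned access
 * with the byte-order conversion, which is what binary protocol parsers
 * usually need. A minimal sketch, assuming a hypothetical TLV record whose
 * 16-bit type and 32-bit length are sent in network order:
 */
static inline uint32_t example_tlv_length(const void *tlv)
{
	/* the length starts right after the 2-byte type, so it is usually
	 * misaligned; read_n32() handles both concerns at once.
	 */
	return read_n32((const uint8_t *)tlv + 2);
}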

/* Read a possibly wrapping uint16_t in network order. The first segment is
 * composed of <s1> bytes at p1. The remaining byte(s), if any, are read from
 * <p2>. <s1> may be zero and may be larger than the type. The caller is always
 * responsible for providing enough bytes.
 */
static inline uint16_t readv_n16(const void *p1, size_t s1, const void *p2)
{
	if (unlikely(s1 < 2)) {
		if (s1 == 0)
			p1 = p2++;
	}
	else
		p2 = p1 + 1;
	return (*(uint8_t *)p1 << 8) + *(uint8_t *)p2;
}

/* Write a possibly wrapping uint16_t in network order. The first segment is
 * composed of <s1> bytes at p1. The remaining byte(s), if any, are written to
 * <p2>. <s1> may be zero and may be larger than the type. The caller is always
 * responsible for providing enough room.
 */
static inline void writev_n16(void *p1, size_t s1, void *p2, const uint16_t u16)
{
	if (unlikely(s1 < 2)) {
		if (s1 == 0)
			p1 = p2++;
	}
	else
		p2 = p1 + 1;
	*(uint8_t *)p1 = u16 >> 8;
	*(uint8_t *)p2 = u16;
}

/* Read a possibly wrapping uint32_t in network order. The first segment is
 * composed of <s1> bytes at p1. The remaining byte(s), if any, are read from
 * <p2>. <s1> may be zero and may be larger than the type. The caller is always
 * responsible for providing enough bytes.
 */
static inline uint32_t readv_n32(const void *p1, size_t s1, const void *p2)
{
	return ntohl(readv_u32(p1, s1, p2));
}

/* Write a possibly wrapping uint32_t in network order. The first segment is
 * composed of <s1> bytes at p1. The remaining byte(s), if any, are written to
 * <p2>. <s1> may be zero and may be larger than the type. The caller is always
 * responsible for providing enough room.
 */
static inline void writev_n32(void *p1, size_t s1, void *p2, const uint32_t u32)
{
	writev_u32(p1, s1, p2, htonl(u32));
}

/* Read a possibly wrapping uint64_t in network order. The first segment is
 * composed of <s1> bytes at p1. The remaining byte(s), if any, are read from
 * <p2>. <s1> may be zero and may be larger than the type. The caller is always
 * responsible for providing enough bytes.
 */
static inline uint64_t readv_n64(const void *p1, size_t s1, const void *p2)
{
	return my_ntohll(readv_u64(p1, s1, p2));
}

/* Write a possibly wrapping uint64_t in network order. The first segment is
 * composed of <s1> bytes at p1. The remaining byte(s), if any, are written to
 * <p2>. <s1> may be zero and may be larger than the type. The caller is always
 * responsible for providing enough room.
 */
static inline void writev_n64(void *p1, size_t s1, void *p2, const uint64_t u64)
{
	writev_u64(p1, s1, p2, my_htonll(u64));
}
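
/* Usage sketch (illustrative): reading a 32-bit network-order frame length
 * that may straddle the wrap point of a circular buffer (<buf>, <size> and
 * <ofs> are hypothetical caller-side variables):
 */
static inline uint32_t example_ring_frame_len(const uint8_t *buf, size_t size, size_t ofs)
{
	size_t s1 = size - ofs;

	return readv_n32(buf + ofs, s1, buf);
}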

#endif /* _COMMON_NET_HELPER_H */