Wireshark 4.5.0
The Wireshark network protocol analyzer
pint.h
#ifndef __PINT_H__
#define __PINT_H__

#include <inttypes.h>
#include <string.h>     /* for memcpy() in the byte-swapping code paths below */

#include <glib.h>

/* Routines that take a possibly-unaligned pointer to a 16-bit, 24-bit,
 * 32-bit, 40-bit, ... 64-bit integral quantity, in a particular byte
 * order, and fetch the value and return it in host byte order.
 *
 * The pntohN() routines fetch big-endian values; the pletohN() routines
 * fetch little-endian values.
 */
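
/* Illustrative usage sketch (not part of the original header): fetching
 * fields from a possibly unaligned packet buffer; note that buf + 2 need
 * not be 4-byte aligned.  The byte values and field layout are made up
 * for the example.
 *
 * @code
 *   const uint8_t buf[] = { 0x12, 0x34, 0x78, 0x56, 0x34, 0x12 };
 *
 *   uint16_t be16 = pntoh16(buf);        // big-endian fetch    -> 0x1234
 *   uint32_t le32 = pletoh32(buf + 2);   // little-endian fetch -> 0x12345678
 * @endcode
 */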

/* On most architectures, accesses of 16, 32, and 64 bit quantities can be
 * heavily optimized. gcc and clang recognize the portable versions below
 * and, at -Os and higher, optimize them appropriately (for gcc, that
 * includes z/Architecture, PPC64, MIPS, etc.). Older versions don't do as
 * good a job with 16-bit accesses, though.
 *
 * Unfortunately, MSVC and icc (both the "classic" version and the new
 * LLVM-based Intel C Compiler) do not, according to Matt Godbolt's Compiler
 * Explorer (https://godbolt.org) as of the end of 2022. They *do* recognize
 * and optimize a memcpy-based approach (which avoids unaligned accesses on,
 * say, ARM32), though that requires byte-swapping appropriately.
 */

#if (defined(_MSC_VER) && !defined(__clang__)) || defined(__INTEL_COMPILER) || defined(__INTEL_LLVM_COMPILER)
/* MSVC or Intel C Compiler (Classic or new LLVM version), but not
 * clang-cl on Windows.
 */
/* Unfortunately, C23 did not fully accept the N3022 Modern Bit Utilities
 * proposal, so a standard byte-reverse function has been deferred to some
 * future version:
 * https://www.open-std.org/jtc1/sc22/wg14/www/docs/n3048.htm
 * https://www.open-std.org/jtc1/sc22/wg14/www/docs/n3022.htm
 *
 * So choose byteswap intrinsics we know we have.
 */
#if defined(_MSC_VER) && !defined(__INTEL_COMPILER) && !defined(__INTEL_LLVM_COMPILER) && !defined(__clang__)
/* Intel and clang-cl both define _MSC_VER when compiling on Windows for
 * greater compatibility (just as they define __GNUC__ on other platforms).
 * However, at least on some versions, while including the MSVC <stdlib.h>
 * provides access to the _byteswap_ intrinsics, they are not actually
 * optimized into a single x86 BSWAP instruction, unlike the gcc-style
 * intrinsics (which both support). See: https://stackoverflow.com/q/72327906
 */
#include <stdlib.h> // For MSVC _byteswap intrinsics
#define pint_bswap16(x) _byteswap_ushort(x)
#define pint_bswap32(x) _byteswap_ulong(x)
/* Hopefully MSVC never decides that a long is 64 bit. */
#define pint_bswap64(x) _byteswap_uint64(x)
#elif defined(__INTEL_COMPILER)
/* The (deprecated) Intel C++ Compiler Classic has these byteswap intrinsics.
 * It also has the GCC-style intrinsics, though __builtin_bswap16 wasn't
 * added until some point after icc 13.0 (it is present by 16.0), reflecting
 * that it wasn't added to gcc until 4.8.
 */
#define pint_bswap16(x) _bswap16(x)
#define pint_bswap32(x) _bswap32(x)
#define pint_bswap64(x) _bswap64(x)
#else
/* GCC-style __builtin_bswap intrinsics */
/* The new LLVM-based Intel C++ Compiler doesn't have the above intrinsics,
 * but it always has all the GCC intrinsics.
 */
/* The __builtin_bswap32 and __builtin_bswap64 intrinsics have been supported
 * for a long time in gcc (since 4.1) and clang (before 3.0), versions that
 * predate the C11 and C++11 support we require, so we can assume we have
 * them.
 *
 * __builtin_bswap16 was added a bit later, in gcc 4.8 and clang 3.2. While
 * those versions or later are required for full C11 and C++11 support,
 * some earlier versions claim to support C11 and C++11 in ways that might
 * allow them to get past CMake. We don't use this codepath for those
 * compilers because they heavily optimize the portable versions, though.
 */
#define pint_bswap16(x) __builtin_bswap16(x)
#define pint_bswap32(x) __builtin_bswap32(x)
#define pint_bswap64(x) __builtin_bswap64(x)
#endif
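
/* For reference (illustrative, not part of the original header), the
 * pint_bswap* macros reverse byte order regardless of which intrinsic
 * was selected above:
 *
 * @code
 *   pint_bswap16(0x1234)             == 0x3412
 *   pint_bswap32(0x11223344)         == 0x44332211
 *   pint_bswap64(0x1122334455667788) == 0x8877665544332211
 * @endcode
 */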

static inline uint16_t pntoh16(const void *p)
{
    uint16_t ret;
    memcpy(&ret, p, sizeof(ret));
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
    ret = pint_bswap16(ret);
#endif
    return ret;
}

static inline uint32_t pntoh32(const void *p)
{
    uint32_t ret;
    memcpy(&ret, p, sizeof(ret));
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
    ret = pint_bswap32(ret);
#endif
    return ret;
}

static inline uint64_t pntoh64(const void *p)
{
    uint64_t ret;
    memcpy(&ret, p, sizeof(ret));
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
    ret = pint_bswap64(ret);
#endif
    return ret;
}

static inline uint16_t pletoh16(const void *p)
{
    uint16_t ret;
    memcpy(&ret, p, sizeof(ret));
#if G_BYTE_ORDER == G_BIG_ENDIAN
    ret = pint_bswap16(ret);
#endif
    return ret;
}

static inline uint32_t pletoh32(const void *p)
{
    uint32_t ret;
    memcpy(&ret, p, sizeof(ret));
#if G_BYTE_ORDER == G_BIG_ENDIAN
    ret = pint_bswap32(ret);
#endif
    return ret;
}

static inline uint64_t pletoh64(const void *p)
{
    uint64_t ret;
    memcpy(&ret, p, sizeof(ret));
#if G_BYTE_ORDER == G_BIG_ENDIAN
    ret = pint_bswap64(ret);
#endif
    return ret;
}

static inline void phton16(uint8_t *p, uint16_t v)
{
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
    v = pint_bswap16(v);
#endif
    memcpy(p, &v, sizeof(v));
}

static inline void phton32(uint8_t *p, uint32_t v)
{
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
    v = pint_bswap32(v);
#endif
    memcpy(p, &v, sizeof(v));
}

static inline void phton64(uint8_t *p, uint64_t v)
{
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
    v = pint_bswap64(v);
#endif
    memcpy(p, &v, sizeof(v));
}

static inline void phtole32(uint8_t *p, uint32_t v)
{
#if G_BYTE_ORDER == G_BIG_ENDIAN
    v = pint_bswap32(v);
#endif
    memcpy(p, &v, sizeof(v));
}

static inline void phtole64(uint8_t *p, uint64_t v)
{
#if G_BYTE_ORDER == G_BIG_ENDIAN
    v = pint_bswap64(v);
#endif
    memcpy(p, &v, sizeof(v));
}

#else
/* Portable functions */
static inline uint16_t pntoh16(const void *p)
{
    return (uint16_t)*((const uint8_t *)(p)+0)<<8|
           (uint16_t)*((const uint8_t *)(p)+1)<<0;
}

static inline uint32_t pntoh32(const void *p)
{
    return (uint32_t)*((const uint8_t *)(p)+0)<<24|
           (uint32_t)*((const uint8_t *)(p)+1)<<16|
           (uint32_t)*((const uint8_t *)(p)+2)<<8|
           (uint32_t)*((const uint8_t *)(p)+3)<<0;
}

static inline uint64_t pntoh64(const void *p)
{
    return (uint64_t)*((const uint8_t *)(p)+0)<<56|
           (uint64_t)*((const uint8_t *)(p)+1)<<48|
           (uint64_t)*((const uint8_t *)(p)+2)<<40|
           (uint64_t)*((const uint8_t *)(p)+3)<<32|
           (uint64_t)*((const uint8_t *)(p)+4)<<24|
           (uint64_t)*((const uint8_t *)(p)+5)<<16|
           (uint64_t)*((const uint8_t *)(p)+6)<<8|
           (uint64_t)*((const uint8_t *)(p)+7)<<0;
}

static inline uint16_t pletoh16(const void *p)
{
    return (uint16_t)*((const uint8_t *)(p)+1)<<8|
           (uint16_t)*((const uint8_t *)(p)+0)<<0;
}

static inline uint32_t pletoh32(const void *p)
{
    return (uint32_t)*((const uint8_t *)(p)+3)<<24|
           (uint32_t)*((const uint8_t *)(p)+2)<<16|
           (uint32_t)*((const uint8_t *)(p)+1)<<8|
           (uint32_t)*((const uint8_t *)(p)+0)<<0;
}

static inline uint64_t pletoh64(const void *p)
{
    return (uint64_t)*((const uint8_t *)(p)+7)<<56|
           (uint64_t)*((const uint8_t *)(p)+6)<<48|
           (uint64_t)*((const uint8_t *)(p)+5)<<40|
           (uint64_t)*((const uint8_t *)(p)+4)<<32|
           (uint64_t)*((const uint8_t *)(p)+3)<<24|
           (uint64_t)*((const uint8_t *)(p)+2)<<16|
           (uint64_t)*((const uint8_t *)(p)+1)<<8|
           (uint64_t)*((const uint8_t *)(p)+0)<<0;
}

/* Pointer routines to put items out in a particular byte order.
 * These will work regardless of the byte alignment of the pointer.
 */

static inline void phton16(uint8_t *p, uint16_t v)
{
    p[0] = (uint8_t)(v >> 8);
    p[1] = (uint8_t)(v >> 0);
}

static inline void phton32(uint8_t *p, uint32_t v)
{
    p[0] = (uint8_t)(v >> 24);
    p[1] = (uint8_t)(v >> 16);
    p[2] = (uint8_t)(v >> 8);
    p[3] = (uint8_t)(v >> 0);
}

static inline void phton64(uint8_t *p, uint64_t v)
{
    p[0] = (uint8_t)(v >> 56);
    p[1] = (uint8_t)(v >> 48);
    p[2] = (uint8_t)(v >> 40);
    p[3] = (uint8_t)(v >> 32);
    p[4] = (uint8_t)(v >> 24);
    p[5] = (uint8_t)(v >> 16);
    p[6] = (uint8_t)(v >> 8);
    p[7] = (uint8_t)(v >> 0);
}

static inline void phtole32(uint8_t *p, uint32_t v)
{
    p[0] = (uint8_t)(v >> 0);
    p[1] = (uint8_t)(v >> 8);
    p[2] = (uint8_t)(v >> 16);
    p[3] = (uint8_t)(v >> 24);
}

static inline void phtole64(uint8_t *p, uint64_t v)
{
    p[0] = (uint8_t)(v >> 0);
    p[1] = (uint8_t)(v >> 8);
    p[2] = (uint8_t)(v >> 16);
    p[3] = (uint8_t)(v >> 24);
    p[4] = (uint8_t)(v >> 32);
    p[5] = (uint8_t)(v >> 40);
    p[6] = (uint8_t)(v >> 48);
    p[7] = (uint8_t)(v >> 56);
}
#endif
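
/* Illustrative usage sketch (not part of the original header): writing
 * fields into a byte buffer in a specific byte order.  The field layout
 * is made up for the example; either branch above provides the same
 * behavior.
 *
 * @code
 *   uint8_t hdr[6];
 *
 *   phton16(hdr, 0x1234);           // hdr[0..1] = 0x12, 0x34 (big-endian)
 *   phtole32(hdr + 2, 0x12345678);  // hdr[2..5] = 0x78, 0x56, 0x34, 0x12
 * @endcode
 */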

static inline uint32_t pntoh24(const void *p)
{
    return (uint32_t)*((const uint8_t *)(p)+0)<<16|
           (uint32_t)*((const uint8_t *)(p)+1)<<8|
           (uint32_t)*((const uint8_t *)(p)+2)<<0;
}

static inline uint64_t pntoh40(const void *p)
{
    return (uint64_t)*((const uint8_t *)(p)+0)<<32|
           (uint64_t)*((const uint8_t *)(p)+1)<<24|
           (uint64_t)*((const uint8_t *)(p)+2)<<16|
           (uint64_t)*((const uint8_t *)(p)+3)<<8|
           (uint64_t)*((const uint8_t *)(p)+4)<<0;
}

static inline uint64_t pntoh48(const void *p)
{
    return (uint64_t)*((const uint8_t *)(p)+0)<<40|
           (uint64_t)*((const uint8_t *)(p)+1)<<32|
           (uint64_t)*((const uint8_t *)(p)+2)<<24|
           (uint64_t)*((const uint8_t *)(p)+3)<<16|
           (uint64_t)*((const uint8_t *)(p)+4)<<8|
           (uint64_t)*((const uint8_t *)(p)+5)<<0;
}

static inline uint64_t pntoh56(const void *p)
{
    return (uint64_t)*((const uint8_t *)(p)+0)<<48|
           (uint64_t)*((const uint8_t *)(p)+1)<<40|
           (uint64_t)*((const uint8_t *)(p)+2)<<32|
           (uint64_t)*((const uint8_t *)(p)+3)<<24|
           (uint64_t)*((const uint8_t *)(p)+4)<<16|
           (uint64_t)*((const uint8_t *)(p)+5)<<8|
           (uint64_t)*((const uint8_t *)(p)+6)<<0;
}

static inline uint32_t pletoh24(const void *p)
{
    return (uint32_t)*((const uint8_t *)(p)+2)<<16|
           (uint32_t)*((const uint8_t *)(p)+1)<<8|
           (uint32_t)*((const uint8_t *)(p)+0)<<0;
}

static inline uint64_t pletoh40(const void *p)
{
    return (uint64_t)*((const uint8_t *)(p)+4)<<32|
           (uint64_t)*((const uint8_t *)(p)+3)<<24|
           (uint64_t)*((const uint8_t *)(p)+2)<<16|
           (uint64_t)*((const uint8_t *)(p)+1)<<8|
           (uint64_t)*((const uint8_t *)(p)+0)<<0;
}

static inline uint64_t pletoh48(const void *p)
{
    return (uint64_t)*((const uint8_t *)(p)+5)<<40|
           (uint64_t)*((const uint8_t *)(p)+4)<<32|
           (uint64_t)*((const uint8_t *)(p)+3)<<24|
           (uint64_t)*((const uint8_t *)(p)+2)<<16|
           (uint64_t)*((const uint8_t *)(p)+1)<<8|
           (uint64_t)*((const uint8_t *)(p)+0)<<0;
}

static inline uint64_t pletoh56(const void *p)
{
    return (uint64_t)*((const uint8_t *)(p)+6)<<48|
           (uint64_t)*((const uint8_t *)(p)+5)<<40|
           (uint64_t)*((const uint8_t *)(p)+4)<<32|
           (uint64_t)*((const uint8_t *)(p)+3)<<24|
           (uint64_t)*((const uint8_t *)(p)+2)<<16|
           (uint64_t)*((const uint8_t *)(p)+1)<<8|
           (uint64_t)*((const uint8_t *)(p)+0)<<0;
}
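
/* Illustrative usage sketch (not part of the original header): the odd-width
 * fetchers are handy for fields whose size is not a power of two, such as
 * 24-bit lengths or 48-bit counters.  The byte values here are made up for
 * the example.
 *
 * @code
 *   const uint8_t buf[] = { 0x01, 0x86, 0xA0, 0x2A, 0x00, 0x00, 0x00, 0x00, 0x00 };
 *
 *   uint32_t len24 = pntoh24(buf);       // 0x0186A0 (100000), big-endian
 *   uint64_t ctr48 = pletoh48(buf + 3);  // 0x00000000002A (42), little-endian
 * @endcode
 */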

#endif /* __PINT_H__ */

/*
 * Editor modelines - https://www.wireshark.org/tools/modelines.html
 *
 * Local Variables:
 * c-basic-offset: 4
 * tab-width: 8
 * indent-tabs-mode: nil
 * End:
 *
 * ex: set shiftwidth=4 tabstop=8 expandtab:
 * :indentSize=4:tabSize=8:noTabs=true:
 */