/*
 * Copyright (c) 1992, 1993, 1994, 1995, 1996
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that: (1) source code distributions
 * retain the above copyright notice and this paragraph in its entirety, (2)
 * distributions including binary code include the above copyright notice and
 * this paragraph in its entirety in the documentation or other materials
 * provided with the distribution, and (3) all advertising materials mentioning
 * features or use of this software display the following acknowledgement:
 * ``This product includes software developed by the University of California,
 * Lawrence Berkeley Laboratory and its contributors.''  Neither the name of
 * the University nor the names of its contributors may be used to endorse
 * or promote products derived from this software without specific prior
 * written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * @(#) $Header: /tcpdump/master/tcpdump/extract.h,v 1.25 2006-01-30 16:20:07 hannes Exp $ (LBL)
 */

/*
 * Macros to extract possibly-unaligned big-endian integral values.
 */
#ifdef LBL_ALIGN
/*
 * The processor doesn't natively handle unaligned loads.
 */
#if defined(__GNUC__) && defined(HAVE___ATTRIBUTE__) && \
    (defined(__alpha) || defined(__alpha__) || \
     defined(__mips) || defined(__mips__))

/*
 * This is a GCC-compatible compiler and we have __attribute__, which
 * we assume means we have __attribute__((packed)), and this is
 * MIPS or Alpha, which have instructions that can help when doing
 * unaligned loads.
 *
 * Declare packed structures containing a u_int16_t and a u_int32_t,
 * cast the pointer to point to one of those, and fetch through it;
 * the GCC manual doesn't appear to explicitly say that
 * __attribute__((packed)) causes the compiler to generate unaligned-safe
 * code, but it appears to do so.
 *
 * We do this in case the compiler can generate code using those
 * instructions to do an unaligned load and pass stuff to "ntohs()" or
 * "ntohl()", which might be better than the code to fetch the
 * bytes one at a time and assemble them.  (That might not be the
 * case on a little-endian platform, such as DEC's MIPS machines and
 * Alpha machines, where "ntohs()" and "ntohl()" might not be done
 * inline.)
 *
 * We do this only for specific architectures because, for example,
 * at least some versions of GCC, when compiling for 64-bit SPARC,
 * generate code that assumes alignment if we do this.
 *
 * XXX - add other architectures and compilers as possible and
 * appropriate.
 *
 * HP's C compiler, indicated by __HP_cc being defined, supports
 * "#pragma unalign N" in version A.05.50 and later, where "N"
 * specifies a number of bytes at which the typedef on the next
 * line is aligned, e.g.
 *
 *	#pragma unalign 1
 *	typedef u_int16_t unaligned_u_int16_t;
 *
 * to define unaligned_u_int16_t as a 16-bit unaligned data type.
 * This could presumably be used, in sufficiently recent versions of
 * the compiler, with macros similar to those below.  This would be
 * useful only if that compiler could generate better code for PA-RISC
 * or Itanium than would be generated by a bunch of shift-and-ORs.
 *
 * DEC C, indicated by __DECC being defined, has, at least on Alpha,
 * an __unaligned qualifier that can be applied to pointers to get the
 * compiler to generate code that does unaligned loads and stores when
 * dereferencing the pointer in question.
 *
 * XXX - what if the native C compiler doesn't support
 * __attribute__((packed))?  How can we get it to generate unaligned
 * accesses for *specific* items?
 */
typedef struct {
	u_int16_t	val;
} __attribute__((packed)) unaligned_u_int16_t;

typedef struct {
	u_int32_t	val;
} __attribute__((packed)) unaligned_u_int32_t;

static inline u_int16_t
EXTRACT_16BITS(const void *p)
{
	return ((u_int16_t)ntohs(((const unaligned_u_int16_t *)(p))->val));
}

static inline u_int32_t
EXTRACT_32BITS(const void *p)
{
	return ((u_int32_t)ntohl(((const unaligned_u_int32_t *)(p))->val));
}

static inline u_int64_t
EXTRACT_64BITS(const void *p)
{
	return ((u_int64_t)(((u_int64_t)ntohl(((const unaligned_u_int32_t *)(p) + 0)->val)) << 32 |
		((u_int64_t)ntohl(((const unaligned_u_int32_t *)(p) + 1)->val)) << 0));
}
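
/*
 * Illustrative sketch (an addition, not in the original header): the
 * packed-struct casts above are what let a call such as
 *
 *	u_int32_t v = EXTRACT_32BITS(pkt + 1);
 *
 * avoid an alignment trap even though pkt + 1 may be misaligned: the
 * compiler is free to expand the fetch into the architecture's
 * unaligned-load sequence (e.g. ldq_u plus an extract on Alpha, or
 * lwl/lwr on MIPS), with ntohl() then swapping the bytes on
 * little-endian hosts.  "pkt" here is a hypothetical const u_int8_t
 * pointer into packet data.
 */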
#else /* have to do it a byte at a time */
/*
 * This isn't a GCC-compatible compiler, we don't have __attribute__,
 * or we do but we don't know of any better way with this instruction
 * set to do unaligned loads, so do unaligned loads of big-endian
 * quantities the hard way - fetch the bytes one at a time and
 * assemble them.
 */
#define EXTRACT_16BITS(p) \
	((u_int16_t)((u_int16_t)*((const u_int8_t *)(p) + 0) << 8 | \
	             (u_int16_t)*((const u_int8_t *)(p) + 1)))
#define EXTRACT_32BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 0) << 24 | \
	             (u_int32_t)*((const u_int8_t *)(p) + 1) << 16 | \
	             (u_int32_t)*((const u_int8_t *)(p) + 2) << 8 | \
	             (u_int32_t)*((const u_int8_t *)(p) + 3)))
#define EXTRACT_64BITS(p) \
	((u_int64_t)((u_int64_t)*((const u_int8_t *)(p) + 0) << 56 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 1) << 48 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 2) << 40 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 3) << 32 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 4) << 24 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 5) << 16 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 6) << 8 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 7)))
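
/*
 * Worked example (added for illustration): if p points at the byte
 * sequence 0x12 0x34 0x56 0x78, EXTRACT_32BITS(p) evaluates to
 *
 *	0x12 << 24 | 0x34 << 16 | 0x56 << 8 | 0x78 = 0x12345678
 *
 * on any host, regardless of byte order or alignment rules, because
 * each byte is fetched and shifted individually.
 */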
#endif /* must special-case unaligned accesses */
#else /* LBL_ALIGN */
/*
 * The processor natively handles unaligned loads, so we can just
 * cast the pointer and fetch through it.
 */
static inline u_int16_t
EXTRACT_16BITS(const void *p)
{
	return ((u_int16_t)ntohs(*(const u_int16_t *)(p)));
}

static inline u_int32_t
EXTRACT_32BITS(const void *p)
{
	return ((u_int32_t)ntohl(*(const u_int32_t *)(p)));
}

static inline u_int64_t
EXTRACT_64BITS(const void *p)
{
	return ((u_int64_t)(((u_int64_t)ntohl(*((const u_int32_t *)(p) + 0))) << 32 |
		((u_int64_t)ntohl(*((const u_int32_t *)(p) + 1))) << 0));
}

#endif /* LBL_ALIGN */

#define EXTRACT_24BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 0) << 16 | \
	             (u_int32_t)*((const u_int8_t *)(p) + 1) << 8 | \
	             (u_int32_t)*((const u_int8_t *)(p) + 2)))

/*
 * Macros to extract possibly-unaligned little-endian integral values.
 * XXX - do loads on little-endian machines that support unaligned loads?
 */
#define EXTRACT_LE_8BITS(p) (*(p))
#define EXTRACT_LE_16BITS(p) \
	((u_int16_t)((u_int16_t)*((const u_int8_t *)(p) + 1) << 8 | \
	             (u_int16_t)*((const u_int8_t *)(p) + 0)))
#define EXTRACT_LE_32BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 3) << 24 | \
	             (u_int32_t)*((const u_int8_t *)(p) + 2) << 16 | \
	             (u_int32_t)*((const u_int8_t *)(p) + 1) << 8 | \
	             (u_int32_t)*((const u_int8_t *)(p) + 0)))
#define EXTRACT_LE_24BITS(p) \
	((u_int32_t)((u_int32_t)*((const u_int8_t *)(p) + 2) << 16 | \
	             (u_int32_t)*((const u_int8_t *)(p) + 1) << 8 | \
	             (u_int32_t)*((const u_int8_t *)(p) + 0)))
#define EXTRACT_LE_64BITS(p) \
	((u_int64_t)((u_int64_t)*((const u_int8_t *)(p) + 7) << 56 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 6) << 48 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 5) << 40 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 4) << 32 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 3) << 24 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 2) << 16 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 1) << 8 | \
	             (u_int64_t)*((const u_int8_t *)(p) + 0)))
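
/*
 * Usage sketch (an illustrative addition, not part of the original
 * header; the buffer and offsets are hypothetical): a dissector
 * reading fixed-width fields out of captured packet data might do
 *
 *	u_int16_t sport = EXTRACT_16BITS(bp);		big-endian 16-bit
 *	u_int32_t seq   = EXTRACT_32BITS(bp + 4);	big-endian 32-bit
 *	u_int32_t flags = EXTRACT_LE_32BITS(bp + 8);	little-endian 32-bit
 *
 * where "bp" is a const u_int8_t pointer into bounds-checked packet
 * data.  The same source compiles to whichever implementation above
 * matches the target's alignment behavior.
 */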