#ifndef __ASM_SH_UNALIGNED_SH4A_H
#define __ASM_SH_UNALIGNED_SH4A_H

/*
 * SH-4A has support for unaligned 32-bit loads, and 32-bit loads only.
 * Support for 64-bit accesses is done through shifting and masking
 * relative to the endianness. Unaligned stores are not supported by the
 * instruction encoding, so these continue to use the packed struct.
 *
 * The same note as with the movli.l/movco.l pair applies here: as long
 * as the load is guaranteed to be inlined, nothing else will hook into
 * r0 and we get the return value for free.
 *
 * NOTE: Because we require r0 encoding, care should be taken to avoid
 * mixing these heavily with other r0 consumers, such as the atomic ops.
 * Failure to adhere to this can result in the compiler running out of
 * spill registers and blowing up when building at low optimization
 * levels. See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=34777.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

static __always_inline u32 __get_unaligned_cpu32(const u8 *p)
{
	unsigned long unaligned;

	__asm__ __volatile__ (
		"movua.l	@%1, %0\n\t"
		 : "=z" (unaligned)
		 : "r" (p)
	);

	return unaligned;
}
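
/*
 * Illustrative sketch (not part of this header's API): with the helper
 * above inlined, a read such as
 *
 *	u32 field = __get_unaligned_cpu32(buf + 1);
 *
 * should compile to a single movua.l with the result landing in r0,
 * which is why __always_inline is used; an out-of-line call would
 * forfeit the free return value noted above. "buf" is a hypothetical
 * u8 pointer.
 */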

struct __una_u16 { u16 x __attribute__((packed)); };
struct __una_u32 { u32 x __attribute__((packed)); };
struct __una_u64 { u64 x __attribute__((packed)); };
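
/*
 * Illustrative sketch: the packed structs give the compiler a type with
 * an alignment of 1, so a store such as
 *
 *	((struct __una_u32 *)p)->x = val;
 *
 * is lowered to a byte-safe sequence rather than a faulting mov.l,
 * which is what the put_unaligned_*() routines below rely on.
 */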

static inline u16 __get_unaligned_cpu16(const u8 *p)
{
#ifdef __LITTLE_ENDIAN
	return p[0] | p[1] << 8;
#else
	return p[0] << 8 | p[1];
#endif
}
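
/*
 * Worked example, assuming p[0] == 0x34 and p[1] == 0x12: a
 * little-endian build returns 0x34 | 0x12 << 8 == 0x1234, while a
 * big-endian build returns 0x34 << 8 | 0x12 == 0x3412.
 */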

/*
 * Even though movua.l supports auto-increment on the read side, it can
 * only store to r0 due to instruction encoding constraints, so just let
 * the compiler sort it out on its own.
 */
static inline u64 __get_unaligned_cpu64(const u8 *p)
{
#ifdef __LITTLE_ENDIAN
	return (u64)__get_unaligned_cpu32(p + 4) << 32 |
		    __get_unaligned_cpu32(p);
#else
	return (u64)__get_unaligned_cpu32(p) << 32 |
		    __get_unaligned_cpu32(p + 4);
#endif
}
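
/*
 * Illustrative sketch: on a little-endian build the low word comes from
 * __get_unaligned_cpu32(p) and the high word from
 * __get_unaligned_cpu32(p + 4), so the result is
 *
 *	(u64)hi << 32 | lo
 *
 * i.e. two movua.l loads glued together with a shift, matching the
 * "shifting and masking" note at the top of this file.
 */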

static inline u16 get_unaligned_le16(const void *p)
{
	return le16_to_cpu(__get_unaligned_cpu16(p));
}

static inline u32 get_unaligned_le32(const void *p)
{
	return le32_to_cpu(__get_unaligned_cpu32(p));
}

static inline u64 get_unaligned_le64(const void *p)
{
	return le64_to_cpu(__get_unaligned_cpu64(p));
}

static inline u16 get_unaligned_be16(const void *p)
{
	return be16_to_cpu(__get_unaligned_cpu16(p));
}

static inline u32 get_unaligned_be32(const void *p)
{
	return be32_to_cpu(__get_unaligned_cpu32(p));
}

static inline u64 get_unaligned_be64(const void *p)
{
	return be64_to_cpu(__get_unaligned_cpu64(p));
}
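
/*
 * Illustrative usage sketch: pulling a big-endian 16-bit length field
 * out of a byte buffer at an arbitrary offset, e.g. while parsing a
 * network header ("hdr" and the offset are hypothetical):
 *
 *	u16 len = get_unaligned_be16(hdr + 3);
 *
 * The le/be conversion composes with the native-order load, so this is
 * correct on both little- and big-endian builds.
 */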

static inline void __put_le16_noalign(u8 *p, u16 val)
{
	*p++ = val;
	*p++ = val >> 8;
}

static inline void __put_le32_noalign(u8 *p, u32 val)
{
	__put_le16_noalign(p, val);
	__put_le16_noalign(p + 2, val >> 16);
}

static inline void __put_le64_noalign(u8 *p, u64 val)
{
	__put_le32_noalign(p, val);
	__put_le32_noalign(p + 4, val >> 32);
}

static inline void __put_be16_noalign(u8 *p, u16 val)
{
	*p++ = val >> 8;
	*p++ = val;
}

static inline void __put_be32_noalign(u8 *p, u32 val)
{
	__put_be16_noalign(p, val >> 16);
	__put_be16_noalign(p + 2, val);
}

static inline void __put_be64_noalign(u8 *p, u64 val)
{
	__put_be32_noalign(p, val >> 32);
	__put_be32_noalign(p + 4, val);
}
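
/*
 * Worked example: __put_be32_noalign(p, 0x12345678) stores the bytes
 * 0x12, 0x34, 0x56, 0x78 at p[0..3] one at a time, so it is safe for
 * any alignment on either endianness.
 */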

static inline void put_unaligned_le16(u16 val, void *p)
{
#ifdef __LITTLE_ENDIAN
	((struct __una_u16 *)p)->x = val;
#else
	__put_le16_noalign(p, val);
#endif
}

static inline void put_unaligned_le32(u32 val, void *p)
{
#ifdef __LITTLE_ENDIAN
	((struct __una_u32 *)p)->x = val;
#else
	__put_le32_noalign(p, val);
#endif
}

static inline void put_unaligned_le64(u64 val, void *p)
{
#ifdef __LITTLE_ENDIAN
	((struct __una_u64 *)p)->x = val;
#else
	__put_le64_noalign(p, val);
#endif
}

static inline void put_unaligned_be16(u16 val, void *p)
{
#ifdef __BIG_ENDIAN
	((struct __una_u16 *)p)->x = val;
#else
	__put_be16_noalign(p, val);
#endif
}

static inline void put_unaligned_be32(u32 val, void *p)
{
#ifdef __BIG_ENDIAN
	((struct __una_u32 *)p)->x = val;
#else
	__put_be32_noalign(p, val);
#endif
}

static inline void put_unaligned_be64(u64 val, void *p)
{
#ifdef __BIG_ENDIAN
	((struct __una_u64 *)p)->x = val;
#else
	__put_be64_noalign(p, val);
#endif
}
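
/*
 * Illustrative usage sketch: writing a little-endian 32-bit value into
 * a possibly misaligned buffer position ("buf" is a hypothetical u8 *):
 *
 *	put_unaligned_le32(0xdeadbeef, buf + 1);
 *
 * On a little-endian build this goes through the packed struct store;
 * on a big-endian build it falls back to the byte-wise helper above.
 */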

/*
 * Cause a link-time error if we try an unaligned access other than
 * 1, 2, 4 or 8 bytes long.
 */
extern void __bad_unaligned_access_size(void);

#define __get_unaligned_le(ptr) ((__force typeof(*(ptr)))({			\
	__builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr),			\
	__builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)),	\
	__builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)),	\
	__builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)),	\
	__bad_unaligned_access_size()))));					\
	}))

#define __get_unaligned_be(ptr) ((__force typeof(*(ptr)))({			\
	__builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr),			\
	__builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)),	\
	__builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)),	\
	__builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)),	\
	__bad_unaligned_access_size()))));					\
	}))
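
/*
 * Illustrative sketch of the size dispatch: given a hypothetical packed
 * header field,
 *
 *	u16 len = __get_unaligned_le(&hdr->len);
 *
 * sizeof(*(&hdr->len)) == 2 selects get_unaligned_le16() at compile
 * time via __builtin_choose_expr; any size other than 1, 2, 4 or 8
 * leaves a reference to __bad_unaligned_access_size() and fails at
 * link time.
 */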

#define __put_unaligned_le(val, ptr) ({					\
	void *__gu_p = (ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		*(u8 *)__gu_p = (__force u8)(val);			\
		break;							\
	case 2:								\
		put_unaligned_le16((__force u16)(val), __gu_p);		\
		break;							\
	case 4:								\
		put_unaligned_le32((__force u32)(val), __gu_p);		\
		break;							\
	case 8:								\
		put_unaligned_le64((__force u64)(val), __gu_p);		\
		break;							\
	default:							\
		__bad_unaligned_access_size();				\
		break;							\
	}								\
	(void)0; })

#define __put_unaligned_be(val, ptr) ({					\
	void *__gu_p = (ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		*(u8 *)__gu_p = (__force u8)(val);			\
		break;							\
	case 2:								\
		put_unaligned_be16((__force u16)(val), __gu_p);		\
		break;							\
	case 4:								\
		put_unaligned_be32((__force u32)(val), __gu_p);		\
		break;							\
	case 8:								\
		put_unaligned_be64((__force u64)(val), __gu_p);		\
		break;							\
	default:							\
		__bad_unaligned_access_size();				\
		break;							\
	}								\
	(void)0; })
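
/*
 * Illustrative sketch: the put side dispatches the same way, e.g.
 *
 *	__put_unaligned_le(seq, &hdr->seq);
 *
 * picks put_unaligned_le32() when hdr->seq is a u32 ("hdr" is again
 * hypothetical). The trailing (void)0 makes the statement expression
 * yield no value.
 */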

#ifdef __LITTLE_ENDIAN
# define get_unaligned	__get_unaligned_le
# define put_unaligned	__put_unaligned_le
#else
# define get_unaligned	__get_unaligned_be
# define put_unaligned	__put_unaligned_be
#endif

#endif /* __ASM_SH_UNALIGNED_SH4A_H */