/*
 * Based on Linux/Xtensa kernel version
 *
 * Copyright (C) 2001 - 2007 Tensilica Inc.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#ifndef _XTENSA_BYTEORDER_H
#define _XTENSA_BYTEORDER_H

#include <asm/types.h>

static inline __attribute__((const)) __u32 ___arch__swab32(__u32 x)
{
	__u32 res;

	/* instruction sequence from Xtensa ISA release 2/2000 */
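	/*
	 * Rough sketch of the data flow, writing x as the bytes
	 * b3.b2.b1.b0 (b3 most significant): SSAI sets the shift
	 * amount to 8, SRLI leaves b3.b2 in the low half of res, and
	 * the three funnel shifts (SRC) then rotate bytes from x and
	 * res into place until res holds b0.b1.b2.b3.
	 */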
	__asm__("ssai 8\n\t"
		"srli %0, %1, 16\n\t"
		"src %0, %0, %1\n\t"
		"src %0, %0, %0\n\t"
		"src %0, %1, %0\n"
		: "=&a" (res)
		: "a" (x)
		);
	return res;
}

static inline __attribute__((const)) __u16 ___arch__swab16(__u16 x)
{
	/*
	 * Given that 'short' values are signed (i.e., can be negative),
	 * we cannot assume that the upper 16-bits of the register are
	 * zero. We are careful to mask values after shifting.
	 */

	/*
	 * There exists an anomaly between xt-gcc and xt-xcc. xt-gcc
	 * inserts an extui instruction after putting this function inline
	 * to ensure that it uses only the least-significant 16 bits of
	 * the result. xt-xcc doesn't use an extui, but assumes the
	 * __asm__ macro follows the convention that the upper 16 bits of
	 * an 'unsigned short' result are still zero. This macro doesn't
	 * follow that convention; indeed, it leaves garbage in the upper
	 * 16 bits of the register.
	 *
	 * Declaring the temporary variables 'res' and 'tmp' to be 32-bit
	 * types while the return type of the function is a 16-bit type
	 * forces both compilers to insert exactly one extui instruction
	 * (or equivalent) to mask off the upper 16 bits.
	 */
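	/*
	 * Roughly, the asm sequence below computes
	 * (x << 8) | ((x >> 8) & 0xff) in a 32-bit register; the single
	 * extui described above then truncates the result to 16 bits.
	 */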

	__u32 res;
	__u32 tmp;

	__asm__("extui %1, %2, 8, 8\n\t"
		"slli %0, %2, 8\n\t"
		"or %0, %0, %1\n"
		: "=&a" (res), "=&a" (tmp)
		: "a" (x)
		);

	return res;
}

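/*
 * Export the helpers above under the names the generic linux/byteorder
 * code looks for, so it picks up these optimized versions.
 */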
#define __arch__swab32(x) ___arch__swab32(x)
#define __arch__swab16(x) ___arch__swab16(x)

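/*
 * There is no 64-bit swap primitive here; __SWAB_64_THRU_32__ tells the
 * generic code to build __swab64() out of two 32-bit swaps instead.
 */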
#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
# define __BYTEORDER_HAS_U64__
# define __SWAB_64_THRU_32__
#endif

#ifdef __XTENSA_EL__
# include <linux/byteorder/little_endian.h>
#elif defined(__XTENSA_EB__)
# include <linux/byteorder/big_endian.h>
#else
# error processor byte order undefined!
#endif

#endif /* _XTENSA_BYTEORDER_H */