/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Port on Texas Instruments TMS320C6x architecture
 *
 * Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
 * Author: Aurelien Jacquiot ([email protected])
 * Rewritten for 2.6.3x: Mark Salter <[email protected]>
 */
#ifndef _ASM_C6X_UNALIGNED_H
#define _ASM_C6X_UNALIGNED_H

#include <linux/swab.h>
#include <linux/unaligned/generic.h>

/*
 * The C64x+ can do unaligned word and dword accesses in hardware
 * using special load/store instructions.
 */

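/*
 * 16-bit accesses are composed from byte loads and stores; the
 * non-aligned instructions used further below only cover the 32-bit
 * and 64-bit widths.
 */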
static inline u16 get_unaligned_le16(const void *p)
{
	const u8 *_p = p;
	return _p[0] | _p[1] << 8;
}

static inline u16 get_unaligned_be16(const void *p)
{
	const u8 *_p = p;
	return _p[0] << 8 | _p[1];
}

static inline void put_unaligned_le16(u16 val, void *p)
{
	u8 *_p = p;
	_p[0] = val;
	_p[1] = val >> 8;
}

static inline void put_unaligned_be16(u16 val, void *p)
{
	u8 *_p = p;
	_p[0] = val >> 8;
	_p[1] = val;
}

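/*
 * 32-bit and 64-bit accesses use the C64x+ non-aligned load/store
 * instructions (ldnw/stnw and ldndw/stndw), which move the bytes in
 * memory order regardless of the pointer's alignment.
 */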
static inline u32 get_unaligned32(const void *p)
{
	u32 val = (u32) p;
	asm (" ldnw	.d1t1	*%0,%0\n"
	     " nop	4\n"
	     : "+a"(val));
	return val;
}

static inline void put_unaligned32(u32 val, void *p)
{
	asm volatile (" stnw	.d2t1	%0,*%1\n"
		      : : "a"(val), "b"(p) : "memory");
}

static inline u64 get_unaligned64(const void *p)
{
	u64 val;
	asm volatile (" ldndw	.d1t1	*%1,%0\n"
		      " nop	4\n"
		      : "=a"(val) : "a"(p));
	return val;
}

static inline void put_unaligned64(u64 val, const void *p)
{
	asm volatile (" stndw	.d2t1	%0,*%1\n"
		      : : "a"(val), "b"(p) : "memory");
}
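/*
 * Because the helpers above transfer bytes in memory order, each
 * endian-specific accessor is either a direct access or a byte swap,
 * depending on the endianness the CPU is configured for.
 */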
#ifdef CONFIG_CPU_BIG_ENDIAN

#define get_unaligned_le32(p)	 __swab32(get_unaligned32(p))
#define get_unaligned_le64(p)	 __swab64(get_unaligned64(p))
#define get_unaligned_be32(p)	 get_unaligned32(p)
#define get_unaligned_be64(p)	 get_unaligned64(p)
#define put_unaligned_le32(v, p) put_unaligned32(__swab32(v), (p))
#define put_unaligned_le64(v, p) put_unaligned64(__swab64(v), (p))
#define put_unaligned_be32(v, p) put_unaligned32((v), (p))
#define put_unaligned_be64(v, p) put_unaligned64((v), (p))
#define get_unaligned	__get_unaligned_be
#define put_unaligned	__put_unaligned_be

#else

#define get_unaligned_le32(p)	 get_unaligned32(p)
#define get_unaligned_le64(p)	 get_unaligned64(p)
#define get_unaligned_be32(p)	 __swab32(get_unaligned32(p))
#define get_unaligned_be64(p)	 __swab64(get_unaligned64(p))
#define put_unaligned_le32(v, p) put_unaligned32((v), (p))
#define put_unaligned_le64(v, p) put_unaligned64((v), (p))
#define put_unaligned_be32(v, p) put_unaligned32(__swab32(v), (p))
#define put_unaligned_be64(v, p) put_unaligned64(__swab64(v), (p))
#define get_unaligned	__get_unaligned_le
#define put_unaligned	__put_unaligned_le

#endif
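
/*
 * Usage sketch (illustrative only, not part of the original header):
 * reading and updating a 16-bit little-endian field at an arbitrary,
 * possibly misaligned offset in a byte buffer. The buffer and offset
 * here are hypothetical:
 *
 *	u8 *buf = frame_data;
 *	u16 len = get_unaligned_le16(buf + 3);
 *	put_unaligned_le16(len + 1, buf + 3);
 */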

#endif /* _ASM_C6X_UNALIGNED_H */