/*
 *  linux/fs/ufs/swab.h
 *
 * Copyright (C) 1997, 1998 Francois-Rene Rideau <fare@tunes.org>
 * Copyright (C) 1998 Jakub Jelinek <jj@ultra.linux.cz>
 * Copyright (C) 2001 Christoph Hellwig <hch@infradead.org>
 */

#ifndef _UFS_SWAB_H
#define _UFS_SWAB_H

/*
 * Notes:
 *    Here we assume a UFS filesystem is either big or little endian.
 *    If there are UFS implementations with other byte orders, the code
 *    here, as well as in ufs_super.c and ufs_fs.h, will need to be
 *    modified to support them.
 */

enum {
	BYTESEX_LE,
	BYTESEX_BE
};
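
/*
 * UFS_SB(sb)->s_bytesex records the byte order of the mounted filesystem
 * and is set when the superblock is read (see ufs_super.c).  The helpers
 * below convert between on-disk (__fs16/__fs32/__fs64) and CPU byte order
 * according to that flag, for example (the field shown is illustrative):
 *
 *	u32 nifree = fs32_to_cpu(sb, usb1->fs_cstotal.cs_nifree);
 */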

static inline u64
fs64_to_cpu(struct super_block *sbp, __fs64 n)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		return le64_to_cpu((__force __le64)n);
	else
		return be64_to_cpu((__force __be64)n);
}

static inline __fs64
cpu_to_fs64(struct super_block *sbp, u64 n)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		return (__force __fs64)cpu_to_le64(n);
	else
		return (__force __fs64)cpu_to_be64(n);
}
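
/*
 * The fs*_add()/fs*_sub() helpers adjust an on-disk counter in place,
 * adding or subtracting 'd' while preserving the filesystem's byte order.
 */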

static inline void
fs64_add(struct super_block *sbp, __fs64 *n, int d)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		*(__le64 *)n = cpu_to_le64(le64_to_cpu(*(__le64 *)n)+d);
	else
		*(__be64 *)n = cpu_to_be64(be64_to_cpu(*(__be64 *)n)+d);
}

static inline void
fs64_sub(struct super_block *sbp, __fs64 *n, int d)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		*(__le64 *)n = cpu_to_le64(le64_to_cpu(*(__le64 *)n)-d);
	else
		*(__be64 *)n = cpu_to_be64(be64_to_cpu(*(__be64 *)n)-d);
}

static inline u32
fs32_to_cpu(struct super_block *sbp, __fs32 n)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		return le32_to_cpu((__force __le32)n);
	else
		return be32_to_cpu((__force __be32)n);
}

static inline __fs32
cpu_to_fs32(struct super_block *sbp, u32 n)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		return (__force __fs32)cpu_to_le32(n);
	else
		return (__force __fs32)cpu_to_be32(n);
}

static inline void
fs32_add(struct super_block *sbp, __fs32 *n, int d)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		*(__le32 *)n = cpu_to_le32(le32_to_cpu(*(__le32 *)n)+d);
	else
		*(__be32 *)n = cpu_to_be32(be32_to_cpu(*(__be32 *)n)+d);
}

static inline void
fs32_sub(struct super_block *sbp, __fs32 *n, int d)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		*(__le32 *)n = cpu_to_le32(le32_to_cpu(*(__le32 *)n)-d);
	else
		*(__be32 *)n = cpu_to_be32(be32_to_cpu(*(__be32 *)n)-d);
}

static inline u16
fs16_to_cpu(struct super_block *sbp, __fs16 n)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		return le16_to_cpu((__force __le16)n);
	else
		return be16_to_cpu((__force __be16)n);
}

static inline __fs16
cpu_to_fs16(struct super_block *sbp, u16 n)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		return (__force __fs16)cpu_to_le16(n);
	else
		return (__force __fs16)cpu_to_be16(n);
}

static inline void
fs16_add(struct super_block *sbp, __fs16 *n, int d)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		*(__le16 *)n = cpu_to_le16(le16_to_cpu(*(__le16 *)n)+d);
	else
		*(__be16 *)n = cpu_to_be16(be16_to_cpu(*(__be16 *)n)+d);
}

static inline void
fs16_sub(struct super_block *sbp, __fs16 *n, int d)
{
	if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
		*(__le16 *)n = cpu_to_le16(le16_to_cpu(*(__le16 *)n)-d);
	else
		*(__be16 *)n = cpu_to_be16(be16_to_cpu(*(__be16 *)n)-d);
}

#endif /* _UFS_SWAB_H */