#ifndef _SPL_ATOMIC_H
#define _SPL_ATOMIC_H

#ifdef  __cplusplus
extern "C" {
#endif

#include <linux/module.h>
#include <linux/spinlock.h>

/* XXX: Serialize everything through global locks.  This is
 * going to be bad for performance, but for now it's the easiest
 * way to ensure correct behavior.  I don't like it at all.
 * It would be nicer to map these functions onto the native Linux
 * atomic functions, but the plain uint64_t type complicates this.
 */
extern spinlock_t atomic64_lock;
extern spinlock_t atomic32_lock;

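/* Atomically adds delta to *target and returns the previous (old) value. */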
static __inline__ uint32_t
atomic_add_32(volatile uint32_t *target, int32_t delta)
{
	uint32_t rc;

	spin_lock(&atomic32_lock);
	rc = *target;
	*target += delta;
	spin_unlock(&atomic32_lock);

	return rc;
}

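/* Atomically increments *target by one. */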
static __inline__ void
atomic_inc_64(volatile uint64_t *target)
{
	spin_lock(&atomic64_lock);
	(*target)++;
	spin_unlock(&atomic64_lock);
}

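/* Atomically decrements *target by one. */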
static __inline__ void
atomic_dec_64(volatile uint64_t *target)
{
	spin_lock(&atomic64_lock);
	(*target)--;
	spin_unlock(&atomic64_lock);
}

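/* Atomically adds delta to *target and returns the previous (old) value. */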
static __inline__ uint64_t
atomic_add_64(volatile uint64_t *target, uint64_t delta)
{
	uint64_t rc;

	spin_lock(&atomic64_lock);
	rc = *target;
	*target += delta;
	spin_unlock(&atomic64_lock);

	return rc;
}

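/* Atomically subtracts delta from *target and returns the previous (old) value. */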
static __inline__ uint64_t
atomic_sub_64(volatile uint64_t *target, uint64_t delta)
{
	uint64_t rc;

	spin_lock(&atomic64_lock);
	rc = *target;
	*target -= delta;
	spin_unlock(&atomic64_lock);

	return rc;
}

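/* Atomically adds delta to *target and returns the resulting (new) value. */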
static __inline__ uint64_t
atomic_add_64_nv(volatile uint64_t *target, uint64_t delta)
{
	uint64_t rc;

	spin_lock(&atomic64_lock);
	rc = (*target += delta);
	spin_unlock(&atomic64_lock);

	return rc;
}

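/* Atomically subtracts delta from *target and returns the resulting (new) value. */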
static __inline__ uint64_t
atomic_sub_64_nv(volatile uint64_t *target, uint64_t delta)
{
	uint64_t rc;

	spin_lock(&atomic64_lock);
	rc = (*target -= delta);
	spin_unlock(&atomic64_lock);

	return rc;
}

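/* Atomically compares *target with cmp; if they are equal, *target is set
 * to newval.  The previous value of *target is returned either way. */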
static __inline__ uint64_t
atomic_cas_64(volatile uint64_t *target, uint64_t cmp, uint64_t newval)
{
	uint64_t rc;

	spin_lock(&atomic64_lock);
	rc = *target;
	if (*target == cmp)
		*target = newval;
	spin_unlock(&atomic64_lock);

	return rc;
}
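
/*
 * Illustrative (hypothetical) caller pattern, sketch only: atomic_cas_64()
 * returns the value observed before the swap attempt, so callers typically
 * retry until the returned value matches the value they expected.
 *
 *	uint64_t old, new;
 *	do {
 *		old = *counter;
 *		new = old * 2;
 *	} while (atomic_cas_64(counter, old, new) != old);
 */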

#if defined(__x86_64__)
/* XXX: Implement atomic_cas_ptr() in terms of uint64_t.  This
 * is of course only safe and correct on 64-bit arches...  but
 * for now I'm OK with that.
 */
static __inline__ void *
atomic_cas_ptr(volatile void *target, void *cmp, void *newval)
{
	return (void *)atomic_cas_64((volatile uint64_t *)target,
	                             (uint64_t)cmp, (uint64_t)newval);
}
#endif

#ifdef  __cplusplus
}
#endif

#endif  /* _SPL_ATOMIC_H */