/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable. Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};
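
/*
 * Padding each raw_spinlock_t out to L1_CACHE_BYTES means two CPUs
 * spinning on different locks never contend for the same cacheline,
 * i.e. it avoids false sharing between adjacent locks in the array.
 */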

static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
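
/*
 * Worked example (illustrative only, assuming L1_CACHE_SHIFT == 5 and a
 * hypothetical atomic64_t at address 0x00c0ffe8):
 *
 *	0x00c0ffe8 >> 5                       = 0x000607ff
 *	0x000607ff ^ 0x00000607 ^ 0x00000006  = 0x000601fe
 *	0x000601fe & (NR_LOCKS - 1)           = 14
 *
 * so that variable, and every other atomic64_t whose address folds to
 * the same 4-bit index, is serialized by atomic64_lock[14].lock.
 */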

long long atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_read);

void atomic64_set(atomic64_t *v, long long i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);

#define ATOMIC64_OP(op, c_op)						\
void atomic64_##op(long long a, atomic64_t *v)				\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
}									\
EXPORT_SYMBOL(atomic64_##op);

#define ATOMIC64_OP_RETURN(op, c_op)					\
long long atomic64_##op##_return(long long a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	long long val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = (v->counter c_op a);					\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(atomic64_##op##_return);

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)
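
/*
 * The two ATOMIC64_OPS() invocations above generate four exported
 * functions: atomic64_add(), atomic64_add_return(), atomic64_sub() and
 * atomic64_sub_return(). For example, ATOMIC64_OP(add, +=) expands to:
 *
 *	void atomic64_add(long long a, atomic64_t *v)
 *	{
 *		unsigned long flags;
 *		raw_spinlock_t *lock = lock_addr(v);
 *
 *		raw_spin_lock_irqsave(lock, flags);
 *		v->counter += a;
 *		raw_spin_unlock_irqrestore(lock, flags);
 *	}
 *	EXPORT_SYMBOL(atomic64_add);
 */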

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

long long atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);
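
/*
 * Note that atomic64_dec_if_positive() returns the decremented value in
 * either case: a return value < 0 means @v was already <= 0 and the
 * counter was left unchanged.
 */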

long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);
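
/*
 * atomic64_cmpxchg() returns the value @v held before the attempted
 * update, so a caller can build an arbitrary read-modify-write by
 * retrying until the compare succeeds. A minimal sketch, using a
 * hypothetical helper name:
 *
 *	static long long atomic64_double(atomic64_t *v)
 *	{
 *		long long old, new;
 *
 *		do {
 *			old = atomic64_read(v);
 *			new = old * 2;
 *		} while (atomic64_cmpxchg(v, old, new) != old);
 *
 *		return new;
 *	}
 */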

long long atomic64_xchg(atomic64_t *v, long long new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_xchg);

int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	int ret = 0;

	raw_spin_lock_irqsave(lock, flags);
	if (v->counter != u) {
		v->counter += a;
		ret = 1;
	}
	raw_spin_unlock_irqrestore(lock, flags);
	return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);
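
/*
 * atomic64_add_unless() adds @a to @v unless @v currently equals @u,
 * and returns non-zero iff the addition happened. The usual
 * refcounting helper atomic64_inc_not_zero(v) is typically built on
 * top of it as atomic64_add_unless((v), 1LL, 0LL).
 */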