Line data Source code
1 : /*
2 : * Percpu IDA library
3 : *
4 : * Copyright (C) 2013 Datera, Inc. Kent Overstreet
5 : *
6 : * This program is free software; you can redistribute it and/or
7 : * modify it under the terms of the GNU General Public License as
8 : * published by the Free Software Foundation; either version 2, or (at
9 : * your option) any later version.
10 : *
11 : * This program is distributed in the hope that it will be useful, but
12 : * WITHOUT ANY WARRANTY; without even the implied warranty of
13 : * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 : * General Public License for more details.
15 : */
16 :
17 : #include <linux/bitmap.h>
18 : #include <linux/bitops.h>
19 : #include <linux/bug.h>
20 : #include <linux/err.h>
21 : #include <linux/export.h>
22 : #include <linux/hardirq.h>
23 : #include <linux/idr.h>
24 : #include <linux/init.h>
25 : #include <linux/kernel.h>
26 : #include <linux/percpu.h>
27 : #include <linux/sched.h>
28 : #include <linux/slab.h>
29 : #include <linux/string.h>
30 : #include <linux/spinlock.h>
31 : #include <linux/percpu_ida.h>
32 :
struct percpu_ida_cpu {
	/*
	 * Even though this is percpu, we need a lock for tag stealing by remote
	 * CPUs:
	 */
	spinlock_t lock;

	/* nr_free/freelist form a stack of free IDs */
	unsigned nr_free;
	/*
	 * Flexible array member: sized to pool->percpu_max_size entries at
	 * allocation time (see the __alloc_percpu() call in
	 * __percpu_ida_init()).
	 */
	unsigned freelist[];
};
44 :
/*
 * Transfer the top @nr entries of the @src stack onto the top of the @dst
 * stack, updating both element counts.  Caller guarantees @src holds at
 * least @nr entries and @dst has room for them.
 */
static inline void move_tags(unsigned *dst, unsigned *dst_nr,
			     unsigned *src, unsigned *src_nr,
			     unsigned nr)
{
	unsigned src_top = *src_nr - nr;
	unsigned dst_top = *dst_nr;

	memcpy(dst + dst_top, src + src_top, nr * sizeof(*src));
	*src_nr = src_top;
	*dst_nr = dst_top + nr;
}
53 :
/*
 * Try to steal tags from a remote cpu's percpu freelist.
 *
 * We first check how many percpu freelists have tags
 *
 * Then we iterate through the cpus until we find some tags - we don't attempt
 * to find the "best" cpu to steal from, to keep cacheline bouncing to a
 * minimum.
 *
 * Called with pool->lock held and interrupts disabled (see the slowpath in
 * percpu_ida_alloc()).
 */
static inline void steal_tags(struct percpu_ida *pool,
			      struct percpu_ida_cpu *tags)
{
	unsigned cpus_have_tags, cpu = pool->cpu_last_stolen;
	struct percpu_ida_cpu *remote;

	/*
	 * The weight is only a snapshot used as an iteration bound;
	 * percpu_ida_free() may set bits in cpus_have_tags concurrently
	 * (it doesn't take pool->lock), so don't treat it as exact.
	 */
	for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
	     cpus_have_tags; cpus_have_tags--) {
		/* Round-robin: resume scanning after the previous victim */
		cpu = cpumask_next(cpu, &pool->cpus_have_tags);

		if (cpu >= nr_cpu_ids) {
			/* Wrapped past the last cpu; restart from the first */
			cpu = cpumask_first(&pool->cpus_have_tags);
			if (cpu >= nr_cpu_ids)
				BUG();
		}

		pool->cpu_last_stolen = cpu;
		remote = per_cpu_ptr(pool->tag_cpu, cpu);

		/*
		 * Clear the bit before draining: percpu_ida_free() re-sets
		 * it on the remote cpu's next 0 -> 1 nr_free transition.
		 */
		cpumask_clear_cpu(cpu, &pool->cpus_have_tags);

		/* Our own list is empty (caller checked); nothing to steal */
		if (remote == tags)
			continue;

		spin_lock(&remote->lock);

		if (remote->nr_free) {
			/* Take the remote cpu's entire freelist in one go */
			memcpy(tags->freelist,
			       remote->freelist,
			       sizeof(unsigned) * remote->nr_free);

			tags->nr_free = remote->nr_free;
			remote->nr_free = 0;
		}

		spin_unlock(&remote->lock);

		/* remote->nr_free may have been 0 by now - keep looking */
		if (tags->nr_free)
			break;
	}
}
104 :
105 : /*
106 : * Pop up to IDA_PCPU_BATCH_MOVE IDs off the global freelist, and push them onto
107 : * our percpu freelist:
108 : */
109 : static inline void alloc_global_tags(struct percpu_ida *pool,
110 : struct percpu_ida_cpu *tags)
111 : {
112 0 : move_tags(tags->freelist, &tags->nr_free,
113 : pool->freelist, &pool->nr_free,
114 0 : min(pool->nr_free, pool->percpu_batch_size));
115 : }
116 :
117 : static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags)
118 : {
119 : int tag = -ENOSPC;
120 :
121 : spin_lock(&tags->lock);
122 0 : if (tags->nr_free)
123 0 : tag = tags->freelist[--tags->nr_free];
124 : spin_unlock(&tags->lock);
125 :
126 : return tag;
127 : }
128 :
/**
 * percpu_ida_alloc - allocate a tag
 * @pool: pool to allocate from
 * @state: task state for prepare_to_wait() (TASK_RUNNING means "don't
 *	block"; TASK_INTERRUPTIBLE / TASK_UNINTERRUPTIBLE mean "wait for
 *	a free tag")
 *
 * Returns a tag - an integer in the range [0..nr_tags) (passed to
 * percpu_ida_init()) - or a negative errno:
 *   -ENOSPC      if @state == TASK_RUNNING and no tag is available;
 *   -ERESTARTSYS if a signal is pending while waiting in a signallable
 *                @state (see signal_pending_state()).
 *
 * Safe to be called from interrupt context if passed TASK_RUNNING (a
 * sleeping @state may schedule() below, of course).
 *
 * If @state allows sleeping, we may block however long it takes until
 * another thread frees a tag (same semantics as a mempool).
 */
int percpu_ida_alloc(struct percpu_ida *pool, int state)
{
	DEFINE_WAIT(wait);
	struct percpu_ida_cpu *tags;
	unsigned long flags;
	int tag;

	/* All freelist manipulation happens with interrupts off */
	local_irq_save(flags);
	tags = this_cpu_ptr(pool->tag_cpu);

	/* Fastpath */
	tag = alloc_local_tag(tags);
	if (likely(tag >= 0)) {
		local_irq_restore(flags);
		return tag;
	}

	/* Slowpath: refill from the global pool or steal, possibly sleeping */
	while (1) {
		spin_lock(&pool->lock);

		/*
		 * prepare_to_wait() must come before steal_tags(), in case
		 * percpu_ida_free() on another cpu flips a bit in
		 * cpus_have_tags
		 *
		 * global lock held and irqs disabled, don't need percpu lock
		 */
		if (state != TASK_RUNNING)
			prepare_to_wait(&pool->wait, &wait, state);

		if (!tags->nr_free)
			alloc_global_tags(pool, tags);
		if (!tags->nr_free)
			steal_tags(pool, tags);

		if (tags->nr_free) {
			tag = tags->freelist[--tags->nr_free];
			/*
			 * Tags left over on this cpu: advertise them so
			 * other cpus can steal if they run dry.
			 */
			if (tags->nr_free)
				cpumask_set_cpu(smp_processor_id(),
						&pool->cpus_have_tags);
		}

		spin_unlock(&pool->lock);
		local_irq_restore(flags);

		/* Got a tag, or the caller asked us not to block */
		if (tag >= 0 || state == TASK_RUNNING)
			break;

		if (signal_pending_state(state, current)) {
			tag = -ERESTARTSYS;
			break;
		}

		schedule();

		/* We may have migrated while asleep; reload the percpu ptr */
		local_irq_save(flags);
		tags = this_cpu_ptr(pool->tag_cpu);
	}
	if (state != TASK_RUNNING)
		finish_wait(&pool->wait, &wait);

	return tag;
}
EXPORT_SYMBOL_GPL(percpu_ida_alloc);
211 :
/**
 * percpu_ida_free - free a tag
 * @pool: pool @tag was allocated from
 * @tag: a tag previously allocated with percpu_ida_alloc()
 *
 * Safe to be called from interrupt context.
 */
void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
{
	struct percpu_ida_cpu *tags;
	unsigned long flags;
	unsigned nr_free;

	BUG_ON(tag >= pool->nr_tags);

	local_irq_save(flags);
	tags = this_cpu_ptr(pool->tag_cpu);

	/* Push onto this cpu's stack; the lock guards against steal_tags() */
	spin_lock(&tags->lock);
	tags->freelist[tags->nr_free++] = tag;

	nr_free = tags->nr_free;
	spin_unlock(&tags->lock);

	if (nr_free == 1) {
		/* 0 -> 1 transition: this cpu is now worth stealing from */
		cpumask_set_cpu(smp_processor_id(),
				&pool->cpus_have_tags);
		wake_up(&pool->wait);
	}

	if (nr_free == pool->percpu_max_size) {
		/* Percpu list is full: return a batch to the global pool */
		spin_lock(&pool->lock);

		/*
		 * Global lock held and irqs disabled, don't need percpu
		 * lock
		 *
		 * Re-check nr_free: steal_tags() may have drained the list
		 * between dropping tags->lock and taking pool->lock above.
		 */
		if (tags->nr_free == pool->percpu_max_size) {
			move_tags(pool->freelist, &pool->nr_free,
				  tags->freelist, &tags->nr_free,
				  pool->percpu_batch_size);

			wake_up(&pool->wait);
		}
		spin_unlock(&pool->lock);
	}

	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(percpu_ida_free);
262 :
263 : /**
264 : * percpu_ida_destroy - release a tag pool's resources
265 : * @pool: pool to free
266 : *
267 : * Frees the resources allocated by percpu_ida_init().
268 : */
269 0 : void percpu_ida_destroy(struct percpu_ida *pool)
270 : {
271 0 : free_percpu(pool->tag_cpu);
272 0 : free_pages((unsigned long) pool->freelist,
273 0 : get_order(pool->nr_tags * sizeof(unsigned)));
274 0 : }
275 : EXPORT_SYMBOL_GPL(percpu_ida_destroy);
276 :
277 : /**
278 : * percpu_ida_init - initialize a percpu tag pool
279 : * @pool: pool to initialize
280 : * @nr_tags: number of tags that will be available for allocation
281 : *
282 : * Initializes @pool so that it can be used to allocate tags - integers in the
283 : * range [0, nr_tags). Typically, they'll be used by driver code to refer to a
284 : * preallocated array of tag structures.
285 : *
286 : * Allocation is percpu, but sharding is limited by nr_tags - for best
287 : * performance, the workload should not span more cpus than nr_tags / 128.
288 : */
289 0 : int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
290 : unsigned long max_size, unsigned long batch_size)
291 : {
292 : unsigned i, cpu, order;
293 :
294 0 : memset(pool, 0, sizeof(*pool));
295 :
296 0 : init_waitqueue_head(&pool->wait);
297 : spin_lock_init(&pool->lock);
298 0 : pool->nr_tags = nr_tags;
299 0 : pool->percpu_max_size = max_size;
300 0 : pool->percpu_batch_size = batch_size;
301 :
302 : /* Guard against overflow */
303 0 : if (nr_tags > (unsigned) INT_MAX + 1) {
304 0 : pr_err("percpu_ida_init(): nr_tags too large\n");
305 0 : return -EINVAL;
306 : }
307 :
308 0 : order = get_order(nr_tags * sizeof(unsigned));
309 0 : pool->freelist = (void *) __get_free_pages(GFP_KERNEL, order);
310 0 : if (!pool->freelist)
311 : return -ENOMEM;
312 :
313 0 : for (i = 0; i < nr_tags; i++)
314 0 : pool->freelist[i] = i;
315 :
316 0 : pool->nr_free = nr_tags;
317 :
318 0 : pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) +
319 0 : pool->percpu_max_size * sizeof(unsigned),
320 : sizeof(unsigned));
321 0 : if (!pool->tag_cpu)
322 : goto err;
323 :
324 0 : for_each_possible_cpu(cpu)
325 : spin_lock_init(&per_cpu_ptr(pool->tag_cpu, cpu)->lock);
326 :
327 : return 0;
328 : err:
329 0 : percpu_ida_destroy(pool);
330 0 : return -ENOMEM;
331 : }
332 : EXPORT_SYMBOL_GPL(__percpu_ida_init);
333 :
334 : /**
335 : * percpu_ida_for_each_free - iterate free ids of a pool
336 : * @pool: pool to iterate
337 : * @fn: interate callback function
338 : * @data: parameter for @fn
339 : *
340 : * Note, this doesn't guarantee to iterate all free ids restrictly. Some free
341 : * ids might be missed, some might be iterated duplicated, and some might
342 : * be iterated and not free soon.
343 : */
344 0 : int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
345 : void *data)
346 : {
347 : unsigned long flags;
348 : struct percpu_ida_cpu *remote;
349 : unsigned cpu, i, err = 0;
350 :
351 : local_irq_save(flags);
352 0 : for_each_possible_cpu(cpu) {
353 0 : remote = per_cpu_ptr(pool->tag_cpu, cpu);
354 : spin_lock(&remote->lock);
355 0 : for (i = 0; i < remote->nr_free; i++) {
356 0 : err = fn(remote->freelist[i], data);
357 0 : if (err)
358 : break;
359 : }
360 : spin_unlock(&remote->lock);
361 0 : if (err)
362 : goto out;
363 : }
364 :
365 : spin_lock(&pool->lock);
366 0 : for (i = 0; i < pool->nr_free; i++) {
367 0 : err = fn(pool->freelist[i], data);
368 0 : if (err)
369 : break;
370 : }
371 : spin_unlock(&pool->lock);
372 : out:
373 0 : local_irq_restore(flags);
374 0 : return err;
375 : }
376 : EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
377 :
378 : /**
379 : * percpu_ida_free_tags - return free tags number of a specific cpu or global pool
380 : * @pool: pool related
381 : * @cpu: specific cpu or global pool if @cpu == nr_cpu_ids
382 : *
383 : * Note: this just returns a snapshot of free tags number.
384 : */
385 0 : unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu)
386 : {
387 : struct percpu_ida_cpu *remote;
388 0 : if (cpu == nr_cpu_ids)
389 0 : return pool->nr_free;
390 0 : remote = per_cpu_ptr(pool->tag_cpu, cpu);
391 0 : return remote->nr_free;
392 : }
393 : EXPORT_SYMBOL_GPL(percpu_ida_free_tags);
|