Line data Source code
1 : /*
2 : * 2002-10-18 written by Jim Houston jim.houston@ccur.com
3 : * Copyright (C) 2002 by Concurrent Computer Corporation
4 : * Distributed under the GNU GPL license version 2.
5 : *
6 : * Modified by George Anzinger to reuse immediately and to use
7 : * find bit instructions. Also removed _irq on spinlocks.
8 : *
9 : * Modified by Nadia Derbey to make it RCU safe.
10 : *
11 : * Small id to pointer translation service.
12 : *
13 : * It uses a radix tree like structure as a sparse array indexed
14 : * by the id to obtain the pointer. The bitmap makes allocating
15 : * a new id quick.
16 : *
17 : * You call it to allocate an id (an int) an associate with that id a
18 : * pointer or what ever, we treat it as a (void *). You can pass this
19 : * id to a user for him to pass back at a later time. You then pass
20 : * that id to this code and it returns your pointer.
21 : */
22 :
23 : #ifndef TEST // to test in user space...
24 : #include <linux/slab.h>
25 : #include <linux/init.h>
26 : #include <linux/export.h>
27 : #endif
28 : #include <linux/err.h>
29 : #include <linux/string.h>
30 : #include <linux/idr.h>
31 : #include <linux/spinlock.h>
32 : #include <linux/percpu.h>
33 : #include <linux/hardirq.h>
34 :
35 : #define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)
36 : #define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
37 :
38 : /* Leave the possibility of an incomplete final layer */
39 : #define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
40 :
41 : /* Number of id_layer structs to leave in free list */
42 : #define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)
43 :
44 : static struct kmem_cache *idr_layer_cache;
45 : static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
46 : static DEFINE_PER_CPU(int, idr_preload_cnt);
47 : static DEFINE_SPINLOCK(simple_ida_lock);
48 :
49 : /* the maximum ID which can be allocated given idr->layers */
50 : static int idr_max(int layers)
51 : {
52 20576 : int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);
53 :
54 20576 : return (1 << bits) - 1;
55 : }
56 :
57 : /*
58 : * Prefix mask for an idr_layer at @layer. For layer 0, the prefix mask is
59 : * all bits except for the lower IDR_BITS. For layer 1, 2 * IDR_BITS, and
60 : * so on.
61 : */
62 : static int idr_layer_prefix_mask(int layer)
63 : {
64 0 : return ~idr_max(layer + 1);
65 : }
66 :
67 11232 : static struct idr_layer *get_from_free_list(struct idr *idp)
68 : {
69 : struct idr_layer *p;
70 : unsigned long flags;
71 :
72 11232 : spin_lock_irqsave(&idp->lock, flags);
73 11232 : if ((p = idp->id_free)) {
74 11231 : idp->id_free = p->ary[0];
75 11231 : idp->id_free_cnt--;
76 11231 : p->ary[0] = NULL;
77 : }
78 : spin_unlock_irqrestore(&idp->lock, flags);
79 11232 : return(p);
80 : }
81 :
82 : /**
83 : * idr_layer_alloc - allocate a new idr_layer
84 : * @gfp_mask: allocation mask
85 : * @layer_idr: optional idr to allocate from
86 : *
87 : * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
88 : * one from the per-cpu preload buffer. If @layer_idr is not %NULL, fetch
89 : * an idr_layer from @idr->id_free.
90 : *
91 : * @layer_idr is to maintain backward compatibility with the old alloc
92 : * interface - idr_pre_get() and idr_get_new*() - and will be removed
93 : * together with per-pool preload buffer.
94 : */
95 126 : static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
96 : {
97 : struct idr_layer *new;
98 :
99 : /* this is the old path, bypass to get_from_free_list() */
100 126 : if (layer_idr)
101 83 : return get_from_free_list(layer_idr);
102 :
103 : /*
104 : * Try to allocate directly from kmem_cache. We want to try this
105 : * before preload buffer; otherwise, non-preloading idr_alloc()
106 : * users will end up taking advantage of preloading ones. As the
107 : * following is allowed to fail for preloaded cases, suppress
108 : * warning this time.
109 : */
110 43 : new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
111 43 : if (new)
112 : return new;
113 :
114 : /*
115 : * Try to fetch one from the per-cpu preload buffer if in process
116 : * context. See idr_preload() for details.
117 : */
118 0 : if (!in_interrupt()) {
119 0 : preempt_disable();
120 0 : new = __this_cpu_read(idr_preload_head);
121 0 : if (new) {
122 0 : __this_cpu_write(idr_preload_head, new->ary[0]);
123 0 : __this_cpu_dec(idr_preload_cnt);
124 0 : new->ary[0] = NULL;
125 : }
126 0 : preempt_enable();
127 0 : if (new)
128 : return new;
129 : }
130 :
131 : /*
132 : * Both failed. Try kmem_cache again w/o adding __GFP_NOWARN so
133 : * that memory allocation failure warning is printed as intended.
134 : */
135 0 : return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
136 : }
137 :
138 92 : static void idr_layer_rcu_free(struct rcu_head *head)
139 : {
140 : struct idr_layer *layer;
141 :
142 92 : layer = container_of(head, struct idr_layer, rcu_head);
143 92 : kmem_cache_free(idr_layer_cache, layer);
144 92 : }
145 :
146 : static inline void free_layer(struct idr *idr, struct idr_layer *p)
147 : {
148 92 : if (idr->hint == p)
149 22 : RCU_INIT_POINTER(idr->hint, NULL);
150 92 : call_rcu(&p->rcu_head, idr_layer_rcu_free);
151 : }
152 :
153 : /* only called when idp->lock is held */
154 : static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
155 : {
156 11309 : p->ary[0] = idp->id_free;
157 11309 : idp->id_free = p;
158 11309 : idp->id_free_cnt++;
159 : }
160 :
161 11309 : static void move_to_free_list(struct idr *idp, struct idr_layer *p)
162 : {
163 : unsigned long flags;
164 :
165 : /*
166 : * Depends on the return element being zeroed.
167 : */
168 11309 : spin_lock_irqsave(&idp->lock, flags);
169 : __move_to_free_list(idp, p);
170 : spin_unlock_irqrestore(&idp->lock, flags);
171 11309 : }
172 :
173 94 : static void idr_mark_full(struct idr_layer **pa, int id)
174 : {
175 94 : struct idr_layer *p = pa[0];
176 : int l = 0;
177 :
178 94 : __set_bit(id & IDR_MASK, p->bitmap);
179 : /*
180 : * If this layer is full mark the bit in the layer above to
181 : * show that this part of the radix tree is full. This may
182 : * complete the layer above and require walking up the radix
183 : * tree.
184 : */
185 188 : while (bitmap_full(p->bitmap, IDR_SIZE)) {
186 0 : if (!(p = pa[++l]))
187 : break;
188 0 : id = id >> IDR_BITS;
189 0 : __set_bit((id & IDR_MASK), p->bitmap);
190 : }
191 94 : }
192 :
193 11145 : static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
194 : {
195 33599 : while (idp->id_free_cnt < MAX_IDR_FREE) {
196 : struct idr_layer *new;
197 11309 : new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
198 11309 : if (new == NULL)
199 : return (0);
200 11309 : move_to_free_list(idp, new);
201 : }
202 : return 1;
203 : }
204 :
205 : /**
206 : * sub_alloc - try to allocate an id without growing the tree depth
207 : * @idp: idr handle
208 : * @starting_id: id to start search at
209 : * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
210 : * @gfp_mask: allocation mask for idr_layer_alloc()
211 : * @layer_idr: optional idr passed to idr_layer_alloc()
212 : *
213 : * Allocate an id in range [@starting_id, INT_MAX] from @idp without
214 : * growing its depth. Returns
215 : *
216 : * the allocated id >= 0 if successful,
217 : * -EAGAIN if the tree needs to grow for allocation to succeed,
218 : * -ENOSPC if the id space is exhausted,
219 : * -ENOMEM if more idr_layers need to be allocated.
220 : */
221 16275 : static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
222 : gfp_t gfp_mask, struct idr *layer_idr)
223 : {
224 : int n, m, sh;
225 : struct idr_layer *p, *new;
226 : int l, id, oid;
227 :
228 16275 : id = *starting_id;
229 : restart:
230 16275 : p = idp->top;
231 16275 : l = idp->layers;
232 16275 : pa[l--] = NULL;
233 : while (1) {
234 : /*
235 : * We run around this while until we reach the leaf node...
236 : */
237 16275 : n = (id >> (IDR_BITS*l)) & IDR_MASK;
238 16275 : m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
239 16275 : if (m == IDR_SIZE) {
240 : /* no space available go back to previous layer. */
241 0 : l++;
242 : oid = id;
243 0 : id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
244 :
245 : /* if already at the top layer, we need to grow */
246 0 : if (id > idr_max(idp->layers)) {
247 0 : *starting_id = id;
248 : return -EAGAIN;
249 : }
250 0 : p = pa[l];
251 : BUG_ON(!p);
252 :
253 : /* If we need to go up one layer, continue the
254 : * loop; otherwise, restart from the top.
255 : */
256 0 : sh = IDR_BITS * (l + 1);
257 0 : if (oid >> sh == id >> sh)
258 0 : continue;
259 : else
260 : goto restart;
261 : }
262 16275 : if (m != n) {
263 : sh = IDR_BITS*l;
264 4097 : id = ((id >> sh) ^ n ^ m) << sh;
265 : }
266 16275 : if ((id >= MAX_IDR_BIT) || (id < 0))
267 : return -ENOSPC;
268 16275 : if (l == 0)
269 : break;
270 : /*
271 : * Create the layer below if it is missing.
272 : */
273 0 : if (!p->ary[m]) {
274 0 : new = idr_layer_alloc(gfp_mask, layer_idr);
275 0 : if (!new)
276 : return -ENOMEM;
277 0 : new->layer = l-1;
278 0 : new->prefix = id & idr_layer_prefix_mask(new->layer);
279 0 : rcu_assign_pointer(p->ary[m], new);
280 0 : p->count++;
281 : }
282 0 : pa[l--] = p;
283 0 : p = p->ary[m];
284 : }
285 :
286 16275 : pa[l] = p;
287 : return id;
288 : }
289 :
290 16276 : static int idr_get_empty_slot(struct idr *idp, int starting_id,
291 : struct idr_layer **pa, gfp_t gfp_mask,
292 : struct idr *layer_idr)
293 : {
294 : struct idr_layer *p, *new;
295 : int layers, v, id;
296 : unsigned long flags;
297 :
298 16276 : id = starting_id;
299 : build_up:
300 16276 : p = idp->top;
301 16276 : layers = idp->layers;
302 16276 : if (unlikely(!p)) {
303 126 : if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
304 : return -ENOMEM;
305 125 : p->layer = 0;
306 : layers = 1;
307 : }
308 : /*
309 : * Add a new layer to the top of the tree if the requested
310 : * id is larger than the currently allocated space.
311 : */
312 16275 : while (id > idr_max(layers)) {
313 0 : layers++;
314 0 : if (!p->count) {
315 : /* special case: if the tree is currently empty,
316 : * then we grow the tree by moving the top node
317 : * upwards.
318 : */
319 0 : p->layer++;
320 : WARN_ON_ONCE(p->prefix);
321 0 : continue;
322 : }
323 0 : if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
324 : /*
325 : * The allocation failed. If we built part of
326 : * the structure tear it down.
327 : */
328 0 : spin_lock_irqsave(&idp->lock, flags);
329 0 : for (new = p; p && p != idp->top; new = p) {
330 0 : p = p->ary[0];
331 0 : new->ary[0] = NULL;
332 0 : new->count = 0;
333 0 : bitmap_clear(new->bitmap, 0, IDR_SIZE);
334 : __move_to_free_list(idp, new);
335 : }
336 : spin_unlock_irqrestore(&idp->lock, flags);
337 : return -ENOMEM;
338 : }
339 0 : new->ary[0] = p;
340 0 : new->count = 1;
341 0 : new->layer = layers-1;
342 0 : new->prefix = id & idr_layer_prefix_mask(new->layer);
343 0 : if (bitmap_full(p->bitmap, IDR_SIZE))
344 : __set_bit(0, new->bitmap);
345 : p = new;
346 : }
347 16275 : rcu_assign_pointer(idp->top, p);
348 16275 : idp->layers = layers;
349 16275 : v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
350 16275 : if (v == -EAGAIN)
351 : goto build_up;
352 : return(v);
353 : }
354 :
355 : /*
356 : * @id and @pa are from a successful allocation from idr_get_empty_slot().
357 : * Install the user pointer @ptr and mark the slot full.
358 : */
359 : static void idr_fill_slot(struct idr *idr, void *ptr, int id,
360 : struct idr_layer **pa)
361 : {
362 : /* update hint used for lookup, cleared from free_layer() */
363 89 : rcu_assign_pointer(idr->hint, pa[0]);
364 :
365 89 : rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
366 89 : pa[0]->count++;
367 89 : idr_mark_full(pa, id);
368 : }
369 :
370 :
371 : /**
372 : * idr_preload - preload for idr_alloc()
373 : * @gfp_mask: allocation mask to use for preloading
374 : *
375 : * Preload per-cpu layer buffer for idr_alloc(). Can only be used from
376 : * process context and each idr_preload() invocation should be matched with
377 : * idr_preload_end(). Note that preemption is disabled while preloaded.
378 : *
379 : * The first idr_alloc() in the preloaded section can be treated as if it
380 : * were invoked with @gfp_mask used for preloading. This allows using more
381 : * permissive allocation masks for idrs protected by spinlocks.
382 : *
383 : * For example, if idr_alloc() below fails, the failure can be treated as
384 : * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
385 : *
386 : * idr_preload(GFP_KERNEL);
387 : * spin_lock(lock);
388 : *
389 : * id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
390 : *
391 : * spin_unlock(lock);
392 : * idr_preload_end();
393 : * if (id < 0)
394 : * error;
395 : */
396 74 : void idr_preload(gfp_t gfp_mask)
397 : {
398 : /*
399 : * Consuming preload buffer from non-process context breaks preload
400 : * allocation guarantee. Disallow usage from those contexts.
401 : */
402 : WARN_ON_ONCE(in_interrupt());
403 : might_sleep_if(gfp_mask & __GFP_WAIT);
404 :
405 74 : preempt_disable();
406 :
407 : /*
408 : * idr_alloc() is likely to succeed w/o full idr_layer buffer and
409 : * return value from idr_alloc() needs to be checked for failure
410 : * anyway. Silently give up if allocation fails. The caller can
411 : * treat failures from idr_alloc() as if idr_alloc() were called
412 : * with @gfp_mask which should be enough.
413 : */
414 156 : while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
415 : struct idr_layer *new;
416 :
417 16 : preempt_enable();
418 8 : new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
419 8 : preempt_disable();
420 8 : if (!new)
421 : break;
422 :
423 : /* link the new one to per-cpu preload list */
424 8 : new->ary[0] = __this_cpu_read(idr_preload_head);
425 8 : __this_cpu_write(idr_preload_head, new);
426 8 : __this_cpu_inc(idr_preload_cnt);
427 : }
428 74 : }
429 : EXPORT_SYMBOL(idr_preload);
430 :
431 : /**
432 : * idr_alloc - allocate new idr entry
433 : * @idr: the (initialized) idr
434 : * @ptr: pointer to be associated with the new id
435 : * @start: the minimum id (inclusive)
436 : * @end: the maximum id (exclusive, <= 0 for max)
437 : * @gfp_mask: memory allocation flags
438 : *
439 : * Allocate an id in [start, end) and associate it with @ptr. If no ID is
440 : * available in the specified range, returns -ENOSPC. On memory allocation
441 : * failure, returns -ENOMEM.
442 : *
443 : * Note that @end is treated as max when <= 0. This is to always allow
444 : * using @start + N as @end as long as N is inside integer range.
445 : *
446 : * The user is responsible for exclusively synchronizing all operations
447 : * which may modify @idr. However, read-only accesses such as idr_find()
448 : * or iteration can be performed under RCU read lock provided the user
449 : * destroys @ptr in RCU-safe way after removal from idr.
450 : */
451 89 : int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
452 : {
453 89 : int max = end > 0 ? end - 1 : INT_MAX; /* inclusive upper limit */
454 : struct idr_layer *pa[MAX_IDR_LEVEL + 1];
455 : int id;
456 :
457 : might_sleep_if(gfp_mask & __GFP_WAIT);
458 :
459 : /* sanity checks */
460 89 : if (WARN_ON_ONCE(start < 0))
461 : return -EINVAL;
462 89 : if (unlikely(max < start))
463 : return -ENOSPC;
464 :
465 : /* allocate id */
466 89 : id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
467 89 : if (unlikely(id < 0))
468 : return id;
469 89 : if (unlikely(id > max))
470 : return -ENOSPC;
471 :
472 : idr_fill_slot(idr, ptr, id, pa);
473 89 : return id;
474 : }
475 : EXPORT_SYMBOL_GPL(idr_alloc);
476 :
477 : /**
478 : * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
479 : * @idr: the (initialized) idr
480 : * @ptr: pointer to be associated with the new id
481 : * @start: the minimum id (inclusive)
482 : * @end: the maximum id (exclusive, <= 0 for max)
483 : * @gfp_mask: memory allocation flags
484 : *
485 : * Essentially the same as idr_alloc, but prefers to allocate progressively
486 : * higher ids if it can. If the "cur" counter wraps, then it will start again
487 : * at the "start" end of the range and allocate one that has already been used.
488 : */
489 51 : int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
490 : gfp_t gfp_mask)
491 : {
492 : int id;
493 :
494 51 : id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask);
495 51 : if (id == -ENOSPC)
496 0 : id = idr_alloc(idr, ptr, start, end, gfp_mask);
497 :
498 51 : if (likely(id >= 0))
499 51 : idr->cur = id + 1;
500 51 : return id;
501 : }
502 : EXPORT_SYMBOL(idr_alloc_cyclic);
503 :
504 : static void idr_remove_warning(int id)
505 : {
506 : WARN(1, "idr_remove called for id=%d which is not allocated.\n", id);
507 : }
508 :
509 98 : static void sub_remove(struct idr *idp, int shift, int id)
510 : {
511 98 : struct idr_layer *p = idp->top;
512 : struct idr_layer **pa[MAX_IDR_LEVEL + 1];
513 : struct idr_layer ***paa = &pa[0];
514 : struct idr_layer *to_free;
515 : int n;
516 :
517 98 : *paa = NULL;
518 98 : *++paa = &idp->top;
519 :
520 196 : while ((shift > 0) && p) {
521 0 : n = (id >> shift) & IDR_MASK;
522 0 : __clear_bit(n, p->bitmap);
523 0 : *++paa = &p->ary[n];
524 0 : p = p->ary[n];
525 0 : shift -= IDR_BITS;
526 : }
527 98 : n = id & IDR_MASK;
528 196 : if (likely(p != NULL && test_bit(n, p->bitmap))) {
529 : __clear_bit(n, p->bitmap);
530 98 : RCU_INIT_POINTER(p->ary[n], NULL);
531 : to_free = NULL;
532 288 : while(*paa && ! --((**paa)->count)){
533 92 : if (to_free)
534 : free_layer(idp, to_free);
535 92 : to_free = **paa;
536 92 : **paa-- = NULL;
537 : }
538 98 : if (!*paa)
539 92 : idp->layers = 0;
540 98 : if (to_free)
541 : free_layer(idp, to_free);
542 : } else
543 : idr_remove_warning(id);
544 98 : }
545 :
546 : /**
547 : * idr_remove - remove the given id and free its slot
548 : * @idp: idr handle
549 : * @id: unique key
550 : */
551 98 : void idr_remove(struct idr *idp, int id)
552 : {
553 : struct idr_layer *p;
554 : struct idr_layer *to_free;
555 :
556 98 : if (id < 0)
557 : return;
558 :
559 196 : if (id > idr_max(idp->layers)) {
560 : idr_remove_warning(id);
561 : return;
562 : }
563 :
564 98 : sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
565 98 : if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
566 0 : idp->top->ary[0]) {
567 : /*
568 : * Single child at leftmost slot: we can shrink the tree.
569 : * This level is not needed anymore since when layers are
570 : * inserted, they are inserted at the top of the existing
571 : * tree.
572 : */
573 : to_free = idp->top;
574 : p = idp->top->ary[0];
575 0 : rcu_assign_pointer(idp->top, p);
576 0 : --idp->layers;
577 0 : to_free->count = 0;
578 0 : bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
579 : free_layer(idp, to_free);
580 : }
581 : }
582 : EXPORT_SYMBOL(idr_remove);
583 :
584 14 : static void __idr_remove_all(struct idr *idp)
585 : {
586 : int n, id, max;
587 : int bt_mask;
588 : struct idr_layer *p;
589 : struct idr_layer *pa[MAX_IDR_LEVEL + 1];
590 : struct idr_layer **paa = &pa[0];
591 :
592 14 : n = idp->layers * IDR_BITS;
593 14 : *paa = idp->top;
594 14 : RCU_INIT_POINTER(idp->top, NULL);
595 : max = idr_max(idp->layers);
596 :
597 : id = 0;
598 42 : while (id >= 0 && id <= max) {
599 14 : p = *paa;
600 28 : while (n > IDR_BITS && p) {
601 0 : n -= IDR_BITS;
602 0 : p = p->ary[(id >> n) & IDR_MASK];
603 0 : *++paa = p;
604 : }
605 :
606 : bt_mask = id;
607 14 : id += 1 << n;
608 : /* Get the highest bit that the above add changed from 0->1. */
609 70 : while (n < fls(id ^ bt_mask)) {
610 14 : if (*paa)
611 : free_layer(idp, *paa);
612 14 : n += IDR_BITS;
613 14 : --paa;
614 : }
615 : }
616 14 : idp->layers = 0;
617 14 : }
618 :
619 : /**
620 : * idr_destroy - release all cached layers within an idr tree
621 : * @idp: idr handle
622 : *
623 : * Free all id mappings and all idp_layers. After this function, @idp is
624 : * completely unused and can be freed / recycled. The caller is
625 : * responsible for ensuring that no one else accesses @idp during or after
626 : * idr_destroy().
627 : *
628 : * A typical clean-up sequence for objects stored in an idr tree will use
629 : * idr_for_each() to free all objects, if necessary, then idr_destroy() to
630 : * free up the id mappings and cached idr_layers.
631 : */
632 14 : void idr_destroy(struct idr *idp)
633 : {
634 14 : __idr_remove_all(idp);
635 :
636 28 : while (idp->id_free_cnt) {
637 0 : struct idr_layer *p = get_from_free_list(idp);
638 0 : kmem_cache_free(idr_layer_cache, p);
639 : }
640 14 : }
641 : EXPORT_SYMBOL(idr_destroy);
642 :
643 0 : void *idr_find_slowpath(struct idr *idp, int id)
644 : {
645 : int n;
646 : struct idr_layer *p;
647 :
648 0 : if (id < 0)
649 : return NULL;
650 :
651 0 : p = rcu_dereference_raw(idp->top);
652 0 : if (!p)
653 : return NULL;
654 0 : n = (p->layer+1) * IDR_BITS;
655 :
656 0 : if (id > idr_max(p->layer + 1))
657 : return NULL;
658 : BUG_ON(n == 0);
659 :
660 0 : while (n > 0 && p) {
661 0 : n -= IDR_BITS;
662 : BUG_ON(n != p->layer*IDR_BITS);
663 0 : p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
664 : }
665 : return((void *)p);
666 : }
667 : EXPORT_SYMBOL(idr_find_slowpath);
668 :
669 : /**
670 : * idr_for_each - iterate through all stored pointers
671 : * @idp: idr handle
672 : * @fn: function to be called for each pointer
673 : * @data: data passed back to callback function
674 : *
675 : * Iterate over the pointers registered with the given idr. The
676 : * callback function will be called for each pointer currently
677 : * registered, passing the id, the pointer and the data pointer passed
678 : * to this function. It is not safe to modify the idr tree while in
679 : * the callback, so functions such as idr_get_new and idr_remove are
680 : * not allowed.
681 : *
682 : * We check the return of @fn each time. If it returns anything other
683 : * than %0, we break out and return that value.
684 : *
685 : * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
686 : */
687 14 : int idr_for_each(struct idr *idp,
688 : int (*fn)(int id, void *p, void *data), void *data)
689 : {
690 : int n, id, max, error = 0;
691 : struct idr_layer *p;
692 : struct idr_layer *pa[MAX_IDR_LEVEL + 1];
693 : struct idr_layer **paa = &pa[0];
694 :
695 14 : n = idp->layers * IDR_BITS;
696 14 : *paa = rcu_dereference_raw(idp->top);
697 : max = idr_max(idp->layers);
698 :
699 : id = 0;
700 42 : while (id >= 0 && id <= max) {
701 14 : p = *paa;
702 28 : while (n > 0 && p) {
703 0 : n -= IDR_BITS;
704 0 : p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
705 0 : *++paa = p;
706 : }
707 :
708 14 : if (p) {
709 0 : error = fn(id, (void *)p, data);
710 0 : if (error)
711 : break;
712 : }
713 :
714 14 : id += 1 << n;
715 42 : while (n < fls(id)) {
716 14 : n += IDR_BITS;
717 14 : --paa;
718 : }
719 : }
720 :
721 14 : return error;
722 : }
723 : EXPORT_SYMBOL(idr_for_each);
724 :
725 : /**
726 : * idr_get_next - lookup next object of id to given id.
727 : * @idp: idr handle
728 : * @nextidp: pointer to lookup key
729 : *
730 : * Returns pointer to registered object with id, which is next number to
731 : * given id. After being looked up, *@nextidp will be updated for the next
732 : * iteration.
733 : *
734 : * This function can be called under rcu_read_lock(), given that the leaf
735 : * pointers lifetimes are correctly managed.
736 : */
737 0 : void *idr_get_next(struct idr *idp, int *nextidp)
738 : {
739 : struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1];
740 : struct idr_layer **paa = &pa[0];
741 0 : int id = *nextidp;
742 : int n, max;
743 :
744 : /* find first ent */
745 0 : p = *paa = rcu_dereference_raw(idp->top);
746 0 : if (!p)
747 : return NULL;
748 0 : n = (p->layer + 1) * IDR_BITS;
749 : max = idr_max(p->layer + 1);
750 :
751 0 : while (id >= 0 && id <= max) {
752 0 : p = *paa;
753 0 : while (n > 0 && p) {
754 0 : n -= IDR_BITS;
755 0 : p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
756 0 : *++paa = p;
757 : }
758 :
759 0 : if (p) {
760 0 : *nextidp = id;
761 0 : return p;
762 : }
763 :
764 : /*
765 : * Proceed to the next layer at the current level. Unlike
766 : * idr_for_each(), @id isn't guaranteed to be aligned to
767 : * layer boundary at this point and adding 1 << n may
768 : * incorrectly skip IDs. Make sure we jump to the
769 : * beginning of the next layer using round_up().
770 : */
771 0 : id = round_up(id + 1, 1 << n);
772 0 : while (n < fls(id)) {
773 0 : n += IDR_BITS;
774 0 : --paa;
775 : }
776 : }
777 : return NULL;
778 : }
779 : EXPORT_SYMBOL(idr_get_next);
780 :
781 :
782 : /**
783 : * idr_replace - replace pointer for given id
784 : * @idp: idr handle
785 : * @ptr: pointer you want associated with the id
786 : * @id: lookup key
787 : *
788 : * Replace the pointer registered with an id and return the old value.
789 : * A %-ENOENT return indicates that @id was not found.
790 : * A %-EINVAL return indicates that @id was not within valid constraints.
791 : *
792 : * The caller must serialize with writers.
793 : */
794 0 : void *idr_replace(struct idr *idp, void *ptr, int id)
795 : {
796 : int n;
797 : struct idr_layer *p, *old_p;
798 :
799 0 : if (id < 0)
800 : return ERR_PTR(-EINVAL);
801 :
802 0 : p = idp->top;
803 0 : if (!p)
804 : return ERR_PTR(-ENOENT);
805 :
806 0 : if (id > idr_max(p->layer + 1))
807 : return ERR_PTR(-ENOENT);
808 :
809 0 : n = p->layer * IDR_BITS;
810 0 : while ((n > 0) && p) {
811 0 : p = p->ary[(id >> n) & IDR_MASK];
812 0 : n -= IDR_BITS;
813 : }
814 :
815 0 : n = id & IDR_MASK;
816 0 : if (unlikely(p == NULL || !test_bit(n, p->bitmap)))
817 : return ERR_PTR(-ENOENT);
818 :
819 0 : old_p = p->ary[n];
820 0 : rcu_assign_pointer(p->ary[n], ptr);
821 :
822 0 : return old_p;
823 : }
824 : EXPORT_SYMBOL(idr_replace);
825 :
826 1 : void __init idr_init_cache(void)
827 : {
828 1 : idr_layer_cache = kmem_cache_create("idr_layer_cache",
829 : sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
830 1 : }
831 :
832 : /**
833 : * idr_init - initialize idr handle
834 : * @idp: idr handle
835 : *
836 : * This function is use to set up the handle (@idp) that you will pass
837 : * to the rest of the functions.
838 : */
839 32 : void idr_init(struct idr *idp)
840 : {
841 38 : memset(idp, 0, sizeof(struct idr));
842 : spin_lock_init(&idp->lock);
843 32 : }
844 : EXPORT_SYMBOL(idr_init);
845 :
846 0 : static int idr_has_entry(int id, void *p, void *data)
847 : {
848 0 : return 1;
849 : }
850 :
851 0 : bool idr_is_empty(struct idr *idp)
852 : {
853 0 : return !idr_for_each(idp, idr_has_entry, NULL);
854 : }
855 : EXPORT_SYMBOL(idr_is_empty);
856 :
857 : /**
858 : * DOC: IDA description
859 : * IDA - IDR based ID allocator
860 : *
861 : * This is id allocator without id -> pointer translation. Memory
862 : * usage is much lower than full blown idr because each id only
863 : * occupies a bit. ida uses a custom leaf node which contains
864 : * IDA_BITMAP_BITS slots.
865 : *
866 : * 2007-04-25 written by Tejun Heo <htejun@gmail.com>
867 : */
868 :
869 116 : static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
870 : {
871 : unsigned long flags;
872 :
873 116 : if (!ida->free_bitmap) {
874 98 : spin_lock_irqsave(&ida->idr.lock, flags);
875 98 : if (!ida->free_bitmap) {
876 98 : ida->free_bitmap = bitmap;
877 : bitmap = NULL;
878 : }
879 : spin_unlock_irqrestore(&ida->idr.lock, flags);
880 : }
881 :
882 116 : kfree(bitmap);
883 116 : }
884 :
885 : /**
886 : * ida_pre_get - reserve resources for ida allocation
887 : * @ida: ida handle
888 : * @gfp_mask: memory allocation flag
889 : *
890 : * This function should be called prior to locking and calling the
891 : * following function. It preallocates enough memory to satisfy the
892 : * worst possible allocation.
893 : *
894 : * If the system is REALLY out of memory this function returns %0,
895 : * otherwise %1.
896 : */
897 11145 : int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
898 : {
899 : /* allocate idr_layers */
900 11145 : if (!__idr_pre_get(&ida->idr, gfp_mask))
901 : return 0;
902 :
903 : /* allocate free_bitmap */
904 11145 : if (!ida->free_bitmap) {
905 : struct ida_bitmap *bitmap;
906 :
907 : bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
908 46 : if (!bitmap)
909 : return 0;
910 :
911 46 : free_bitmap(ida, bitmap);
912 : }
913 :
914 : return 1;
915 : }
916 : EXPORT_SYMBOL(ida_pre_get);
917 :
918 : /**
919 : * ida_get_new_above - allocate new ID above or equal to a start id
920 : * @ida: ida handle
921 : * @starting_id: id to start search at
922 : * @p_id: pointer to the allocated handle
923 : *
924 : * Allocate new ID above or equal to @starting_id. It should be called
925 : * with any required locks.
926 : *
927 : * If memory is required, it will return %-EAGAIN, you should unlock
928 : * and go back to the ida_pre_get() call. If the ida is full, it will
929 : * return %-ENOSPC.
930 : *
931 : * @p_id returns a value in the range @starting_id ... %0x7fffffff.
932 : */
933 11150 : int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
934 : {
935 : struct idr_layer *pa[MAX_IDR_LEVEL + 1];
936 : struct ida_bitmap *bitmap;
937 : unsigned long flags;
938 11150 : int idr_id = starting_id / IDA_BITMAP_BITS;
939 11150 : int offset = starting_id % IDA_BITMAP_BITS;
940 : int t, id;
941 :
942 : restart:
943 : /* get vacant slot */
944 16187 : t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
945 16187 : if (t < 0)
946 1 : return t == -ENOMEM ? -EAGAIN : t;
947 :
948 16186 : if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
949 : return -ENOSPC;
950 :
951 16186 : if (t != idr_id)
952 : offset = 0;
953 : idr_id = t;
954 :
955 : /* if bitmap isn't there, create a new one */
956 16186 : bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
957 16186 : if (!bitmap) {
958 88 : spin_lock_irqsave(&ida->idr.lock, flags);
959 88 : bitmap = ida->free_bitmap;
960 88 : ida->free_bitmap = NULL;
961 : spin_unlock_irqrestore(&ida->idr.lock, flags);
962 :
963 88 : if (!bitmap)
964 : return -EAGAIN;
965 :
966 88 : memset(bitmap, 0, sizeof(struct ida_bitmap));
967 88 : rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
968 : (void *)bitmap);
969 88 : pa[0]->count++;
970 : }
971 :
972 : /* lookup for empty slot */
973 16186 : t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
974 16186 : if (t == IDA_BITMAP_BITS) {
975 : /* no empty slot after offset, continue to the next chunk */
976 5037 : idr_id++;
977 : offset = 0;
978 5037 : goto restart;
979 : }
980 :
981 11149 : id = idr_id * IDA_BITMAP_BITS + t;
982 11149 : if (id >= MAX_IDR_BIT)
983 : return -ENOSPC;
984 :
985 : __set_bit(t, bitmap->bitmap);
986 11149 : if (++bitmap->nr_busy == IDA_BITMAP_BITS)
987 5 : idr_mark_full(pa, idr_id);
988 :
989 11149 : *p_id = id;
990 :
991 : /* Each leaf node can handle nearly a thousand slots and the
992 : * whole idea of ida is to have small memory foot print.
993 : * Throw away extra resources one by one after each successful
994 : * allocation.
995 : */
996 11149 : if (ida->idr.id_free_cnt || ida->free_bitmap) {
997 11149 : struct idr_layer *p = get_from_free_list(&ida->idr);
998 11149 : if (p)
999 11149 : kmem_cache_free(idr_layer_cache, p);
1000 : }
1001 :
1002 : return 0;
1003 : }
1004 : EXPORT_SYMBOL(ida_get_new_above);
1005 :
1006 : /**
1007 : * ida_remove - remove the given ID
1008 : * @ida: ida handle
1009 : * @id: ID to free
1010 : */
1011 4175 : void ida_remove(struct ida *ida, int id)
1012 : {
1013 4175 : struct idr_layer *p = ida->idr.top;
1014 4175 : int shift = (ida->idr.layers - 1) * IDR_BITS;
1015 4175 : int idr_id = id / IDA_BITMAP_BITS;
1016 4175 : int offset = id % IDA_BITMAP_BITS;
1017 : int n;
1018 : struct ida_bitmap *bitmap;
1019 :
1020 4175 : if (idr_id > idr_max(ida->idr.layers))
1021 : goto err;
1022 :
1023 : /* clear full bits while looking up the leaf idr_layer */
1024 4175 : while ((shift > 0) && p) {
1025 0 : n = (idr_id >> shift) & IDR_MASK;
1026 0 : __clear_bit(n, p->bitmap);
1027 0 : p = p->ary[n];
1028 0 : shift -= IDR_BITS;
1029 : }
1030 :
1031 4175 : if (p == NULL)
1032 : goto err;
1033 :
1034 4175 : n = idr_id & IDR_MASK;
1035 4175 : __clear_bit(n, p->bitmap);
1036 :
1037 4175 : bitmap = (void *)p->ary[n];
1038 8350 : if (!bitmap || !test_bit(offset, bitmap->bitmap))
1039 : goto err;
1040 :
1041 : /* update bitmap and remove it if empty */
1042 : __clear_bit(offset, bitmap->bitmap);
1043 4175 : if (--bitmap->nr_busy == 0) {
1044 : __set_bit(n, p->bitmap); /* to please idr_remove() */
1045 70 : idr_remove(&ida->idr, idr_id);
1046 70 : free_bitmap(ida, bitmap);
1047 : }
1048 :
1049 4175 : return;
1050 :
1051 : err:
1052 : WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
1053 : }
1054 : EXPORT_SYMBOL(ida_remove);
1055 :
1056 : /**
1057 : * ida_destroy - release all cached layers within an ida tree
1058 : * @ida: ida handle
1059 : */
1060 0 : void ida_destroy(struct ida *ida)
1061 : {
1062 0 : idr_destroy(&ida->idr);
1063 0 : kfree(ida->free_bitmap);
1064 0 : }
1065 : EXPORT_SYMBOL(ida_destroy);
1066 :
1067 : /**
1068 : * ida_simple_get - get a new id.
1069 : * @ida: the (initialized) ida.
1070 : * @start: the minimum id (inclusive, < 0x8000000)
1071 : * @end: the maximum id (exclusive, < 0x8000000 or 0)
1072 : * @gfp_mask: memory allocation flags
1073 : *
1074 : * Allocates an id in the range start <= id < end, or returns -ENOSPC.
1075 : * On memory allocation failure, returns -ENOMEM.
1076 : *
1077 : * Use ida_simple_remove() to get rid of an id.
1078 : */
1079 9601 : int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
1080 : gfp_t gfp_mask)
1081 : {
1082 : int ret, id;
1083 : unsigned int max;
1084 : unsigned long flags;
1085 :
1086 : BUG_ON((int)start < 0);
1087 : BUG_ON((int)end < 0);
1088 :
1089 9601 : if (end == 0)
1090 : max = 0x80000000;
1091 : else {
1092 : BUG_ON(end < start);
1093 5 : max = end - 1;
1094 : }
1095 :
1096 : again:
1097 9601 : if (!ida_pre_get(ida, gfp_mask))
1098 : return -ENOMEM;
1099 :
1100 9601 : spin_lock_irqsave(&simple_ida_lock, flags);
1101 9601 : ret = ida_get_new_above(ida, start, &id);
1102 9601 : if (!ret) {
1103 9601 : if (id > max) {
1104 0 : ida_remove(ida, id);
1105 : ret = -ENOSPC;
1106 : } else {
1107 : ret = id;
1108 : }
1109 : }
1110 : spin_unlock_irqrestore(&simple_ida_lock, flags);
1111 :
1112 9601 : if (unlikely(ret == -EAGAIN))
1113 : goto again;
1114 :
1115 : return ret;
1116 : }
1117 : EXPORT_SYMBOL(ida_simple_get);
1118 :
1119 : /**
1120 : * ida_simple_remove - remove an allocated id.
1121 : * @ida: the (initialized) ida.
1122 : * @id: the id returned by ida_simple_get.
1123 : */
1124 3715 : void ida_simple_remove(struct ida *ida, unsigned int id)
1125 : {
1126 : unsigned long flags;
1127 :
1128 : BUG_ON((int)id < 0);
1129 3715 : spin_lock_irqsave(&simple_ida_lock, flags);
1130 3715 : ida_remove(ida, id);
1131 : spin_unlock_irqrestore(&simple_ida_lock, flags);
1132 3715 : }
1133 : EXPORT_SYMBOL(ida_simple_remove);
1134 :
1135 : /**
1136 : * ida_init - initialize ida handle
1137 : * @ida: ida handle
1138 : *
1139 : * This function is use to set up the handle (@ida) that you will pass
1140 : * to the rest of the functions.
1141 : */
1142 6 : void ida_init(struct ida *ida)
1143 : {
1144 6 : memset(ida, 0, sizeof(struct ida));
1145 6 : idr_init(&ida->idr);
1146 :
1147 6 : }
1148 : EXPORT_SYMBOL(ida_init);
|