/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface. Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks. This
 * is implemented by using atomic operations and retries on any
 * conflicts. The disadvantage is that there may be livelocks in
 * extreme cases. For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available. If new memory is added to the pool, a lock still has
 * to be taken. So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers. So code that uses
 * the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
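
/*
 * A minimal usage sketch (illustrative only, not taken from an
 * in-tree driver). gen_pool_create(4, -1) gives a 16-byte allocation
 * granularity (order 4) with no NUMA preference. Adding the chunk
 * takes the pool lock, so do it in process context; the subsequent
 * alloc/free pair is the lockless fast path. vaddr, paddr and SZ_64K
 * are assumed example values:
 *
 *	struct gen_pool *pool = gen_pool_create(4, -1);
 *
 *	if (!pool)
 *		return -ENOMEM;
 *	if (gen_pool_add_virt(pool, vaddr, paddr, SZ_64K, -1)) {
 *		gen_pool_destroy(pool);
 *		return -ENOMEM;
 *	}
 *	addr = gen_pool_alloc(pool, 256);
 *	if (addr)
 *		gen_pool_free(pool, addr, 256);
 */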

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_address.h>
#include <linux/of_device.h>

static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
	return chunk->end_addr - chunk->start_addr + 1;
}

static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

	return 0;
}

static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

	return 0;
}
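
/*
 * set_bits_ll() and clear_bits_ll() above are the allocator's only
 * synchronization primitive: read the word, fail with -EBUSY if the
 * requested bits are already in the target state (another CPU won
 * the race), otherwise publish the new value with cmpxchg() and
 * retry from the freshly observed value if the word changed
 * underneath us. Worked example: with mask_to_set == 0x6 and
 * *addr == 0x4, set_bits_ll() returns -EBUSY immediately because
 * bit 2 is already set; with *addr == 0x1 it installs 0x7, looping
 * only if *addr changes between the read and the cmpxchg.
 */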

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users set the same bit, one of them will fail and return the number
 * of bits that remain to be set; on success 0 is returned.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_set >= 0) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}
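
/*
 * Worked example (assuming BITS_PER_LONG == 64): bitmap_set_ll(map,
 * 62, 5) must touch two words. The first iteration sets bits 62-63
 * of word 0 (bits_to_set == 2, mask_to_set == 0xc000000000000000),
 * the loop exits with nr == 3, and the tail step sets bits 0-2 of
 * word 1 via BITMAP_LAST_WORD_MASK(67) == 0x7. If either step hits
 * a conflict, the count of bits not yet set is returned so the
 * caller can roll back the bits that were set before the conflict.
 */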

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users clear the same bit, one of them will fail and return the number
 * of bits that remain to be cleared; on success 0 is returned.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_clear >= 0) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
		pool->algo = gen_pool_first_fit;
		pool->data = NULL;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);

/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		 size_t size, int nid)
{
	struct gen_pool_chunk *chunk;
	int nbits = size >> pool->min_alloc_order;
	int nbytes = sizeof(struct gen_pool_chunk) +
				BITS_TO_LONGS(nbits) * sizeof(long);

	chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size - 1;
	atomic_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		end_bit = chunk_size(chunk) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		kfree(chunk);
	}
	kfree(pool);
	return;
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	int nbits, start_bit = 0, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (size == 0)
		return 0;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_read(&chunk->avail))
			continue;

		end_bit = chunk_size(chunk) >> order;
retry:
		start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
				pool->data);
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_sub(size, &chunk->avail);
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc);
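
/*
 * Worked example: with min_alloc_order == 4, gen_pool_alloc(pool, 100)
 * computes nbits = (100 + 15) >> 4 == 7, i.e. the request is rounded
 * up to 7 * 16 == 112 bytes, and chunk->avail is debited by 112, not
 * 100. If bitmap_set_ll() loses a race partway through, the bits it
 * did manage to set are cleared again and the search restarts from
 * the same start_bit; this is what keeps the fast path safe without
 * a lock, at the cost of a possible livelock under extreme contention.
 */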

/**
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value. Use NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	unsigned long vaddr;

	if (!pool)
		return NULL;

	vaddr = gen_pool_alloc(pool, size);
	if (!vaddr)
		return NULL;

	if (dma)
		*dma = gen_pool_virt_to_phys(pool, vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc);
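
/*
 * A usage sketch for the DMA helper (illustrative; sram_pool and
 * BUF_SIZE are assumed to exist in the caller):
 *
 *	dma_addr_t dma;
 *	void *va = gen_pool_dma_alloc(sram_pool, BUF_SIZE, &dma);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	... program the device with dma, access the buffer through va ...
 *	gen_pool_free(sram_pool, (unsigned long)va, BUF_SIZE);
 *
 * Note that the dma value comes from gen_pool_virt_to_phys(), so it
 * is only a valid bus address on platforms where the device sees the
 * same addresses as the CPU's physical address map.
 */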

/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool. Can not be used in NMI handler on architectures without
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			BUG_ON(addr + size - 1 > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_add(size, &chunk->avail);
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	BUG();
}
EXPORT_SYMBOL(gen_pool_free);

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: func to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of generic memory pool. The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);

/**
 * addr_in_gen_pool - checks if an address falls within the range of a pool
 * @pool: the generic memory pool
 * @start: start address
 * @size: size of the region
 *
 * Check if the range of addresses falls within the specified pool. Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
			size_t size)
{
	bool found = false;
	unsigned long end = start + size - 1;
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
		if (start >= chunk->start_addr && start <= chunk->end_addr) {
			if (end <= chunk->end_addr) {
				found = true;
				break;
			}
		}
	}
	rcu_read_unlock();
	return found;
}

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk_size(chunk);
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL use gen_pool_first_fit as default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
	rcu_read_lock();

	pool->algo = algo;
	if (!pool->algo)
		pool->algo = gen_pool_first_fit;

	pool->data = data;

	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);
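
/*
 * For example, a pool that should minimize fragmentation at the cost
 * of a longer search can be switched to the best-fit algorithm right
 * after creation:
 *
 *	gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
 */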

/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data)
{
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement. The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
		unsigned long size, unsigned long start,
		unsigned int nr, void *data)
{
	unsigned long align_mask = roundup_pow_of_two(nr) - 1;

	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);
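
/*
 * Worked example: a request for nr == 5 bits gets align_mask ==
 * roundup_pow_of_two(5) - 1 == 7, so the region starts on an 8-bit
 * boundary in the bitmap, i.e. on an (8 << min_alloc_order)-byte
 * boundary within the chunk. Power-of-two-sized requests are thus
 * naturally aligned to their own size.
 */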

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 *
 * Iterate over the bitmap to find the smallest free region
 * from which we can allocate the memory.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data)
{
	unsigned long start_bit = size;
	unsigned long len = size + 1;
	unsigned long index;

	index = bitmap_find_next_zero_area(map, size, start, nr, 0);

	while (index < size) {
		int next_bit = find_next_bit(map, size, index + nr);
		if ((next_bit - index) < len) {
			len = next_bit - index;
			start_bit = index;
			if (len == nr)
				return start_bit;
		}
		index = bitmap_find_next_zero_area(map, size,
						   next_bit + 1, nr, 0);
	}

	return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);
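
/*
 * Worked example: with map == 0b00100001 (bits 0 and 5 allocated,
 * size == 8) and nr == 2, first-fit would return bit 1, the start of
 * the 4-bit hole. Best-fit records that hole (len == 4), then finds
 * the 2-bit hole at bit 6; since its length equals nr it is returned
 * immediately. If no hole is large enough, start_bit keeps its
 * initial value of "size" and the caller's start_bit >= end_bit
 * check in gen_pool_alloc() treats it as failure.
 */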

static void devm_gen_pool_release(struct device *dev, void *res)
{
	gen_pool_destroy(*(struct gen_pool **)res);
}

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface. The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
		int nid)
{
	struct gen_pool **ptr, *pool;

	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = gen_pool_create(min_alloc_order, nid);
	if (pool) {
		*ptr = pool;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return pool;
}
EXPORT_SYMBOL(devm_gen_pool_create);
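
/*
 * A sketch of the managed variant in a driver probe path
 * (illustrative; foo_probe and the order-5, i.e. 32-byte, granularity
 * are arbitrary example choices):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct gen_pool *pool;
 *
 *		pool = devm_gen_pool_create(&pdev->dev, 5, -1);
 *		if (!pool)
 *			return -ENOMEM;
 *		...
 *	}
 *
 * No explicit gen_pool_destroy() is needed; devm_gen_pool_release()
 * runs when the device is unbound. Note that gen_pool_destroy() will
 * BUG() if allocations are still outstanding at that point.
 */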

/**
 * dev_get_gen_pool - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *dev_get_gen_pool(struct device *dev)
{
	struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL,
					NULL);

	if (!p)
		return NULL;
	return *p;
}
EXPORT_SYMBOL_GPL(dev_get_gen_pool);

#ifdef CONFIG_OF
/**
 * of_get_named_gen_pool - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_get_named_gen_pool(struct device_node *np,
	const char *propname, int index)
{
	struct platform_device *pdev;
	struct device_node *np_pool;

	np_pool = of_parse_phandle(np, propname, index);
	if (!np_pool)
		return NULL;
	pdev = of_find_device_by_node(np_pool);
	of_node_put(np_pool);
	if (!pdev)
		return NULL;
	return dev_get_gen_pool(&pdev->dev);
}
EXPORT_SYMBOL_GPL(of_get_named_gen_pool);
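
/*
 * Device tree sketch (illustrative; the "sram" property name and node
 * layout are assumed, not mandated by this file). A consumer node
 * references the pool provider's node through a phandle:
 *
 *	sram: sram@10000000 {
 *		compatible = "mmio-sram";
 *		reg = <0x10000000 0x10000>;
 *	};
 *
 *	foo@20000000 {
 *		...
 *		sram = <&sram>;
 *	};
 *
 * The consumer's driver then resolves the pool with
 * of_get_named_gen_pool(np, "sram", 0), which succeeds only if a
 * platform device bound to the sram node has already registered a
 * managed pool via devm_gen_pool_create().
 */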
#endif /* CONFIG_OF */