Line data Source code
1 : /*
2 : * Resizable, Scalable, Concurrent Hash Table
3 : *
4 : * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
5 : * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
6 : *
7 : * Based on the following paper:
8 : * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
9 : *
10 : * Code partially derived from nft_hash
11 : *
12 : * This program is free software; you can redistribute it and/or modify
13 : * it under the terms of the GNU General Public License version 2 as
14 : * published by the Free Software Foundation.
15 : */
16 :
17 : #include <linux/kernel.h>
18 : #include <linux/init.h>
19 : #include <linux/log2.h>
20 : #include <linux/slab.h>
21 : #include <linux/vmalloc.h>
22 : #include <linux/mm.h>
23 : #include <linux/jhash.h>
24 : #include <linux/random.h>
25 : #include <linux/rhashtable.h>
26 :
27 : #define HASH_DEFAULT_SIZE 64UL
28 : #define HASH_MIN_SIZE 4UL
29 :
30 : #define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
31 :
32 : #ifdef CONFIG_PROVE_LOCKING
33 : int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
34 : {
35 : return ht->p.mutex_is_held(ht->p.parent);
36 : }
37 : EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
38 : #endif
39 :
40 : static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
41 : {
42 1692 : return (void *) he - ht->p.head_offset;
43 : }
44 :
45 : static u32 __hashfn(const struct rhashtable *ht, const void *key,
46 : u32 len, u32 hsize)
47 : {
48 : u32 h;
49 :
50 1538 : h = ht->p.hashfn(key, len, ht->p.hash_rnd);
51 :
52 1538 : return h & (hsize - 1);
53 : }
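/* A note on the mask: bucket counts are always powers of two, so
 * (hsize - 1) is an all-ones bitmask and h & (hsize - 1) equals
 * h % hsize. Worked example with illustrative values:
 *
 *	h = 0x9e3779b9, hsize = 64
 *	hsize - 1 = 0x3f
 *	h & 0x3f  = 0x39, i.e. bucket 57 of 64
 */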
54 :
55 : /**
56 : * rhashtable_hashfn - compute hash for key of given length
57 : * @ht: hash table to compute for
58 : * @key: pointer to key
59 : * @len: length of key
60 : *
61 : * Computes the hash value using the hash function provided in the 'hashfn'
62 : * member of struct rhashtable_params. The returned value is guaranteed to be
63 : * smaller than the number of buckets in the hash table.
64 : */
65 2468 : u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len)
66 : {
67 1234 : struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
68 :
69 2468 : return __hashfn(ht, key, len, tbl->size);
70 : }
71 : EXPORT_SYMBOL_GPL(rhashtable_hashfn);
72 :
73 608 : static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize)
74 : {
75 304 : if (unlikely(!ht->p.key_len)) {
76 : u32 h;
77 :
78 0 : h = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
79 :
80 0 : return h & (hsize - 1);
81 : }
82 :
83 608 : return __hashfn(ht, ptr + ht->p.key_offset, ht->p.key_len, hsize);
84 : }
85 :
86 : /**
87 : * rhashtable_obj_hashfn - compute hash for hashed object
88 : * @ht: hash table to compute for
89 : * @ptr: pointer to hashed object
90 : *
91 : * Computes the hash value using either 'hashfn' or 'obj_hashfn',
92 : * depending on whether the hash table is set up to work with
93 : * a fixed length key. The returned value is guaranteed to be smaller than
94 : * the number of buckets in the hash table.
95 : */
96 0 : u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr)
97 : {
98 0 : struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
99 :
100 0 : return obj_hashfn(ht, ptr, tbl->size);
101 : }
102 : EXPORT_SYMBOL_GPL(rhashtable_obj_hashfn);
103 :
104 304 : static u32 head_hashfn(const struct rhashtable *ht,
105 : const struct rhash_head *he, u32 hsize)
106 : {
107 304 : return obj_hashfn(ht, rht_obj(ht, he), hsize);
108 : }
109 :
110 38 : static struct bucket_table *bucket_table_alloc(size_t nbuckets)
111 : {
112 : struct bucket_table *tbl;
113 : size_t size;
114 :
115 38 : size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
116 : tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
117 38 : if (tbl == NULL)
118 0 : tbl = vzalloc(size);
119 :
120 38 : if (tbl == NULL)
121 : return NULL;
122 :
123 38 : tbl->size = nbuckets;
124 :
125 38 : return tbl;
126 : }
127 :
128 : static void bucket_table_free(const struct bucket_table *tbl)
129 : {
130 6 : kvfree(tbl);
131 : }
132 :
133 : /**
134 : * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
135 : * @ht: hash table
136 : * @new_size: new table size
137 : */
138 158 : bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
139 : {
140 : /* Expand table when exceeding 75% load */
141 158 : return ht->nelems > (new_size / 4 * 3);
142 : }
143 : EXPORT_SYMBOL_GPL(rht_grow_above_75);
144 :
145 : /**
146 : * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
147 : * @ht: hash table
148 : * @new_size: new table size
149 : */
150 146 : bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
151 : {
152 : /* Shrink table beneath 30% load */
153 146 : return ht->nelems < (new_size * 3 / 10);
154 : }
155 : EXPORT_SYMBOL_GPL(rht_shrink_below_30);
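/* Worked example of the two thresholds (illustrative numbers): for a
 * table of size 64, rht_grow_above_75() fires once nelems > 48
 * (64 / 4 * 3), while rht_shrink_below_30() fires once nelems < 19
 * (64 * 3 / 10, integer division). Keeping the bounds apart prevents
 * resize thrashing around a single load factor.
 */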
156 :
157 0 : static void hashtable_chain_unzip(const struct rhashtable *ht,
158 : const struct bucket_table *new_tbl,
159 : struct bucket_table *old_tbl, size_t n)
160 : {
161 : struct rhash_head *he, *p, *next;
162 : unsigned int h;
163 :
164 : /* Old bucket empty, no work needed. */
165 0 : p = rht_dereference(old_tbl->buckets[n], ht);
166 0 : if (!p)
167 0 : return;
168 :
169 : /* Advance the old bucket pointer one or more times until it
170 : * reaches a node that doesn't hash to the same bucket as the
171 : * previous node, which we call p.
172 : */
173 0 : h = head_hashfn(ht, p, new_tbl->size);
174 0 : rht_for_each(he, p->next, ht) {
175 0 : if (head_hashfn(ht, he, new_tbl->size) != h)
176 : break;
177 : p = he;
178 : }
179 0 : RCU_INIT_POINTER(old_tbl->buckets[n], p->next);
180 :
181 : /* Find the subsequent node which does hash to the same
182 : * bucket as node p, or NULL if no such node exists.
183 : */
184 : next = NULL;
185 0 : if (he) {
186 0 : rht_for_each(he, he->next, ht) {
187 0 : if (head_hashfn(ht, he, new_tbl->size) == h) {
188 : next = he;
189 : break;
190 : }
191 : }
192 : }
193 :
194 : /* Set p's next pointer to that subsequent node pointer,
195 : * bypassing the nodes which do not hash to p's bucket
196 : */
197 0 : RCU_INIT_POINTER(p->next, next);
198 : }
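/* Illustration of one unzip pass (hypothetical chain): suppose old
 * bucket n holds A -> B -> C -> D, where A and C rehash to new bucket
 * x and B and D to new bucket y. The pass above leaves p = A, points
 * old_tbl->buckets[n] at B, finds C as the next node with A's hash and
 * links A -> C, bypassing B. Repeated passes, each separated by a
 * synchronize_rcu() in rhashtable_expand(), eventually yield the fully
 * unzipped chains x: A -> C and y: B -> D.
 */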
199 :
200 : /**
201 : * rhashtable_expand - Expand hash table while allowing concurrent lookups
202 : * @ht: the hash table to expand
203 : *
204 : * A secondary bucket array is allocated and the hash entries are migrated
205 : * while keeping them on both lists until the end of the RCU grace period.
206 : *
207 : * This function may only be called in a context where it is safe to call
208 : * synchronize_rcu(), e.g. not within an rcu_read_lock() section.
209 : *
210 : * The caller must ensure that no concurrent table mutations take place.
211 : * It is however valid to have concurrent lookups if they are RCU protected.
212 : */
213 0 : int rhashtable_expand(struct rhashtable *ht)
214 : {
215 0 : struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
216 : struct rhash_head *he;
217 : unsigned int i, h;
218 : bool complete;
219 :
220 : ASSERT_RHT_MUTEX(ht);
221 :
222 0 : if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
223 : return 0;
224 :
225 0 : new_tbl = bucket_table_alloc(old_tbl->size * 2);
226 0 : if (new_tbl == NULL)
227 : return -ENOMEM;
228 :
229 0 : ht->shift++;
230 :
231 : /* For each new bucket, search the corresponding old bucket
232 : * for the first entry that hashes to the new bucket, and
233 : * link the new bucket to that entry. Since all the entries
234 : * which will end up in the new bucket appear in the same
235 : * old bucket, this constructs an entirely valid new hash
236 : * table, but with multiple buckets "zipped" together into a
237 : * single imprecise chain.
238 : */
239 0 : for (i = 0; i < new_tbl->size; i++) {
240 0 : h = i & (old_tbl->size - 1);
241 0 : rht_for_each(he, old_tbl->buckets[h], ht) {
242 0 : if (head_hashfn(ht, he, new_tbl->size) == i) {
243 0 : RCU_INIT_POINTER(new_tbl->buckets[i], he);
244 0 : break;
245 : }
246 : }
247 : }
248 :
249 : /* Publish the new table pointer. Lookups may now traverse
250 : * the new table, but they will not benefit from any
251 : * additional efficiency until later steps unzip the buckets.
252 : */
253 0 : rcu_assign_pointer(ht->tbl, new_tbl);
254 :
255 : /* Unzip interleaved hash chains */
256 : do {
257 : /* Wait for readers. All new readers will see the new
258 : * table, and thus no references to the old table will
259 : * remain.
260 : */
261 0 : synchronize_rcu();
262 :
263 : /* For each bucket in the old table (each of which
264 : * contains items from multiple buckets of the new
265 : * table): ...
266 : */
267 : complete = true;
268 0 : for (i = 0; i < old_tbl->size; i++) {
269 0 : hashtable_chain_unzip(ht, new_tbl, old_tbl, i);
270 0 : if (old_tbl->buckets[i] != NULL)
271 : complete = false;
272 : }
273 0 : } while (!complete);
274 :
275 : bucket_table_free(old_tbl);
276 0 : return 0;
277 : }
278 : EXPORT_SYMBOL_GPL(rhashtable_expand);
279 :
280 : /**
281 : * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
282 : * @ht: the hash table to shrink
283 : *
284 : * This function may only be called in a context where it is safe to call
285 : * synchronize_rcu(), e.g. not within an rcu_read_lock() section.
286 : *
287 : * The caller must ensure that no concurrent table mutations take place.
288 : * It is however valid to have concurrent lookups if they are RCU protected.
289 : */
290 6 : int rhashtable_shrink(struct rhashtable *ht)
291 : {
292 6 : struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht);
293 : struct rhash_head __rcu **pprev;
294 : unsigned int i;
295 :
296 : ASSERT_RHT_MUTEX(ht);
297 :
298 6 : if (ht->shift <= ht->p.min_shift)
299 : return 0;
300 :
301 6 : ntbl = bucket_table_alloc(tbl->size / 2);
302 6 : if (ntbl == NULL)
303 : return -ENOMEM;
304 :
305 6 : ht->shift--;
306 :
307 : /* Link each bucket in the new table to the first bucket
308 : * in the old table that contains entries which will hash
309 : * to the new bucket.
310 : */
311 114 : for (i = 0; i < ntbl->size; i++) {
312 108 : ntbl->buckets[i] = tbl->buckets[i];
313 :
314 : /* Walk to the end of that chain and append the second old
315 : * bucket (i + ntbl->size), whose entries also hash to new
316 : * bucket i once the table size is halved.
317 : */
318 226 : for (pprev = &ntbl->buckets[i]; *pprev != NULL;
319 10 : pprev = &rht_dereference(*pprev, ht)->next)
320 : ;
321 108 : RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
322 : }
323 :
324 : /* Publish the new, valid hash table */
325 6 : rcu_assign_pointer(ht->tbl, ntbl);
326 :
327 : /* Wait for readers. No new readers will have references to the
328 : * old hash table.
329 : */
330 6 : synchronize_rcu();
331 :
332 : bucket_table_free(tbl);
333 :
334 6 : return 0;
335 : }
336 : EXPORT_SYMBOL_GPL(rhashtable_shrink);
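/* Merge illustration (illustrative numbers): shrinking from 8 to 4
 * buckets appends old bucket i + 4 onto old bucket i, because with a
 * power-of-two mask, h & 3 maps both bucket 1 and bucket 5 entries to
 * new bucket 1.
 */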
337 :
338 : /**
339 : * rhashtable_insert - insert object into hash table
340 : * @ht: hash table
341 : * @obj: pointer to hash head inside object
342 : *
343 : * Will automatically grow the table via rhashtable_expand() if the
344 : * grow_decision function specified at rhashtable_init() returns true.
345 : *
346 : * The caller must ensure that no concurrent table mutations occur. It is
347 : * however valid to have concurrent lookups if they are RCU protected.
348 : */
349 158 : void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
350 : {
351 158 : struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
352 : u32 hash;
353 :
354 : ASSERT_RHT_MUTEX(ht);
355 :
356 158 : hash = head_hashfn(ht, obj, tbl->size);
357 158 : RCU_INIT_POINTER(obj->next, tbl->buckets[hash]);
358 158 : rcu_assign_pointer(tbl->buckets[hash], obj);
359 158 : ht->nelems++;
360 :
361 158 : if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
362 0 : rhashtable_expand(ht);
363 158 : }
364 : EXPORT_SYMBOL_GPL(rhashtable_insert);
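/* Usage sketch (hypothetical 'struct test_obj', keyed as in the
 * rhashtable_init() examples below; the caller serializes mutations,
 * e.g. under a mutex):
 *
 *	struct test_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	if (!obj)
 *		return -ENOMEM;
 *	obj->key = 42;
 *	rhashtable_insert(ht, &obj->node);
 */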
365 :
366 : /**
367 : * rhashtable_remove_pprev - remove object from hash table given previous element
368 : * @ht: hash table
369 : * @obj: pointer to hash head inside object
370 : * @pprev: pointer to previous element
371 : *
372 : * Identical to rhashtable_remove() but the caller is already aware of the
373 : * element in front of the element to be deleted. This is particularly
374 : * useful for deletion when combined with walking or lookup.
375 : */
376 146 : void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
377 : struct rhash_head __rcu **pprev)
378 : {
379 146 : struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
380 :
381 : ASSERT_RHT_MUTEX(ht);
382 :
383 146 : RCU_INIT_POINTER(*pprev, obj->next);
384 146 : ht->nelems--;
385 :
386 292 : if (ht->p.shrink_decision &&
387 146 : ht->p.shrink_decision(ht, tbl->size))
388 6 : rhashtable_shrink(ht);
389 146 : }
390 : EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
391 :
392 : /**
393 : * rhashtable_remove - remove object from hash table
394 : * @ht: hash table
395 : * @obj: pointer to hash head inside object
396 : *
397 : * Since the hash chain is singly linked, the removal operation needs to
398 : * walk the bucket chain upon removal. The removal operation is thus
399 : * considerably slow if the hash table is not correctly sized.
400 : *
401 : * Will automatically shrink the table via rhashtable_shrink() if the
402 : * shrink_decision function specified at rhashtable_init() returns true.
403 : *
404 : * The caller must ensure that no concurrent table mutations occur. It is
405 : * however valid to have concurrent lookups if they are RCU protected.
406 : */
407 146 : bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
408 : {
409 146 : struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
410 : struct rhash_head __rcu **pprev;
411 : struct rhash_head *he;
412 : u32 h;
413 :
414 : ASSERT_RHT_MUTEX(ht);
415 :
416 146 : h = head_hashfn(ht, obj, tbl->size);
417 :
418 146 : pprev = &tbl->buckets[h];
419 300 : rht_for_each(he, tbl->buckets[h], ht) {
420 150 : if (he != obj) {
421 4 : pprev = &he->next;
422 4 : continue;
423 : }
424 :
425 146 : rhashtable_remove_pprev(ht, he, pprev);
426 146 : return true;
427 : }
428 :
429 : return false;
430 : }
431 : EXPORT_SYMBOL_GPL(rhashtable_remove);
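/* Usage sketch (hypothetical; caller serializes mutations). RCU
 * readers may still hold a reference after removal, so freeing must
 * be deferred past the grace period, e.g. with kfree_rcu(), assuming
 * the object embeds a struct rcu_head 'rcu':
 *
 *	if (rhashtable_remove(ht, &obj->node))
 *		kfree_rcu(obj, rcu);
 */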
432 :
433 : /**
434 : * rhashtable_lookup - lookup key in hash table
435 : * @ht: hash table
436 : * @key: pointer to key
437 : *
438 : * Computes the hash value for the key and traverses the bucket chain looking
439 : * for an entry with an identical key. The first matching entry is returned.
440 : *
441 : * This lookup function may only be used for fixed-key hash tables (key_len
442 : * parameter set). It will BUG() if used inappropriately.
443 : *
444 : * Lookups may occur in parallel with hash mutations as long as the lookup is
445 : * guarded by rcu_read_lock(). The caller must take care of this.
446 : */
447 0 : void *rhashtable_lookup(const struct rhashtable *ht, const void *key)
448 : {
449 0 : const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
450 : struct rhash_head *he;
451 : u32 h;
452 :
453 : BUG_ON(!ht->p.key_len);
454 :
455 0 : h = __hashfn(ht, key, ht->p.key_len, tbl->size);
456 0 : rht_for_each_rcu(he, tbl->buckets[h], ht) {
457 0 : if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key,
458 : ht->p.key_len))
459 0 : continue;
460 : return (void *) he - ht->p.head_offset;
461 : }
462 :
463 : return NULL;
464 : }
465 : EXPORT_SYMBOL_GPL(rhashtable_lookup);
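/* Usage sketch (hypothetical 'struct test_obj' with an int key):
 *
 *	u32 key = 42;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(ht, &key);
 *	if (obj)
 *		... use obj; it must not be held past rcu_read_unlock() ...
 *	rcu_read_unlock();
 */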
466 :
467 : /**
468 : * rhashtable_lookup_compare - search hash table with compare function
469 : * @ht: hash table
470 : * @hash: hash value of desired entry
471 : * @compare: compare function, must return true on match
472 : * @arg: argument passed on to compare function
473 : *
474 : * Traverses the bucket chain behind the provided hash value and calls the
475 : * specified compare function for each entry.
476 : *
477 : * Lookups may occur in parallel with hash mutations as long as the lookup is
478 : * guarded by rcu_read_lock(). The caller must take care of this.
479 : *
480 : * Returns the first entry on which the compare function returned true.
481 : */
482 2622 : void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
483 : bool (*compare)(void *, void *), void *arg)
484 : {
485 1234 : const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
486 : struct rhash_head *he;
487 :
488 1234 : if (unlikely(hash >= tbl->size))
489 : return NULL;
490 :
491 3392 : rht_for_each_rcu(he, tbl->buckets[hash], ht) {
492 1388 : if (!compare(rht_obj(ht, he), arg))
493 462 : continue;
494 926 : return (void *) he - ht->p.head_offset;
495 : }
496 :
497 : return NULL;
498 : }
499 : EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
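/* Usage sketch (hypothetical compare callback): this variant matches
 * on caller-defined criteria; the caller supplies the hash itself,
 * e.g. via rhashtable_hashfn():
 *
 *	static bool test_obj_cmp(void *ptr, void *arg)
 *	{
 *		return ((struct test_obj *)ptr)->value == *(u32 *)arg;
 *	}
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup_compare(ht,
 *					rhashtable_hashfn(ht, &val, sizeof(val)),
 *					test_obj_cmp, &val);
 *	rcu_read_unlock();
 */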
500 :
501 0 : static size_t rounded_hashtable_size(struct rhashtable_params *params)
502 : {
503 0 : return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
504 : 1UL << params->min_shift);
505 : }
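/* Worked example (illustrative): nelem_hint = 100 yields
 * roundup_pow_of_two(100 * 4 / 3) = roundup_pow_of_two(133) = 256
 * buckets, i.e. the hint is padded by 4/3 so the freshly initialized
 * table already sits below the 75% grow threshold.
 */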
506 :
507 : /**
508 : * rhashtable_init - initialize a new hash table
509 : * @ht: hash table to be initialized
510 : * @params: configuration parameters
511 : *
512 : * Initializes a new hash table based on the provided configuration
513 : * parameters. A table can be configured either with a variable or
514 : * fixed length key:
515 : *
516 : * Configuration Example 1: Fixed length keys
517 : * struct test_obj {
518 : * int key;
519 : * void * my_member;
520 : * struct rhash_head node;
521 : * };
522 : *
523 : * struct rhashtable_params params = {
524 : * .head_offset = offsetof(struct test_obj, node),
525 : * .key_offset = offsetof(struct test_obj, key),
526 : * .key_len = sizeof(int),
527 : * .hashfn = jhash,
528 : * #ifdef CONFIG_PROVE_LOCKING
529 : * .mutex_is_held = &my_mutex_is_held,
530 : * #endif
531 : * };
532 : *
533 : * Configuration Example 2: Variable length keys
534 : * struct test_obj {
535 : * [...]
536 : * struct rhash_head node;
537 : * };
538 : *
539 : * u32 my_hash_fn(const void *data, u32 seed)
540 : * {
541 : * struct test_obj *obj = data;
542 : *
543 : * return [... hash ...];
544 : * }
545 : *
546 : * struct rhashtable_params params = {
547 : * .head_offset = offsetof(struct test_obj, node),
548 : * .hashfn = jhash,
549 : * .obj_hashfn = my_hash_fn,
550 : * #ifdef CONFIG_PROVE_LOCKING
551 : * .mutex_is_held = &my_mutex_is_held,
552 : * #endif
553 : * };
554 : */
555 32 : int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
556 : {
557 : struct bucket_table *tbl;
558 : size_t size;
559 :
560 : size = HASH_DEFAULT_SIZE;
561 :
562 32 : if ((params->key_len && !params->hashfn) ||
563 0 : (!params->key_len && !params->obj_hashfn))
564 : return -EINVAL;
565 :
566 32 : params->min_shift = max_t(size_t, params->min_shift,
567 : ilog2(HASH_MIN_SIZE));
568 :
569 32 : if (params->nelem_hint)
570 0 : size = rounded_hashtable_size(params);
571 :
572 32 : tbl = bucket_table_alloc(size);
573 32 : if (tbl == NULL)
574 : return -ENOMEM;
575 :
576 32 : memset(ht, 0, sizeof(*ht));
577 64 : ht->shift = ilog2(tbl->size);
578 32 : memcpy(&ht->p, params, sizeof(*params));
579 32 : RCU_INIT_POINTER(ht->tbl, tbl);
580 :
581 32 : if (!ht->p.hash_rnd)
582 32 : get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
583 :
584 : return 0;
585 : }
586 : EXPORT_SYMBOL_GPL(rhashtable_init);
587 :
588 : /**
589 : * rhashtable_destroy - destroy hash table
590 : * @ht: the hash table to destroy
591 : *
592 : * Frees the bucket array. This function is not rcu safe, therefore the caller
593 : * has to make sure that no resizing may happen by unpublishing the hashtable
594 : * and waiting for the quiescent cycle before releasing the bucket array.
595 : */
596 0 : void rhashtable_destroy(const struct rhashtable *ht)
597 : {
598 0 : bucket_table_free(ht->tbl);
599 0 : }
600 : EXPORT_SYMBOL_GPL(rhashtable_destroy);
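/* Teardown sketch (hypothetical 'my_ht_ptr'): unpublish the table,
 * wait out readers, free the objects still linked into it, and only
 * then release the bucket array:
 *
 *	rcu_assign_pointer(my_ht_ptr, NULL);
 *	synchronize_rcu();
 *	... walk and free all inserted objects ...
 *	rhashtable_destroy(&ht);
 */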
601 :
602 : /**************************************************************************
603 : * Self Test
604 : **************************************************************************/
605 :
606 : #ifdef CONFIG_TEST_RHASHTABLE
607 :
608 : #define TEST_HT_SIZE 8
609 : #define TEST_ENTRIES 2048
610 : #define TEST_PTR ((void *) 0xdeadbeef)
611 : #define TEST_NEXPANDS 4
612 :
613 : #ifdef CONFIG_PROVE_LOCKING
614 : static int test_mutex_is_held(void *parent)
615 : {
616 : return 1;
617 : }
618 : #endif
619 :
620 : struct test_obj {
621 : void *ptr;
622 : int value;
623 : struct rhash_head node;
624 : };
625 :
626 : static int __init test_rht_lookup(struct rhashtable *ht)
627 : {
628 : unsigned int i;
629 :
630 : for (i = 0; i < TEST_ENTRIES * 2; i++) {
631 : struct test_obj *obj;
632 : bool expected = !(i % 2);
633 : u32 key = i;
634 :
635 : obj = rhashtable_lookup(ht, &key);
636 :
637 : if (expected && !obj) {
638 : pr_warn("Test failed: Could not find key %u\n", key);
639 : return -ENOENT;
640 : } else if (!expected && obj) {
641 : pr_warn("Test failed: Unexpected entry found for key %u\n",
642 : key);
643 : return -EEXIST;
644 : } else if (expected && obj) {
645 : if (obj->ptr != TEST_PTR || obj->value != i) {
646 : pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
647 : obj->ptr, TEST_PTR, obj->value, i);
648 : return -EINVAL;
649 : }
650 : }
651 : }
652 :
653 : return 0;
654 : }
655 :
656 : static void test_bucket_stats(struct rhashtable *ht, bool quiet)
657 : {
658 : unsigned int cnt, rcu_cnt, i, total = 0;
659 : struct test_obj *obj;
660 : struct bucket_table *tbl;
661 :
662 : tbl = rht_dereference_rcu(ht->tbl, ht);
663 : for (i = 0; i < tbl->size; i++) {
664 : rcu_cnt = cnt = 0;
665 :
666 : if (!quiet)
667 : pr_info(" [%#4x/%zu]", i, tbl->size);
668 :
669 : rht_for_each_entry_rcu(obj, tbl->buckets[i], node) {
670 : cnt++;
671 : total++;
672 : if (!quiet)
673 : pr_cont(" [%p],", obj);
674 : }
675 :
676 : rht_for_each_entry_rcu(obj, tbl->buckets[i], node)
677 : rcu_cnt++;
678 :
679 : if (rcu_cnt != cnt)
680 : pr_warn("Test failed: Chain count mismach %d != %d",
681 : cnt, rcu_cnt);
682 :
683 : if (!quiet)
684 : pr_cont("\n [%#x] first element: %p, chain length: %u\n",
685 : i, tbl->buckets[i], cnt);
686 : }
687 :
688 : pr_info(" Traversal complete: counted=%u, nelems=%zu, entries=%d\n",
689 : total, ht->nelems, TEST_ENTRIES);
690 :
691 : if (total != ht->nelems || total != TEST_ENTRIES)
692 : pr_warn("Test failed: Total count mismatch ^^^");
693 : }
694 :
695 : static int __init test_rhashtable(struct rhashtable *ht)
696 : {
697 : struct bucket_table *tbl;
698 : struct test_obj *obj, *next;
699 : int err;
700 : unsigned int i;
701 :
702 : /*
703 : * Insertion Test:
704 : * Insert TEST_ENTRIES into table with all keys even numbers
705 : */
706 : pr_info(" Adding %d keys\n", TEST_ENTRIES);
707 : for (i = 0; i < TEST_ENTRIES; i++) {
708 : struct test_obj *obj;
709 :
710 : obj = kzalloc(sizeof(*obj), GFP_KERNEL);
711 : if (!obj) {
712 : err = -ENOMEM;
713 : goto error;
714 : }
715 :
716 : obj->ptr = TEST_PTR;
717 : obj->value = i * 2;
718 :
719 : rhashtable_insert(ht, &obj->node);
720 : }
721 :
722 : rcu_read_lock();
723 : test_bucket_stats(ht, true);
724 : test_rht_lookup(ht);
725 : rcu_read_unlock();
726 :
727 : for (i = 0; i < TEST_NEXPANDS; i++) {
728 : pr_info(" Table expansion iteration %u...\n", i);
729 : rhashtable_expand(ht);
730 :
731 : rcu_read_lock();
732 : pr_info(" Verifying lookups...\n");
733 : test_rht_lookup(ht);
734 : rcu_read_unlock();
735 : }
736 :
737 : for (i = 0; i < TEST_NEXPANDS; i++) {
738 : pr_info(" Table shrinkage iteration %u...\n", i);
739 : rhashtable_shrink(ht);
740 :
741 : rcu_read_lock();
742 : pr_info(" Verifying lookups...\n");
743 : test_rht_lookup(ht);
744 : rcu_read_unlock();
745 : }
746 :
747 : rcu_read_lock();
748 : test_bucket_stats(ht, true);
749 : rcu_read_unlock();
750 :
751 : pr_info(" Deleting %d keys\n", TEST_ENTRIES);
752 : for (i = 0; i < TEST_ENTRIES; i++) {
753 : u32 key = i * 2;
754 :
755 : obj = rhashtable_lookup(ht, &key);
756 : BUG_ON(!obj);
757 :
758 : rhashtable_remove(ht, &obj->node);
759 : kfree(obj);
760 : }
761 :
762 : return 0;
763 :
764 : error:
765 : tbl = rht_dereference_rcu(ht->tbl, ht);
766 : for (i = 0; i < tbl->size; i++)
767 : rht_for_each_entry_safe(obj, next, tbl->buckets[i], ht, node)
768 : kfree(obj);
769 :
770 : return err;
771 : }
772 :
773 : static int __init test_rht_init(void)
774 : {
775 : struct rhashtable ht;
776 : struct rhashtable_params params = {
777 : .nelem_hint = TEST_HT_SIZE,
778 : .head_offset = offsetof(struct test_obj, node),
779 : .key_offset = offsetof(struct test_obj, value),
780 : .key_len = sizeof(int),
781 : .hashfn = jhash,
782 : #ifdef CONFIG_PROVE_LOCKING
783 : .mutex_is_held = &test_mutex_is_held,
784 : #endif
785 : .grow_decision = rht_grow_above_75,
786 : .shrink_decision = rht_shrink_below_30,
787 : };
788 : int err;
789 :
790 : pr_info("Running resizable hashtable tests...\n");
791 :
792 : err = rhashtable_init(&ht, &params);
793 : if (err < 0) {
794 : pr_warn("Test failed: Unable to initialize hashtable: %d\n",
795 : err);
796 : return err;
797 : }
798 :
799 : err = test_rhashtable(&ht);
800 :
801 : rhashtable_destroy(&ht);
802 :
803 : return err;
804 : }
805 :
806 : subsys_initcall(test_rht_init);
807 :
808 : #endif /* CONFIG_TEST_RHASHTABLE */