Lines Matching refs:table

1 /* hash - hashing table processing.
20 /* A generic hash table package. */
56 are not empty, there are N_ENTRIES active entries in the table. */
86 /* A hash table contains many internal entries, each holding a pointer to
91 and the current table size. At each slot position in the hash table,
97 entries divided by the table size. Finding the slot for a data is usually
100 larger hash table size (that is, a larger number of buckets) is prone to
103 Long buckets slow down the lookup algorithm. One might use big hash table
105 become inordinate, as unused slots in the hash table take some space. The
107 that those are not that easy to write! :-), and to use a table size
110 /* If an insertion makes the ratio of nonempty buckets to table size larger
112 the table size by multiplying by the growth factor (a number greater than
114 defaults to 1.414, meaning that the table will have doubled its size
120 table size to become smaller than the shrink threshold (a number between
121 0.0 and 1.0), then shrink the table by multiplying by the shrink factor (a
123 threshold and factor default to 0.0 and 1.0, meaning that the table never
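The growth and shrink parameters above come from a user-supplied tuning structure. A sketch of one possible tuning, assuming the field names declared for Hash_tuning in gnulib's hash.h; values that check_tuning rejects are replaced by the defaults:

#include <stdbool.h>
#include "hash.h"

/* Sketch, assuming gnulib's Hash_tuning fields: grow (by doubling)
   once 80% of the buckets are used, and shrink by half once usage
   falls below 20%.  */
static const Hash_tuning my_tuning =
  {
    .shrink_threshold = 0.2f,
    .shrink_factor = 0.5f,
    .growth_threshold = 0.8f,
    .growth_factor = 2.0f,
    .is_n_buckets = false   /* CANDIDATE counts entries, not buckets.  */
  };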
142 table organization: the number of entries, number of buckets and maximum
145 /* Return the number of buckets in the hash table. The table size, the total
150 hash_get_n_buckets (const Hash_table *table)
152 return table->n_buckets;
158 hash_get_n_buckets_used (const Hash_table *table)
160 return table->n_buckets_used;
166 hash_get_n_entries (const Hash_table *table)
168 return table->n_entries;
174 hash_get_max_bucket_length (const Hash_table *table)
179 for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
197 /* Do a mild validation of a hash table, by traversing it and checking two
201 hash_table_ok (const Hash_table *table)
207 for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
223 if (n_buckets_used == table->n_buckets_used && n_entries == table->n_entries)
230 hash_print_statistics (const Hash_table *table, FILE *stream)
232 size_t n_entries = hash_get_n_entries (table);
233 size_t n_buckets = hash_get_n_buckets (table);
234 size_t n_buckets_used = hash_get_n_buckets_used (table);
235 size_t max_bucket_length = hash_get_max_bucket_length (table);
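The accessors above report the entry count, the bucket counts and the longest chain; hash_print_statistics writes the same figures to a stream. A small diagnostic sketch using them:

#include <stdio.h>
#include <stdlib.h>
#include "hash.h"

/* Sketch: verify the table's internal counters, then report its load.  */
static void
report_table (const Hash_table *table)
{
  if (!hash_table_ok (table))
    abort ();
  fprintf (stderr, "%zu entries, %zu of %zu buckets used, longest chain %zu\n",
           hash_get_n_entries (table),
           hash_get_n_buckets_used (table),
           hash_get_n_buckets (table),
           hash_get_max_bucket_length (table));
  hash_print_statistics (table, stderr);
}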
247 If TABLE->hasher misbehaves, abort. */
249 safe_hasher (const Hash_table *table, const void *key)
251 size_t n = table->hasher (key, table->n_buckets);
252 if (! (n < table->n_buckets))
254 return table->bucket + n;
257 /* If ENTRY matches an entry already in the hash table, return the
258 entry from the table. Otherwise, return NULL. */
261 hash_lookup (const Hash_table *table, const void *entry)
263 struct hash_entry const *bucket = safe_hasher (table, entry);
270 if (entry == cursor->data || table->comparator (entry, cursor->data))
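hash_lookup hashes ENTRY, then scans the selected bucket, matching either by pointer identity or through the user comparator. A sketch of a read-only probe, assuming a table that stores NUL-terminated strings:

#include <stdbool.h>
#include "hash.h"

/* Sketch: probe a table of NUL-terminated strings for KEY without
   inserting anything; hash_lookup returns the stored entry or NULL.  */
static bool
contains (const Hash_table *table, const char *key)
{
  return hash_lookup (table, key) != NULL;
}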
278 /* The functions in this page traverse the hash table and process the
279 contained entries. For the traversal to work properly, the hash table
285 /* Return the first data in the table, or NULL if the table is empty. */
288 hash_get_first (const Hash_table *table)
292 if (table->n_entries == 0)
295 for (bucket = table->bucket; ; bucket++)
296 if (! (bucket < table->bucket_limit))
307 hash_get_next (const Hash_table *table, const void *entry)
309 struct hash_entry const *bucket = safe_hasher (table, entry);
323 while (++bucket < table->bucket_limit)
331 /* Fill BUFFER with pointers to active user entries in the hash table, then
336 hash_get_entries (const Hash_table *table, void **buffer,
343 for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
359 /* Call a PROCESSOR function for each entry of a hash table, and return the
368 hash_do_for_each (const Hash_table *table, Hash_processor processor,
375 for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
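As the comment above notes, the table must not be modified or resized while it is being traversed. A sketch of a full walk using hash_get_first and hash_get_next, again assuming string entries:

#include <stdio.h>
#include "hash.h"

/* Sketch: visit every stored entry; the table stays untouched
   for the duration of the loop.  */
static void
print_all (const Hash_table *table)
{
  const void *entry;
  for (entry = hash_get_first (table); entry != NULL;
       entry = hash_get_next (table, entry))
    puts ((const char *) entry);
}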
505 /* For the given hash TABLE, check the user supplied tuning structure for
508 in the hash table (that is, the user loses the right of further modifying
512 check_tuning (Hash_table *table)
514 const Hash_tuning *tuning = table->tuning;
535 table->tuning = &default_tuning;
559 /* Allocate and return a new hash table, or NULL upon failure. The initial
562 the hash table size occurs. So, if you have a reasonably tight a priori upper
563 bound on the number of entries you intend to insert in the hash table, you
564 may save some table memory and insertion time, by specifying it here. If
598 Hash_table *table;
605 table = malloc (sizeof *table);
606 if (table == NULL)
611 table->tuning = tuning;
612 if (!check_tuning (table))
615 when the user gets some feedback about it. Once the table is created,
622 table->n_buckets = compute_bucket_size (candidate, tuning);
623 if (!table->n_buckets)
626 table->bucket = calloc (table->n_buckets, sizeof *table->bucket);
627 if (table->bucket == NULL)
629 table->bucket_limit = table->bucket + table->n_buckets;
630 table->n_buckets_used = 0;
631 table->n_entries = 0;
633 table->hasher = hasher;
634 table->comparator = comparator;
635 table->data_freer = data_freer;
637 table->free_entry_list = NULL;
639 obstack_init (&table->entry_stack);
641 return table;
644 free (table);
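hash_initialize validates the tuning, sizes the bucket vector from CANDIDATE and records the user callbacks. A creation sketch, assuming the hash_initialize (candidate, tuning, hasher, comparator, data_freer) signature from gnulib's hash.h and reusing the string helpers sketched earlier:

#include <stdlib.h>
#include "hash.h"

/* Sketch: NULL tuning selects the defaults; passing free as the
   data_freer makes hash_clear and hash_free release the stored
   strings as well.  */
static Hash_table *
new_string_table (size_t expected_entries)
{
  return hash_initialize (expected_entries, NULL,
                          string_hasher, string_comparator, free);
}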
653 hash_clear (Hash_table *table)
657 for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
667 if (table->data_freer)
668 table->data_freer (cursor->data);
674 cursor->next = table->free_entry_list;
675 table->free_entry_list = cursor;
679 if (table->data_freer)
680 table->data_freer (bucket->data);
686 table->n_buckets_used = 0;
687 table->n_entries = 0;
690 /* Reclaim all storage associated with a hash table. If a data_freer
691 function has been supplied by the user when the hash table was created,
696 hash_free (Hash_table *table)
703 if (table->data_freer && table->n_entries)
705 for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
710 table->data_freer (cursor->data);
717 obstack_free (&table->entry_stack, NULL);
722 for (bucket = table->bucket; bucket < table->bucket_limit; bucket++)
732 for (cursor = table->free_entry_list; cursor; cursor = next)
740 /* Free the remainder of the hash table structure. */
741 free (table->bucket);
742 free (table);
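hash_clear empties the table in place, keeping the buckets and the pool of spare entries for reuse; hash_free additionally releases the buckets, the entry obstack and the Hash_table itself. A small sketch of the two teardown paths:

#include <stdbool.h>
#include "hash.h"

/* Sketch: with a data_freer installed, both calls also release the
   user data held in the table.  */
static void
reset_or_destroy (Hash_table *table, bool destroy)
{
  if (destroy)
    hash_free (table);      /* Buckets, entries and the table itself.  */
  else
    hash_clear (table);     /* Table survives with zero entries.  */
}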
751 allocate_entry (Hash_table *table)
755 if (table->free_entry_list)
757 new = table->free_entry_list;
758 table->free_entry_list = new->next;
763 new = obstack_alloc (&table->entry_stack, sizeof *new);
776 free_entry (Hash_table *table, struct hash_entry *entry)
779 entry->next = table->free_entry_list;
780 table->free_entry_list = entry;
784 ENTRY matches an entry in the table, return a pointer to the corresponding
787 the table, unlink the matching entry. */
790 hash_find_entry (Hash_table *table, const void *entry,
793 struct hash_entry *bucket = safe_hasher (table, entry);
803 if (entry == bucket->data || table->comparator (entry, bucket->data))
816 free_entry (table, next);
831 || table->comparator (entry, cursor->next->data))
842 free_entry (table, next);
901 allocation failure that the src table is in a consistent
934 /* For an already existing hash table, change the number of buckets through
935 specifying CANDIDATE. The contents of the hash table are preserved. The
937 the table may receive at least CANDIDATE different user entries, including
938 those already in the table, before any other growth of the hash table size
943 hash_rehash (Hash_table *table, size_t candidate)
947 size_t new_size = compute_bucket_size (candidate, table->tuning);
951 if (new_size == table->n_buckets)
961 new_table->tuning = table->tuning;
962 new_table->hasher = table->hasher;
963 new_table->comparator = table->comparator;
964 new_table->data_freer = table->data_freer;
968 table collide into a common bucket in the new table. The worst
971 guarantee table->n_buckets_used-1 free entries in advance, then
980 /* Merely reuse the extra old space into the new table. */
982 new_table->entry_stack = table->entry_stack;
984 new_table->free_entry_list = table->free_entry_list;
986 if (transfer_entries (new_table, table, false))
989 free (table->bucket);
990 table->bucket = new_table->bucket;
991 table->bucket_limit = new_table->bucket_limit;
992 table->n_buckets = new_table->n_buckets;
993 table->n_buckets_used = new_table->n_buckets_used;
994 table->free_entry_list = new_table->free_entry_list;
995 /* table->n_entries and table->entry_stack already hold their value. */
1003 uses fewer buckets than the old table, so we will reclaim some
1004 free entries as overflows in the new table are put back into
1005 distinct buckets in the old table.
1008 table requires more intermediate overflow entries than using two
1012 table->free_entry_list = new_table->free_entry_list;
1013 if (! (transfer_entries (table, new_table, true)
1014 && transfer_entries (table, new_table, false)))
1016 /* table->n_entries already holds its value. */
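hash_rehash builds a shadow table of the new size, transfers the entries, then adopts the new bucket vector. A sketch of pre-sizing an existing table before a bulk load, assuming hash_rehash reports failure by returning false and leaving the table usable, as the surrounding code suggests:

#include <stdbool.h>
#include <stddef.h>
#include "hash.h"

/* Sketch: make room for at least TOTAL_ENTRIES before the next
   automatic growth, avoiding repeated rehashes during insertion.  */
static bool
reserve (Hash_table *table, size_t total_entries)
{
  return hash_rehash (table, total_entries);
}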
1021 /* Insert ENTRY into hash TABLE if there is not already a matching entry.
1025 Return 0 if there is already a matching entry in the table,
1038 hash_insert_if_absent (Hash_table *table, void const *entry,
1050 /* If there's a matching entry already in the table, return that. */
1051 if ((data = hash_find_entry (table, entry, &bucket, false)) != NULL)
1059 the table size and rehash. There's no point in checking the number of
1063 if (table->n_buckets_used
1064 > table->tuning->growth_threshold * table->n_buckets)
1068 check_tuning (table);
1069 if (table->n_buckets_used
1070 > table->tuning->growth_threshold * table->n_buckets)
1072 const Hash_tuning *tuning = table->tuning;
1075 ? (table->n_buckets * tuning->growth_factor)
1076 : (table->n_buckets * tuning->growth_factor
1083 if (!hash_rehash (table, candidate))
1087 if (hash_find_entry (table, entry, &bucket, false) != NULL)
1096 struct hash_entry *new_entry = allocate_entry (table);
1106 table->n_entries++;
1113 table->n_entries++;
1114 table->n_buckets_used++;
1122 hash_insert0 (Hash_table *table, void const *entry, void const **matched_ent)
1124 return hash_insert_if_absent (table, entry, matched_ent);
1127 /* If ENTRY matches an entry already in the hash table, return the pointer
1128 to the entry from the table. Otherwise, insert ENTRY and return ENTRY.
1134 hash_insert (Hash_table *table, void const *entry)
1137 int err = hash_insert_if_absent (table, entry, &matched_ent);
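hash_insert_if_absent either links a fresh entry or reports the existing match through MATCHED_ENT. A sketch, assuming the documented return values of 1 for a new insertion, 0 for a duplicate and -1 on allocation failure:

#include <stdio.h>
#include "hash.h"

/* Sketch: insert KEY unless an equal entry is already stored.  */
static int
add_key (Hash_table *table, const char *key)
{
  const void *matched;
  int err = hash_insert_if_absent (table, key, &matched);
  if (err == 0)
    printf ("already present: %s\n", (const char *) matched);
  return err;
}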
1143 /* If ENTRY is already in the table, remove it and return the just-deleted
1145 table, don't modify the table and return NULL. */
1148 hash_delete (Hash_table *table, const void *entry)
1153 data = hash_find_entry (table, entry, &bucket, true);
1157 table->n_entries--;
1160 table->n_buckets_used--;
1163 rehash into a smaller table. */
1165 if (table->n_buckets_used
1166 < table->tuning->shrink_threshold * table->n_buckets)
1170 check_tuning (table);
1171 if (table->n_buckets_used
1172 < table->tuning->shrink_threshold * table->n_buckets)
1174 const Hash_tuning *tuning = table->tuning;
1177 ? table->n_buckets * tuning->shrink_factor
1178 : (table->n_buckets * tuning->shrink_factor
1181 if (!hash_rehash (table, candidate))
1184 shrink the table is not fatal. But since memory
1189 struct hash_entry *cursor = table->free_entry_list;
1197 table->free_entry_list = NULL;
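hash_delete unlinks the matching entry and returns the just-deleted data so the caller can deallocate it. A sketch that removes a key and releases the returned data, assuming the entries are heap-allocated strings:

#include <stdbool.h>
#include <stdlib.h>
#include "hash.h"

/* Sketch: hash_delete returns the stored data, or NULL if no entry
   matched; ownership passes back to the caller.  */
static bool
remove_key (Hash_table *table, const char *key)
{
  void *data = hash_delete (table, key);
  if (data == NULL)
    return false;
  free (data);
  return true;
}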
1212 hash_print (const Hash_table *table)
1214 struct hash_entry *bucket = (struct hash_entry *) table->bucket;
1216 for ( ; bucket < table->bucket_limit; bucket++)
1221 printf ("%lu:\n", (unsigned long int) (bucket - table->bucket));