I wrote a hashtable and it basically consists of these two structures:
typedef struct dictEntry {
    void *key;
    void *value;
    struct dictEntry *next;
} dictEntry;

typedef struct dict {
    dictEntry **table;
    unsigned long size;
    unsigned long items;
} dict;
dict.table is an array of dictEntry pointers: each slot is the head of a linked list holding the key/value pairs that hash to that slot.
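For illustration, a lookup walks the linked list in one bucket (hash() is my hash function, not shown; dictFind is only an example and compares keys by pointer for simplicity):

void *dictFind(dict *d, void *key) {
    /* Pick the bucket, then walk its chain until the key matches.
     * Keys are compared by pointer identity here just for illustration. */
    unsigned long index = hash(key) & (d->size - 1);
    dictEntry *it;

    for (it = d->table[index]; it != NULL; it = it->next)
        if (it->key == key)
            return it->value;

    return NULL; /* key not present */
}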
Once the hashtable is half full, I expand it by doubling its size and rehashing:
dict *_dictRehash(dict *d) {
    int i;
    dict *_d;
    dictEntry *dit;

    _d = dictCreate(d->size * 2);

    for (i = 0; i < d->size; i++) {
        for (dit = d->table[i]; dit != NULL; dit = dit->next) {
            _dictAddRaw(_d, dit);
        }
    }

    /* FIXME memory leak because the old dict can never be freed */
    free(d); // seg fault

    return _d;
}
The function above reuses the entry pointers from the old hash table and stores them in the newly created one. But when I free the old dict d, a segmentation fault occurs.
How am I able to free the old hashtable struct without having to allocate the memory for the key/value pairs again?
Edit, for completeness:
dict *dictCreate(unsigned long size) {
    dict *d;

    d = malloc(sizeof(dict));
    d->size = size;
    d->items = 0;
    d->table = calloc(size, sizeof(dictEntry *));

    return d;
}
void dictAdd(dict *d, void *key, void *value) {
    dictEntry *entry;

    entry = malloc(sizeof *entry);
    entry->key = key;
    entry->value = value;
    entry->next = NULL;

    if ((((float)d->items) / d->size) > 0.5) d = _dictRehash(d);

    _dictAddRaw(d, entry);
}
void _dictAddRaw(dict *d, dictEntry *entry) {
    int index = (hash(entry->key) & (d->size - 1));

    if (d->table[index]) {
        dictEntry *next, *prev;

        for (next = d->table[index]; next != NULL; next = next->next) {
            prev = next;
        }
        prev->next = entry;
    } else {
        d->table[index] = entry;
    }

    d->items++;
}
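For reference, this is roughly how the functions are meant to be called (the initial size and the string literals are just an example; the size is a power of two because _dictAddRaw masks the hash with size - 1):

int main(void) {
    /* Example usage only: create a table with 16 buckets and add two pairs. */
    dict *d = dictCreate(16);

    dictAdd(d, "name", "Alice");
    dictAdd(d, "role", "admin");

    return 0;
}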
But to give you some perspective: when you call free(d), you are really expecting something like a destructor for your struct dict, one that would also free the memory behind the pointer-to-pointer-to-dictEntry table. And why do you have to throw away the entire hash table to expand it? You already have a next pointer, so why not just append new entries to the existing chains?

The solution is not to free d, but rather to expand d by allocating more struct dictEntry nodes and linking them in through the appropriate next pointers. When contracting d, you will have to iterate over the next pointers to reach the end of each chain and then start freeing the struct dictEntry nodes inside your d.
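One way to read that suggestion is to grow the bucket array inside the existing dict instead of building a second dict: the entries are reused, so only the old array has to be freed. A rough sketch under that assumption, reusing the dict/dictEntry definitions and hash() from the question (dictExpand is a made-up name):

void dictExpand(dict *d) {
    /* Sketch only: replace the bucket array with a larger one and
     * relink the existing entries; nothing but the old array is freed. */
    unsigned long newsize = d->size * 2;
    dictEntry **newtable = calloc(newsize, sizeof(dictEntry *));
    unsigned long i;

    if (newtable == NULL) return; /* out of memory, keep the old table */

    for (i = 0; i < d->size; i++) {
        dictEntry *it = d->table[i];
        while (it != NULL) {
            dictEntry *next = it->next;                    /* remember the old chain */
            unsigned long index = hash(it->key) & (newsize - 1);
            it->next = newtable[index];                    /* relink into the new bucket */
            newtable[index] = it;
            it = next;
        }
    }

    free(d->table);       /* only the old bucket array goes away... */
    d->table = newtable;  /* ...the entries themselves are reused */
    d->size = newsize;
}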