concurrency

aiden 2023-01-23 14:36:06 +00:00
parent 278c327a4f
commit 59bee153a0
Signed by: aiden
GPG Key ID: EFA9C74AEBF806E0
9 changed files with 947 additions and 6973 deletions

1
.gitignore vendored

@@ -1,2 +1 @@
.DS_Store
hashmap

3
.gitmodules vendored Normal file

@@ -0,0 +1,3 @@
[submodule "src/ifc"]
path = src/ifc
url = ssh://git@git.tcp.direct:2222/aiden/ifc.git

13
LICENSE

@@ -1,13 +0,0 @@
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004

Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>

Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.

DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION

0. You just DO WHAT THE FUCK YOU WANT TO.

151
example.c Normal file

@@ -0,0 +1,151 @@
#define HASHMAP_HASH_FUNCTION(key) (*(uint64_t *)key ^ 9268326398 /* arbitrary integer */)
#include "hashmap.h"
#include <pthread.h>
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#define N_THREADS 16
#define _N_BUCKETS 24000000
#define N_BUCKETS (size_t)((_N_BUCKETS / N_THREADS) * N_THREADS)
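// N_BUCKETS rounds _N_BUCKETS down to a multiple of N_THREADS so the keyspace
// splits evenly across threads; with the values above this is exact:
// (24000000 / 16) * 16 == 24000000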
struct hashmap *the_hashmap;
struct gosh {
size_t idx;
size_t n;
};
void *go(struct gosh *gosh) {
void *x;
struct hashmap_key key;
struct hashmap_area *area = hashmap_area(the_hashmap);
hashmap_reserve(the_hashmap, area, gosh->n);
for (size_t it = 0; it < gosh->n; ++it) {
size_t idx = gosh->idx + it;
hashmap_key(&(idx), sizeof(idx), &(key));
if (hashmap_cas(
the_hashmap, area, &(key),
&(x), (void *)idx,
hashmap_cas_set, NULL
) == hashmap_cas_error) {
puts("error!");
exit(1);
}
}
hashmap_area_release(the_hashmap, area);
return NULL;
}
void *readt(struct gosh *gosh) {
void *x;
struct hashmap_key key;
struct hashmap_area *area = hashmap_area(the_hashmap);
for (size_t it = 0; it < gosh->n; ++it) {
size_t idx = gosh->idx + it;
hashmap_key(&(idx), sizeof(idx), &(key));
if (hashmap_cas(
the_hashmap, area, &(key),
&(x), NULL,
hashmap_cas_get, NULL
) == hashmap_cas_error) {
puts("error!");
exit(1);
}
}
hashmap_area_release(the_hashmap, area);
return NULL;
}
void *deletet(struct gosh *gosh) {
void *x;
struct hashmap_key key;
struct hashmap_area *area = hashmap_area(the_hashmap);
for (size_t it = 0; it < gosh->n; ++it) {
size_t idx = gosh->idx + it;
hashmap_key(&(idx), sizeof(idx), &(key));
if (hashmap_cas(
the_hashmap, area, &(key),
&(x), (void *)1 /* any non-NULL value: delete without comparing *expected_value */,
hashmap_cas_delete, NULL
) == hashmap_cas_error) {
puts("error!");
exit(1);
}
}
hashmap_area_release(the_hashmap, area);
return NULL;
}
double rc(void) {
struct timespec now;
clock_gettime(CLOCK_REALTIME, &(now));
return now.tv_sec + (now.tv_nsec * 1e-9);
}
int main(int argc, char *argv[]) {
the_hashmap = hashmap_create(
N_THREADS, N_BUCKETS / 0.94, 1, 2,
NULL
);
if (the_hashmap == NULL) {
puts("error");
exit(1);
}
printf("writing %zu values...\n", N_BUCKETS);
pthread_t threads[N_THREADS];
struct gosh gosh[N_THREADS];
size_t idx = 0;
// start the clock before spawning the writers: the threads begin work as
// soon as they are created, so starting it afterwards would undercount
double time = rc();
for (size_t x = 0; x < N_THREADS; ++x) {
gosh[x].idx = idx;
idx += (gosh[x].n = (N_BUCKETS / N_THREADS));
pthread_create(&(threads[x]), NULL, (void *)&(go), &(gosh[x]));
}
for (size_t x = 0; x < N_THREADS; ++x) {
void *fuck;
pthread_join(threads[x], &(fuck));
}
printf("success! %lfs\nreading %zu values...\n", rc() - time, N_BUCKETS);
time = rc();
for (size_t x = 0; x < N_THREADS; ++x) {
pthread_create(&(threads[x]), NULL, (void *)&(readt), &(gosh[x]));
}
for (size_t x = 0; x < N_THREADS; ++x) {
void *fuck;
pthread_join(threads[x], &(fuck));
}
printf("success! %lfs\n", rc() - time);
time = rc();
for (size_t x = 0; x < N_THREADS; ++x) {
pthread_create(&(threads[x]), NULL, (void *)&(deletet), &(gosh[x]));
}
for (size_t x = 0; x < N_THREADS; ++x) {
void *fuck;
pthread_join(threads[x], &(fuck));
}
printf("success! %lfs\n", rc() - time);
hashmap_destroy(the_hashmap);
return 0;
}

133
headers/hashmap.h

@@ -1,133 +0,0 @@
#ifndef HASHMAP_H
#define HASHMAP_H
#define _GNU_SOURCE
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h> /* SIZE_MAX, used by hashmap_create_static_value */
enum hashmap_drop_mode {
hashmap_drop_set,
hashmap_drop_delete,
};
typedef void (*hashmap_drop_handler)(void *value, enum hashmap_drop_mode drop_mode);
struct hashmap_entries {
struct hashmap_entry *nodes;
size_t base, n_nodes;
};
struct hashmap_bucket {
struct hashmap_entries entries;
struct hashmap_bucket **prev_next;
struct hashmap_bucket *next;
};
struct hashmap_key {
const unsigned char *key;
size_t key_sz;
size_t hash;
struct hashmap_bucket *bucket;
};
#define HASHMAP_KEY_INITIALIZER { .bucket = NULL }
struct hashmap_entry {
size_t hash;
size_t key_sz;
struct hashmap_entry_inner *inner;
};
struct hashmap_entry_inner {
void *value;
unsigned char key[];
};
#define __hashmap_header \
hashmap_drop_handler drop_handler; \
size_t n_divisions; \
size_t n_buckets; \
\
pthread_mutex_t meta_mutex; \
struct hashmap_bucket *bucket_with_entries; \
size_t ref_count
struct _hashmap_header {
__hashmap_header;
};
#ifdef PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
#define HASHMAP_STATIC_ENABLED
#define hashmap_create_static_type(arg_buckets, arg_divisions) struct __attribute__((packed)) { \
struct _hashmap_header header; \
struct hashmap_bucket buckets[arg_buckets]; \
pthread_mutex_t mutexes[arg_divisions]; \
}
#define hashmap_create_static_value(type, arg_drop_handler) { \
.header = { \
.drop_handler = arg_drop_handler, \
.n_divisions = sizeof(((type *)NULL)->mutexes) / sizeof(pthread_mutex_t), \
.n_buckets = sizeof(((type *)NULL)->buckets) / sizeof(struct hashmap_bucket), \
\
.meta_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP, \
.bucket_with_entries = NULL, \
.ref_count = SIZE_MAX, \
}, \
.buckets = { [0 ... sizeof(((type *)NULL)->buckets) / sizeof(struct hashmap_bucket) - 1] = { \
.entries = { \
.nodes = NULL, \
.n_nodes = 0, \
.base = 0, \
}, \
.prev_next = NULL, \
.next = NULL, \
}, }, \
.mutexes = { [0 ... sizeof(((type *)NULL)->mutexes) / sizeof(pthread_mutex_t) - 1] = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP, } \
}
void hashmap_static_key_obtain(
void *static_hashmap,
struct hashmap_key *hmap_key,
const unsigned char *key,
size_t key_sz
);
void hashmap_static_key_release(void *static_hashmap, struct hashmap_key *key, bool hold_lock);
void hashmap_static_delete_entries(void *static_hashmap);
bool hashmap_static_get(void *static_hashmap, struct hashmap_key *key, void **value);
bool hashmap_static_set(void *static_hashmap, struct hashmap_key *key, void *value);
bool hashmap_static_delete(void *static_hashmap, struct hashmap_key *key);
#endif
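/*
 * usage sketch for the static variant (hypothetical names; relies on the
 * GNU C range designators the macros above already use):
 * typedef hashmap_create_static_type(64, 4) map64_t;
 * static map64_t map = hashmap_create_static_value(map64_t, NULL);
 * // ... then hashmap_static_set(&(map), &(key), value); and friends
 */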
struct hashmap {
__hashmap_header;
unsigned char buf[];
};
#undef __hashmap_header
struct hashmap *hashmap_create(
size_t n_buckets,
size_t n_divisions,
hashmap_drop_handler drop_handler
);
struct hashmap *hashmap_copy_ref(struct hashmap *hashmap);
struct hashmap *hashmap_move_ref(struct hashmap **src);
void hashmap_destroy_ref(struct hashmap **src);
void hashmap_key_initialise(struct hashmap_key *key);
void hashmap_key_obtain(
struct hashmap *hashmap,
struct hashmap_key *hmap_key,
const unsigned char *key,
size_t key_sz
);
void hashmap_key_release(struct hashmap *hashmap, struct hashmap_key *key, bool hold_lock);
bool hashmap_get(struct hashmap *hashmap, struct hashmap_key *key, void **value);
bool hashmap_set(struct hashmap *hashmap, struct hashmap_key *key, void *value);
bool hashmap_delete(struct hashmap *hashmap, struct hashmap_key *key);
#endif

757
src/hashmap.c

@@ -1,757 +0,0 @@
#define _GNU_SOURCE
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <assert.h>
#include <stdio.h>
#include "../headers/hashmap.h"
#define XXH_INLINE_ALL
#include "headers/xxhash.h"
#define ENSURE(expr) do { if ((expr) != 0) { abort(); } } while (0)
static void hashmap_entries_destroy(struct hashmap_entries *arr) {
free(arr->nodes);
arr->nodes = NULL;
arr->n_nodes = 0;
arr->base = 0;
return;
}
static bool hashmap_entries_insert(
struct hashmap_entries *arr,
size_t idx,
struct hashmap_entry *element
) {
struct hashmap_entry **nodes = &(arr->nodes);
size_t *base = &(arr->base), *n_nodes = &(arr->n_nodes);
if (*nodes == NULL) {
if (idx != 0) {
abort();
}
} else {
if (idx > *n_nodes) {
abort();
}
}
if (*nodes == NULL) {
*nodes = malloc(sizeof(**nodes) * 2);
if (*nodes == NULL) {
return false;
}
*base = 1;
*n_nodes = 1;
(*nodes)[*base] = *element;
return true;
}
if (*n_nodes > 1 && (*n_nodes & (*n_nodes - 1)) == 0) {
if (*base != 0) {
abort();
}
struct hashmap_entry *new_nodes = malloc(sizeof(**nodes) * (*n_nodes * 2));
if (new_nodes == NULL) {
return false;
}
*base = (*n_nodes + 1) / 2;
memcpy(&(new_nodes[*base]), *nodes, idx * sizeof(**nodes));
new_nodes[*base + idx] = *element;
memcpy(&(new_nodes[*base + idx + 1]), &((*nodes)[idx]), (*n_nodes - idx) * sizeof(**nodes));
free(*nodes);
*nodes = new_nodes;
*n_nodes += 1;
return true;
}
#define move_elements_behind() memmove(&((*nodes)[*base - 1]), &((*nodes)[*base]), idx * sizeof(**nodes)); *base -= 1
if (idx == 0 && *base != 0) {
*base -= 1;
} else if (*base > 0 && ((*n_nodes + *base) & (*n_nodes + *base - 1)) == 0) {
move_elements_behind();
} else if (idx != *n_nodes) {
if (idx < *n_nodes / 2 && *base != 0) {
move_elements_behind();
} else {
memmove(&((*nodes)[*base + idx + 1]), &((*nodes)[*base + idx]), (*n_nodes - idx) * sizeof(**nodes));
}
}
#undef move_elements_behind
(*nodes)[*base + idx] = *element;
*n_nodes += 1;
return true;
}
static void hashmap_entries_remove(struct hashmap_entries *arr, size_t idx) {
struct hashmap_entry **nodes = &(arr->nodes);
size_t *base = &(arr->base), *n_nodes = &(arr->n_nodes);
if (*nodes == NULL) {
abort();
}
if (idx < *n_nodes / 2) {
// move the elements before idx forward by one position
memmove(&((*nodes)[*base + 1]), &((*nodes)[*base]), idx * sizeof(**nodes));
*base += 1;
} else {
// move the elements after idx backward by one position
memmove(&((*nodes)[*base + idx]), &((*nodes)[*base + idx + 1]), (*n_nodes - idx - 1) * sizeof(**nodes));
}
*n_nodes -= 1;
if (*n_nodes > 1 && (*n_nodes & (*n_nodes - 1)) == 0) {
memmove(*nodes, &((*nodes)[*base]), sizeof(**nodes) * *n_nodes);
struct hashmap_entry *new_nodes = realloc(*nodes, sizeof(**nodes) * *n_nodes);
*base = 0;
if (new_nodes != NULL) {
*nodes = new_nodes;
}
} else if (*n_nodes == 0) {
hashmap_entries_destroy(arr);
}
return;
}
static inline struct hashmap_entry *hashmap_entries_get(struct hashmap_entries *arr, size_t *sz) {
if (arr->nodes == NULL) {
if (sz != NULL) {
*sz = 0;
}
return NULL;
}
if (sz != NULL) {
*sz = arr->n_nodes;
}
return &(arr->nodes[arr->base]);
}
static inline size_t hashmap_hash(const unsigned char *key, size_t key_sz) {
return XXH3_64bits(key, key_sz);
}
static inline pthread_mutex_t *hashmap_mutexes(struct hashmap *hashmap) {
struct hashmap_bucket *buckets = (struct hashmap_bucket *)hashmap->buf;
pthread_mutex_t *mutexes = (pthread_mutex_t *)&(buckets[hashmap->n_buckets]);
return mutexes;
}
static void hashmap_delete_entries(struct hashmap *hashmap) {
hashmap_drop_handler drop_handler = hashmap->drop_handler;
while (hashmap->bucket_with_entries != NULL) {
struct hashmap_bucket *bucket = hashmap->bucket_with_entries;
hashmap->bucket_with_entries = bucket->next;
bucket->prev_next = NULL;
bucket->next = NULL;
size_t n_entries;
struct hashmap_entry *entries = hashmap_entries_get(&(bucket->entries), &(n_entries));
if (drop_handler != NULL) {
for (size_t idx = 0; idx < n_entries; ++idx) {
drop_handler(entries[idx].inner->value, hashmap_drop_delete);
free(entries[idx].inner);
}
} else {
for (size_t idx = 0; idx < n_entries; ++idx) {
free(entries[idx].inner);
}
}
hashmap_entries_destroy(&(bucket->entries));
}
return;
}
static void hashmap_destroy(struct hashmap *hashmap) {
pthread_mutex_t *mutexes = hashmap_mutexes(hashmap);
for (size_t idx = 0; idx < hashmap->n_divisions; ++idx) {
if (pthread_mutex_destroy(&(mutexes[idx])) != 0) {
fputs("programming error: tried to delete a hashmap's entries while holding one of its keys\n", stderr);
abort();
}
}
ENSURE(pthread_mutex_destroy(&(hashmap->meta_mutex)));
hashmap_delete_entries(hashmap);
free(hashmap);
return;
}
struct hashmap *hashmap_copy_ref(struct hashmap *hashmap) {
pthread_mutex_t *mutex = &(hashmap->meta_mutex);
ENSURE(pthread_mutex_lock(mutex));
if (hashmap->ref_count == (SIZE_MAX - 1)) {
hashmap = NULL;
} else {
hashmap->ref_count += 1;
}
ENSURE(pthread_mutex_unlock(mutex));
return hashmap;
}
struct hashmap *hashmap_move_ref(struct hashmap **src) {
struct hashmap *hashmap = *src;
*src = NULL;
return hashmap;
}
void hashmap_destroy_ref(struct hashmap **src) {
struct hashmap *hashmap = *src;
*src = NULL;
ENSURE(pthread_mutex_lock(&(hashmap->meta_mutex)));
bool destroy = --hashmap->ref_count == 0;
ENSURE(pthread_mutex_unlock(&(hashmap->meta_mutex)));
if (destroy) {
hashmap_destroy(hashmap);
}
return;
}
struct hashmap *hashmap_create(
size_t n_buckets,
size_t n_divisions,
hashmap_drop_handler drop_handler
) {
if (n_buckets == 0 || n_divisions == 0) {
return NULL;
}
if (n_divisions > n_buckets) {
n_divisions = n_buckets;
}
struct hashmap *hashmap = malloc(
sizeof(struct hashmap) +
(sizeof(struct hashmap_bucket) * n_buckets) +
(sizeof(pthread_mutex_t) * n_divisions)
);
if (hashmap == NULL) {
return NULL;
}
bool attr_s = false;
pthread_mutex_t mutex_initializer = PTHREAD_MUTEX_INITIALIZER;
memcpy(&(hashmap->meta_mutex), &(mutex_initializer), sizeof(pthread_mutex_t));
struct hashmap_bucket *buckets = (struct hashmap_bucket *)hashmap->buf;
pthread_mutex_t *mutexes = (pthread_mutex_t *)&(buckets[n_buckets]);
hashmap->drop_handler = drop_handler;
hashmap->ref_count = 1;
hashmap->n_buckets = n_buckets;
hashmap->n_divisions = 0;
hashmap->bucket_with_entries = NULL;
for (size_t idx = 0; idx < n_buckets; ++idx) {
buckets[idx].next = NULL;
buckets[idx].prev_next = NULL;
hashmap_entries_destroy(&(buckets[idx].entries));
}
pthread_mutexattr_t attr;
if (pthread_mutexattr_init(&(attr)) != 0) {
goto err;
}
attr_s = true;
pthread_mutexattr_settype(&(attr), PTHREAD_MUTEX_RECURSIVE);
if (pthread_mutex_init(&(hashmap->meta_mutex), NULL) != 0) {
goto err;
}
for (; hashmap->n_divisions < n_divisions; ++(hashmap->n_divisions)) {
pthread_mutex_t *mutex = &(mutexes[hashmap->n_divisions]);
if (pthread_mutex_init(mutex, &(attr)) != 0) {
goto err;
}
}
goto out;
err:;
hashmap_destroy(hashmap);
hashmap = NULL;
out:;
if (attr_s) {
ENSURE(pthread_mutexattr_destroy(&(attr)));
}
return hashmap;
}
static pthread_mutex_t *hashmap_division_mutex_for_bucket(
struct hashmap *hashmap,
size_t bucket_id
) {
size_t n_divisions = hashmap->n_divisions;
size_t buckets_per_division = hashmap->n_buckets / n_divisions;
pthread_mutex_t *mutexes = hashmap_mutexes(hashmap);
size_t division = bucket_id / buckets_per_division;
if (division == n_divisions) {
assert(n_divisions != 0);
division -= 1;
}
return &(mutexes[division]);
}
#ifndef NDEBUG
static void validate_bucket(struct hashmap *hashmap, struct hashmap_bucket *bucket) {
struct hashmap_bucket *buckets = (struct hashmap_bucket *)hashmap->buf;
assert(bucket >= buckets);
assert(bucket < &(buckets[hashmap->n_buckets]));
assert(((uintptr_t)bucket - (uintptr_t)buckets) % sizeof(struct hashmap_bucket) == 0);
}
#else
#define validate_bucket(x, y) ;
#endif
static pthread_mutex_t *hashmap_key_locked_mutex(struct hashmap *hashmap, struct hashmap_key *key) {
struct hashmap_bucket *bucket = key->bucket;
if (bucket == NULL) {
return NULL;
}
validate_bucket(hashmap, bucket);
unsigned char *buckets = hashmap->buf;
size_t locked_bucket_id = ((unsigned char *)bucket - buckets) / sizeof(struct hashmap_bucket);
return hashmap_division_mutex_for_bucket(hashmap, locked_bucket_id);
}
void hashmap_key_initialise(struct hashmap_key *key) {
key->key = NULL;
key->key_sz = 1;
key->bucket = NULL;
return;
}
void hashmap_key_obtain(
struct hashmap *hashmap,
struct hashmap_key *hmap_key,
const unsigned char *key,
size_t key_sz
) {
if (key == NULL && key_sz != 0) {
abort();
}
struct hashmap_bucket *buckets = (struct hashmap_bucket *)hashmap->buf;
size_t hash = hashmap_hash(key, key_sz);
size_t bucket_id = hash % hashmap->n_buckets;
pthread_mutex_t *division_mutex = hashmap_division_mutex_for_bucket(hashmap, bucket_id);
pthread_mutex_t *locked_mutex = hashmap_key_locked_mutex(hashmap, hmap_key);
if (division_mutex != locked_mutex) {
if (locked_mutex != NULL) {
ENSURE(pthread_mutex_unlock(locked_mutex));
}
ENSURE(pthread_mutex_lock(division_mutex));
}
hmap_key->key = key;
hmap_key->key_sz = key_sz;
hmap_key->hash = hash;
hmap_key->bucket = &(buckets[bucket_id]);
return;
}
void hashmap_key_release(struct hashmap *hashmap, struct hashmap_key *key, bool hold_lock) {
if (key->bucket == NULL) {
return;
}
// invalidate key
key->key = NULL;
key->key_sz = 1 /* arbitrary non-zero value */;
if (!hold_lock) {
ENSURE(pthread_mutex_unlock(
hashmap_key_locked_mutex(hashmap, key)
));
key->bucket = NULL;
}
return;
}
static bool hashmap_entry_find(struct hashmap_key *key, struct hashmap_entry **out_entry) {
/*
* shits are (unstable) sorted by hash, then by key_sz, and then by the byte values in the key itself
* e.g.
* +---------+---------+---------+
* |hash:0 |hash:0 |hash:1 |
* |key_sz:12|key_sz:16|key_sz:11|
* |inner:...|inner:...|inner:...|
* +---------+---------+---------+
*/
size_t hash = key->hash;
size_t key_sz = key->key_sz;
// find stage 1: binary search for hash
size_t n_entries;
struct hashmap_entry *entries = hashmap_entries_get(&(key->bucket->entries), &(n_entries));
struct hashmap_entry *entries_subset = entries;
struct hashmap_entry *after_entries = &(entries[n_entries]);
// do not call this function if there are no entries in the bucket
assert(entries != NULL);
size_t len = n_entries;
assert(len != 0);
struct hashmap_entry *entry;
for (;;) {
size_t idx = len / 2;
size_t entry_hash = entries_subset[idx].hash;
if (hash < entry_hash) {
len /= 2;
} else {
if (hash == entry_hash) {
entry = &(entries_subset[idx]);
break;
}
entries_subset = &(entries_subset[idx + 1]);
len = (len - 1) / 2;
}
if (len == 0) {
*out_entry = entries_subset;
return false;
}
}
int memcmp_result;
// find stage 2: look for matching keys
#define entry_memcmp(entry) \
if ((memcmp_result = memcmp(key->key, entry->inner->key, key_sz)) == 0) { \
*out_entry = entry; \
return true; \
}
if (key_sz == entry->key_sz) {
entry_memcmp(entry);
if (memcmp_result > 0) {
for (
entry += 1;
entry < after_entries && key_sz == entry->key_sz && hash == entry->hash;
++entry
) {
entry_memcmp(entry);
if (memcmp_result < 0) {
break;
}
}
} else if (entry != entries) {
for (
entry -= 1;
key_sz == entry->key_sz && hash == entry->hash;
--entry
) {
entry_memcmp(entry);
if (memcmp_result > 0) {
break;
}
if (entry == entries) {
goto err;
}
}
entry += 1;
}
err:;
*out_entry = entry;
return false;
}
#define check_sentinel() if (entry == sentinel) { goto err; }
struct hashmap_entry *sentinel;
if (key_sz < entry->key_sz) {
sentinel = entries;
check_sentinel();
entry -= 1;
for (;;) {
if (entry->hash != hash || entry->key_sz < key_sz) {
entry += 1;
goto err;
}
if (entry->key_sz == key_sz) {
break;
}
check_sentinel();
entry -= 1;
}
do {
entry_memcmp(entry);
if (memcmp_result > 0) {
break;
}
check_sentinel();
entry -= 1;
} while (entry->key_sz == key_sz && entry->hash == hash);
entry += 1;
goto err;
} else {
sentinel = after_entries - 1;
check_sentinel();
entry += 1;
for (;;) {
if (entry->hash != hash || entry->key_sz > key_sz) {
goto err;
}
if (entry->key_sz == key_sz) {
break;
}
check_sentinel();
entry += 1;
}
do {
entry_memcmp(entry);
if (memcmp_result < 0) {
break;
}
check_sentinel();
entry += 1;
} while (entry->key_sz == key_sz && entry->hash == hash);
goto err;
}
#undef entry_memcmp
#undef check_sentinel
}
bool hashmap_get(struct hashmap *hashmap, struct hashmap_key *key, void **value) {
if (
key->bucket == NULL ||
(key->key == NULL && key->key_sz != 0)
) {
return false;
}
validate_bucket(hashmap, key->bucket);
if (hashmap_entries_get(&(key->bucket->entries), NULL) == NULL) {
return false;
}
struct hashmap_entry *entry;
if (!hashmap_entry_find(key, &(entry))) {
return false;
}
*value = entry->inner->value;
return true;
}
static inline struct hashmap_entry_inner *hashmap_alloc_entry_inner(struct hashmap_key *key, void *value) {
struct hashmap_entry_inner *inner = malloc(
sizeof(struct hashmap_entry_inner) +
key->key_sz
);
if (inner == NULL) {
return NULL;
}
memcpy(inner->key, key->key, key->key_sz);
inner->value = value;
return inner;
}
bool hashmap_set(struct hashmap *hashmap, struct hashmap_key *key, void *value) {
if (
key->bucket == NULL /* no lock obtained */ ||
(key->key == NULL && key->key_sz != 0) /* key dropped */
) {
return false;
}
validate_bucket(hashmap, key->bucket);
struct hashmap_bucket *bucket = key->bucket;
struct hashmap_entry *entries = hashmap_entries_get(&(bucket->entries), NULL);
size_t idx = 0;
#define add_entry() \
struct hashmap_entry entry = { \
.hash = key->hash, \
.key_sz = key->key_sz, \
.inner = hashmap_alloc_entry_inner(key, value), \
}; \
if (entry.inner == NULL) { \
return false; \
} \
if (!hashmap_entries_insert(&(bucket->entries), idx, &(entry))) { \
free(entry.inner); \
return false; \
}
if (entries == NULL) {
add_entry();
ENSURE(pthread_mutex_lock(&(hashmap->meta_mutex)));
struct hashmap_bucket **bucket_with_entries = &(hashmap->bucket_with_entries);
bucket->prev_next = bucket_with_entries;
bucket->next = (*bucket_with_entries);
if (bucket->next != NULL) {
bucket->next->prev_next = &(bucket->next);
}
(*bucket_with_entries) = bucket;
ENSURE(pthread_mutex_unlock(&(hashmap->meta_mutex)));
return true;
}
{
struct hashmap_entry *entry;
if (hashmap_entry_find(key, &(entry))) {
if (hashmap->drop_handler != NULL) {
hashmap->drop_handler(entry->inner->value, hashmap_drop_set);
}
entry->inner->value = value;
return true;
}
idx = entry - entries;
}
add_entry();
return true;
#undef add_entry
}
bool hashmap_delete(struct hashmap *hashmap, struct hashmap_key *key) {
if (
key->bucket == NULL ||
(key->key == NULL && key->key_sz != 0)
) {
return false;
}
struct hashmap_bucket *bucket = key->bucket;
validate_bucket(hashmap, bucket);
size_t n_entries;
struct hashmap_entry *entries = hashmap_entries_get(&(bucket->entries), &(n_entries));
if (entries == NULL) {
return false;
}
struct hashmap_entry *entry;
if (!hashmap_entry_find(key, &(entry))) {
return false;
}
if (hashmap->drop_handler != NULL) {
hashmap->drop_handler(entry->inner->value, hashmap_drop_delete);
}
free(entry->inner);
hashmap_entries_remove(&(bucket->entries), (size_t)(entry - entries));
if (n_entries == 1) {
ENSURE(pthread_mutex_lock(&(hashmap->meta_mutex)));
struct hashmap_bucket *next_bucket = bucket->next;
(*bucket->prev_next) = next_bucket;
if (next_bucket != NULL) {
next_bucket->prev_next = bucket->prev_next;
}
bucket->next = NULL;
bucket->prev_next = NULL;
ENSURE(pthread_mutex_unlock(&(hashmap->meta_mutex)));
}
return true;
}
#ifdef HASHMAP_STATIC_ENABLED
void hashmap_static_key_obtain(
void *static_hashmap,
struct hashmap_key *hmap_key,
const unsigned char *key,
size_t key_sz
) {
struct hashmap *hashmap = static_hashmap;
assert(hashmap->ref_count == SIZE_MAX);
return hashmap_key_obtain(hashmap, hmap_key, key, key_sz);
}
void hashmap_static_key_release(void *static_hashmap, struct hashmap_key *key, bool hold_lock) {
struct hashmap *hashmap = static_hashmap;
assert(hashmap->ref_count == SIZE_MAX);
return hashmap_key_release(hashmap, key, hold_lock);
}
void hashmap_static_delete_entries(void *static_hashmap) {
struct hashmap *hashmap = static_hashmap;
assert(hashmap->ref_count == SIZE_MAX);
pthread_mutex_t *mutexes = hashmap_mutexes(hashmap);
for (size_t idx = 0; idx < hashmap->n_divisions; ++idx) {
ENSURE(pthread_mutex_lock(&(mutexes[idx])));
}
ENSURE(pthread_mutex_lock(&(hashmap->meta_mutex)));
hashmap_delete_entries(hashmap);
ENSURE(pthread_mutex_unlock(&(hashmap->meta_mutex)));
for (size_t idx = 0; idx < hashmap->n_divisions; ++idx) {
ENSURE(pthread_mutex_unlock(&(mutexes[idx])));
}
return;
}
bool hashmap_static_get(void *static_hashmap, struct hashmap_key *key, void **value) {
struct hashmap *hashmap = static_hashmap;
assert(hashmap->ref_count == SIZE_MAX);
return hashmap_get(hashmap, key, value);
}
bool hashmap_static_set(void *static_hashmap, struct hashmap_key *key, void *value) {
struct hashmap *hashmap = static_hashmap;
assert(hashmap->ref_count == SIZE_MAX);
return hashmap_set(hashmap, key, value);
}
bool hashmap_static_delete(void *static_hashmap, struct hashmap_key *key) {
struct hashmap *hashmap = static_hashmap;
assert(hashmap->ref_count == SIZE_MAX);
return hashmap_delete(hashmap, key);
}
#endif
#ifdef HASHMAP_MAIN
#include <time.h>
int main(void) {
struct hashmap *hashmap = hashmap_create(183, 1, NULL);
struct hashmap_key key = HASHMAP_KEY_INITIALIZER;
puts("test: hashmap_set");
clock_t time = clock();
for (size_t it = 0; it < 0x50000; ++it) {
hashmap_key_obtain(hashmap, &(key), (void *)&(it), sizeof(size_t));
if (!hashmap_set(hashmap, &(key), (void *)it)) {
printf("hashmap_set failed on iter %zu!\n", it);
return 1;
}
}
printf("test success in %lf\n", (double)(clock() - time) / (double)CLOCKS_PER_SEC);
puts("test: hashmap_get");
time = clock();
for (size_t it = 0; it < 0x50000; ++it) {
hashmap_key_obtain(hashmap, &(key), (void *)&(it), sizeof(size_t));
void *val;
bool success = hashmap_get(hashmap, &(key), &(val));
if (!success || val != (void *)it) {
printf("hashmap_get failed on iter %zu!\n", it);
return 1;
}
}
printf("test success in %lf\n", (double)(clock() - time) / (double)CLOCKS_PER_SEC);
hashmap_key_release(hashmap, &(key), false);
puts("test: hashmap_destroy_ref");
time = clock();
hashmap_destroy_ref(&(hashmap));
printf("test success in %lf\n", (double)(clock() - time) / (double)CLOCKS_PER_SEC);
return 0;
}
#endif

792
src/hashmap.h Normal file

@@ -0,0 +1,792 @@
/*
ISC License
Copyright (c) 2023, aiden (aiden@cmp.bz)
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef HASHMAP_H
#define HASHMAP_H
// this hashmap _can_ be very fast
// with the right hash function.
// the only real bottleneck here is
// malloc, which is used on insert
// because you cannot enforce
// lifetimes in c.
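// the hash function is supplied by the includer as a macro; for instance,
// example.c in this commit does:
// #define HASHMAP_HASH_FUNCTION(key) (*(uint64_t *)key ^ 9268326398)
// #include "hashmap.h"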
#include <stdlib.h>
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include <limits.h>
#include <assert.h>
#include <stdatomic.h>
#include <pthread.h>
#include <string.h>
#include <immintrin.h>
#include "ifc/ifc.h"
#define HASHMAP_MIN_RESERVE 24
enum hashmap_callback_reason {
hashmap_acquire,
hashmap_drop_delete,
hashmap_drop_set,
};
typedef void (*hashmap_callback)(void *entry, enum hashmap_callback_reason reason, void *arg);
struct hashmap_key {
void *key;
uint32_t key_sz;
uint32_t hash;
};
struct hashmap_kv {
void *value;
uint32_t key_sz;
unsigned char key[];
};
struct hashmap_bucket_protected {
/*
"probe sequence length";
how many failed entry comparisons it takes to find
this entry in the buckets array via linear probing
in other words:
the value of the variable which stores the loop's
iteration, when the entry is found via linear probing
for example, assuming that the entry exists in the
hashmap, the return value of the following pseudo-code function:
uint32_t psl() {
for (uint32_t it = 0;; it = (it + 1) % capacity) {
if (desired_entry == entry) {
return it;
}
}
unreachable!
}
*/
// to-do: psl u16, compute if doesn't fit
uint32_t psl;
uint32_t hash;
struct hashmap_kv *kv;
};
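// a worked example of psl (hypothetical, 8 buckets): an entry whose hash maps
// to bucket 2 but which linear probing pushed to bucket 4 has psl == 2,
// because a lookup visits buckets 2 and 3 before finding it in bucket 4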
struct hashmap_bucket {
atomic_flag lock;
struct hashmap_bucket_protected protected;
};
struct hashmap_area {
uint32_t reserved;
atomic_bool lock;
};
struct hashmap {
_Atomic size_t reference_count;
const hashmap_callback callback;
struct hashmap_bucket *buckets;
uint32_t n_buckets;
_Atomic uint32_t occupied_buckets;
const float
resize_percentage,
resize_multiply;
// resize //
atomic_bool resize_fail;
atomic_bool resizing;
uint16_t threads_resizing;
_Atomic uint32_t init_idx;
_Atomic uint32_t resize_idx;
pthread_mutex_t resize_mutex;
pthread_cond_t resize_cond;
pthread_mutex_t ensured_mutex;
pthread_cond_t ensured_cond;
pthread_cond_t stop_resize_cond;
struct hashmap_bucket *new_buckets;
uint32_t new_n_buckets;
// ifc //
struct ifc *const ifc;
};
// *output_bucket will **always** be set to a locked hashmap bucket.
// it is the caller's duty to release the bucket's lock once it is done using *output_bucket.
static bool _hashmap_find(
struct hashmap_bucket *buckets,
uint32_t n_buckets,
struct hashmap_key *hm_key,
struct hashmap_bucket **output_bucket,
uint32_t *psl
) {
*psl = 0;
void *key = hm_key->key;
uint32_t key_sz = hm_key->key_sz;
uint32_t hash = hm_key->hash;
uint32_t bucket_idx = hm_key->hash % n_buckets;
struct hashmap_bucket *sentinel = &(buckets[n_buckets]);
struct hashmap_bucket *bucket = &(buckets[bucket_idx]);
while (__atomic_test_and_set(&(bucket->lock), __ATOMIC_ACQUIRE)) {
_mm_pause();
}
for (size_t it = 0;; ++it) {
struct hashmap_bucket_protected *protected = &(bucket->protected);
if (
protected->kv == NULL ||
protected->psl < *psl
) {
*output_bucket = bucket;
return false;
}
if (protected->hash == hash && protected->kv->key_sz == key_sz) {
if (memcmp(key, protected->kv->key, key_sz) == 0) {
// found entry
*output_bucket = bucket;
return true;
}
}
*psl += 1;
struct hashmap_bucket *next_bucket = bucket + 1;
if (next_bucket == sentinel) {
next_bucket = buckets;
}
while (__atomic_test_and_set(&(next_bucket->lock), __ATOMIC_ACQUIRE)) {
_mm_pause();
}
__atomic_clear(&(bucket->lock), __ATOMIC_RELEASE);
bucket = next_bucket;
}
}
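// sketch of the caller's side of the contract (this mirrors what
// hashmap_cas below actually does):
// struct hashmap_bucket *bucket;
// uint32_t psl;
// bool found = _hashmap_find(buckets, n_buckets, &(key), &(bucket), &(psl));
// /* ... inspect or modify bucket->protected ... */
// __atomic_clear(&(bucket->lock), __ATOMIC_RELEASE);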
// insert `interior` at *current, robin hood style: swap it into any bucket
// whose resident entry has a smaller psl, carrying the displaced entry
// forward until an empty bucket is reached. the bucket that *current points
// at on return is still locked; the caller must release it.
static void _hashmap_cfi(
struct hashmap_bucket *array,
struct hashmap_bucket **current,
struct hashmap_bucket *sentinel,
struct hashmap_bucket_protected interior
) {
struct hashmap_bucket_protected swap_prot;
swap_prot = (*current)->protected;
(*current)->protected = interior;
interior = swap_prot;
if (interior.kv == NULL) {
return;
}
for (;;) {
atomic_flag *old_lock = &((*current)->lock);
(*current) += 1;
if ((*current) == sentinel) {
(*current) = array;
}
while (__atomic_test_and_set(&((*current)->lock), __ATOMIC_ACQUIRE)) {
_mm_pause();
}
__atomic_clear(old_lock, __ATOMIC_RELEASE);
interior.psl += 1;
if ((*current)->protected.kv == NULL) {
(*current)->protected = interior;
return;
}
if ((*current)->protected.psl < interior.psl) {
swap_prot = (*current)->protected;
(*current)->protected = interior;
interior = swap_prot;
}
}
}
static void _hashmap_resize(struct hashmap *hashmap, struct hashmap_area *area, bool is_main_thread) {
if (hashmap->resize_fail) {
return;
}
area->lock = false;
struct hashmap_bucket *buckets, *new_buckets;
size_t n_buckets, new_n_buckets;
if (is_main_thread) {
buckets = hashmap->buckets;
n_buckets = hashmap->n_buckets;
new_n_buckets = n_buckets * hashmap->resize_multiply;
// allocate new buckets array
if (
(new_buckets = malloc(new_n_buckets * sizeof(struct hashmap_bucket))) == NULL
) {
area->lock = true;
hashmap->resize_fail = true;
__atomic_clear(&(hashmap->resizing), __ATOMIC_RELEASE);
pthread_mutex_lock(&(hashmap->ensured_mutex));
pthread_cond_broadcast(&(hashmap->ensured_cond));
pthread_mutex_unlock(&(hashmap->ensured_mutex));
return;
}
hashmap->new_buckets = new_buckets;
hashmap->new_n_buckets = new_n_buckets;
hashmap->init_idx = 0;
hashmap->resize_idx = 0;
uint32_t n = new_n_buckets / *(unsigned int *)hashmap->ifc; // the ifc object appears to begin with its area (thread) count; each pass claims a slice of this size
for (;;) {
uint32_t idx = (hashmap->init_idx += n) - n;
if (idx >= new_n_buckets) {
break;
}
if (idx + n >= new_n_buckets) {
n = new_n_buckets - idx;
}
for (uint32_t it = 0; it < n; ++it) {
struct hashmap_bucket *bucket = &(new_buckets[idx + it]);
__atomic_clear(&(bucket->lock), __ATOMIC_RELAXED);
bucket->protected.kv = NULL;
}
}
// wait for all other threads to
// leave non-resize critical sections
pthread_mutex_lock(&(hashmap->resize_mutex));
wait:;
// wait for other threads to stop working
ifc_iter(struct hashmap_area)(hashmap->ifc, it_area) {
if (it_area->lock) {
pthread_cond_wait(&(hashmap->resize_cond), &(hashmap->resize_mutex));
goto wait;
}
}
pthread_mutex_unlock(&(hashmap->resize_mutex));
pthread_mutex_lock(&(hashmap->ensured_mutex));
assert(hashmap->threads_resizing == 0);
hashmap->threads_resizing = 1;
pthread_cond_broadcast(&(hashmap->ensured_cond));
pthread_mutex_unlock(&(hashmap->ensured_mutex));
} else {
pthread_mutex_lock(&(hashmap->resize_mutex));
pthread_cond_signal(&(hashmap->resize_cond));
pthread_mutex_unlock(&(hashmap->resize_mutex));
pthread_mutex_lock(&(hashmap->ensured_mutex));
while (hashmap->threads_resizing == 0) {
if (!hashmap->resizing) {
area->lock = true;
pthread_mutex_unlock(&(hashmap->ensured_mutex));
return;
}
pthread_cond_wait(&(hashmap->ensured_cond), &(hashmap->ensured_mutex));
}
hashmap->threads_resizing += 1;
pthread_mutex_unlock(&(hashmap->ensured_mutex));
buckets = hashmap->buckets;
n_buckets = hashmap->n_buckets;
new_buckets = hashmap->new_buckets;
new_n_buckets = hashmap->new_n_buckets;
}
// hashmap->threads_resizing != 0, so this is safe
area->lock = true;
// assist with the resize
uint32_t n = n_buckets / *(unsigned int *)hashmap->ifc;
for (;;) {
uint32_t idx = (hashmap->resize_idx += n) - n;
if (idx >= n_buckets) {
break;
}
if (idx + n >= n_buckets) {
n = n_buckets - idx;
}
for (uint32_t it = 0; it < n; ++it) {
struct hashmap_bucket_protected *prot =
&(buckets[idx + it].protected);
if (prot->kv == NULL) {
continue;
}
struct hashmap_bucket *bucket;
uint32_t psl;
struct hashmap_key key = {
.key = prot->kv->key,
.key_sz = prot->kv->key_sz,
.hash = prot->hash,
};
_hashmap_find(
new_buckets,
new_n_buckets,
&(key),
&(bucket),
&(psl)
);
_hashmap_cfi(
new_buckets, &(bucket), &(new_buckets[new_n_buckets]),
(struct hashmap_bucket_protected){
.kv = prot->kv,
.hash = prot->hash,
.psl = psl,
}
);
__atomic_clear(&(bucket->lock), __ATOMIC_RELEASE);
}
}
pthread_mutex_lock(&(hashmap->ensured_mutex));
if (--hashmap->threads_resizing == 0) {
free(buckets);
hashmap->buckets = new_buckets;
hashmap->n_buckets = new_n_buckets;
pthread_cond_broadcast(&(hashmap->stop_resize_cond));
__atomic_clear(&(hashmap->resizing), __ATOMIC_RELEASE);
} else {
pthread_cond_wait(&(hashmap->stop_resize_cond), &(hashmap->ensured_mutex));
}
pthread_mutex_unlock(&(hashmap->ensured_mutex));
return;
}
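// the resize in numbers: with the default resize_percentage of 0.94 and
// resize_multiply of 2, a 1000-bucket table begins a cooperative resize to
// 2000 buckets once a reservation would push it past 940 occupied buckets;
// every thread that enters the map while `resizing` is set migrates a slice
// of the old buckets array before continuing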
static size_t _hashmap_reserve(struct hashmap *hashmap, struct hashmap_area *area, uint32_t n_reserve, bool *resize_needed) {
if (n_reserve == 0) {
*resize_needed = false;
return 0;
}
uint32_t n_buckets = hashmap->n_buckets;
uint32_t capture = hashmap->occupied_buckets;
uint32_t update;
do {
if (capture + n_reserve > n_buckets * hashmap->resize_percentage && !hashmap->resize_fail) {
*resize_needed = true;
return 0;
}
if (n_reserve > hashmap->n_buckets - capture) {
update = hashmap->n_buckets; // clamp: take every bucket that remains
} else {
update = capture + n_reserve;
}
} while (!atomic_compare_exchange_weak_explicit(
&(hashmap->occupied_buckets),
&(capture),
update,
memory_order_relaxed,
memory_order_relaxed
));
size_t reserved = update - capture;
area->reserved += reserved;
*resize_needed = false;
return reserved;
}
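// e.g. n_buckets == 1000, resize_percentage == 0.94, occupied_buckets == 930:
// reserving HASHMAP_MIN_RESERVE (24) buckets would pass 940, so *resize_needed
// is set and the caller must trigger (or assist) a resize before retrying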
static inline void _hashmap_not_running(struct hashmap *hashmap, struct hashmap_area *area) {
area->lock = false;
if (hashmap->resizing) {
// to-do: maybe assist?
pthread_mutex_lock(&(hashmap->resize_mutex));
pthread_cond_signal(&(hashmap->resize_cond));
pthread_mutex_unlock(&(hashmap->resize_mutex));
}
return;
}
static void hashmap_key(
void *key,
uint32_t key_sz,
struct hashmap_key *output_key
) {
if (key == NULL && key_sz != 0) {
abort();
}
output_key->key = key;
output_key->key_sz = key_sz;
output_key->hash = HASHMAP_HASH_FUNCTION(key);
return;
}
struct hashmap_area *hashmap_area(struct hashmap *hashmap) {
return ifc_area(hashmap->ifc);
}
void hashmap_area_flush(struct hashmap *hashmap, struct hashmap_area *area) {
// does not require a lock
hashmap->occupied_buckets -= area->reserved;
area->reserved = 0;
return;
}
void hashmap_area_release(struct hashmap *hashmap, struct hashmap_area *area) {
hashmap_area_flush(hashmap, area);
ifc_release(hashmap->ifc, area);
return;
}
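// per-thread lifecycle, as used by the worker threads in example.c:
// struct hashmap_area *area = hashmap_area(map); // once, on thread start
// /* ... hashmap_reserve() / hashmap_cas() calls ... */
// hashmap_area_release(map, area); // before the thread exits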
static size_t hashmap_reserve(struct hashmap *hashmap, struct hashmap_area *area, size_t n_reserve) {
assert(hashmap != NULL && area != NULL);
// try to enter critical section
// (conceptually a trylock)
area->lock = true;
if (hashmap->resizing) {
// "trylock" failed, so we must
// assist with the ongoing resize
_hashmap_resize(hashmap, area, false);
// area->lock is still true, and the
// resize has completed, so we can enter
// the critical section
}
reserve:;
bool resize_needed;
size_t reserved = _hashmap_reserve(hashmap, area, n_reserve, &(resize_needed));
if (resize_needed) {
bool acq = __atomic_test_and_set(&(hashmap->resizing), __ATOMIC_ACQUIRE) == false;
_hashmap_resize(hashmap, area, acq);
goto reserve;
}
_hashmap_not_running(hashmap, area);
return reserved;
}
enum hashmap_cas_result {
hashmap_cas_success,
hashmap_cas_again,
hashmap_cas_error,
};
enum hashmap_cas_option {
hashmap_cas_set,
hashmap_cas_delete,
hashmap_cas_get,
};
static enum hashmap_cas_result hashmap_cas(
struct hashmap *hashmap,
struct hashmap_area *area,
struct hashmap_key *key,
void **expected_value,
void *new_value,
enum hashmap_cas_option option,
void *callback_arg
) {
assert(hashmap != NULL && area != NULL && key != NULL && expected_value != NULL);
// try to enter critical section
// (conceptually a trylock)
area->lock = true;
if (hashmap->resizing) {
// "trylock" failed, so we must
// assist with the ongoing resize
_hashmap_resize(hashmap, area, false);
// area->lock is still true, and the
// resize has completed, so we can enter
// the critical section
}
// enter critical section
// this function cannot be interrupted
// while in the critical section
cas:;
#define _hashmap_cas_leave_critical_section() do { \
__atomic_clear(&(bucket->lock), __ATOMIC_RELEASE); \
_hashmap_not_running(hashmap, area); \
} while (0)
struct hashmap_bucket
*buckets = hashmap->buckets,
*bucket;
uint32_t n_buckets = hashmap->n_buckets;
uint32_t psl;
bool find = _hashmap_find(
buckets,
n_buckets,
key,
&(bucket),
&(psl)
);
if (find) {
void **current_value = &(bucket->protected.kv->value);
if (option == hashmap_cas_delete) {
// new_value doubles as a flag here: NULL means "only delete if
// *expected_value matches"; any non-NULL value deletes unconditionally
if (new_value == NULL && *expected_value != *current_value) {
*expected_value = *current_value;
_hashmap_cas_leave_critical_section();
return hashmap_cas_again;
}
if (hashmap->callback != NULL) {
hashmap->callback(*current_value, hashmap_drop_delete, callback_arg);
}
free(bucket->protected.kv);
bucket->protected.kv = NULL;
struct hashmap_bucket *sentinel = &(buckets[n_buckets]);
for (;;) {
struct hashmap_bucket *next_bucket = bucket + 1;
if (next_bucket == sentinel) {
next_bucket = buckets;
}
while (__atomic_test_and_set(&(next_bucket->lock), __ATOMIC_ACQUIRE)) {
_mm_pause();
}
if (next_bucket->protected.kv == NULL || next_bucket->protected.psl == 0) {
__atomic_clear(&(bucket->lock), __ATOMIC_RELEASE);
__atomic_clear(&(next_bucket->lock), __ATOMIC_RELEASE);
break;
}
bucket->protected = next_bucket->protected;
bucket->protected.psl -= 1; // the shifted entry is now one probe closer to its home bucket
__atomic_clear(&(bucket->lock), __ATOMIC_RELEASE);
bucket = next_bucket;
}
area->reserved += 1;
_hashmap_cas_leave_critical_section();
return hashmap_cas_success;
}
if (
(option == hashmap_cas_set && *expected_value != *current_value) ||
option == hashmap_cas_get
) {
if (hashmap->callback != NULL) {
hashmap->callback(*current_value, hashmap_acquire, callback_arg);
}
*expected_value = *current_value;
_hashmap_cas_leave_critical_section();
return hashmap_cas_again;
}
if (hashmap->callback != NULL) {
hashmap->callback(*current_value, hashmap_drop_set, callback_arg);
}
*current_value = new_value;
_hashmap_cas_leave_critical_section();
return hashmap_cas_success;
}
if (option != hashmap_cas_set) {
_hashmap_cas_leave_critical_section();
return hashmap_cas_error;
}
if (area->reserved == 0) {
bool resize_needed;
if (_hashmap_reserve(hashmap, area, HASHMAP_MIN_RESERVE, &(resize_needed)) == 0) {
if (resize_needed) {
__atomic_clear(&(bucket->lock), __ATOMIC_RELEASE);
bool acq = __atomic_test_and_set(&(hashmap->resizing), __ATOMIC_ACQUIRE) == false;
_hashmap_resize(hashmap, area, acq);
// even if the resize failed, the bucket
// may have been inserted by another thread
// after we released our exclusive control
// over the bucket (key) via atomic_clear.
goto cas;
}
_hashmap_cas_leave_critical_section();
return hashmap_cas_error;
}
}
// allocate kv (probably a bottleneck)
struct hashmap_kv *kv = malloc(
sizeof(struct hashmap_kv) +
key->key_sz
);
if (kv == NULL) {
_hashmap_cas_leave_critical_section();
return hashmap_cas_error;
}
area->reserved -= 1;
kv->value = new_value;
kv->key_sz = key->key_sz;
memcpy(kv->key, key->key, key->key_sz);
_hashmap_cfi(
buckets, &(bucket), &(buckets[n_buckets]),
(struct hashmap_bucket_protected){
.hash = key->hash,
.psl = psl,
.kv = kv,
}
);
_hashmap_cas_leave_critical_section();
return hashmap_cas_success;
}
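// usage sketch: an insert-or-overwrite loop built on the semantics above
// (hypothetical names; the single-shot calls in example.c are similar):
// void *expected = NULL;
// enum hashmap_cas_result r;
// while ((r = hashmap_cas(map, area, &(key), &(expected), new_value,
// hashmap_cas_set, NULL)) == hashmap_cas_again) {
// /* `expected` now holds the current value; retry the swap */
// }
// if (r == hashmap_cas_error) { /* out of memory, or the map is full */ }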
struct hashmap *hashmap_create(
uint16_t n_threads,
uint32_t initial_size,
float resize_percentage,
float resize_multiply,
hashmap_callback callback
) {
if (n_threads == 0) {
return NULL;
}
if (resize_percentage <= 0 || resize_percentage > 1) {
resize_percentage = 0.94;
}
if (resize_multiply <= 1) {
resize_multiply = 2;
}
size_t n_buckets = n_threads + 1;
float _min = HASHMAP_MIN_RESERVE / resize_percentage;
uint32_t min = ((uint32_t)_min + (_min == (uint32_t)_min ? 0 : 1));
if (initial_size < min) {
initial_size = min;
}
if (n_buckets < initial_size) {
n_buckets = initial_size;
}
struct hashmap *hashmap = malloc(sizeof(struct hashmap));
if (hashmap == NULL) {
return NULL;
}
struct hashmap_bucket *buckets = malloc(sizeof(struct hashmap_bucket) * n_buckets);
if (buckets == NULL) {
err1:;
free(hashmap);
return NULL;
}
*(struct ifc **)&(hashmap->ifc) = ifc_alloc(n_threads, sizeof(size_t));
if (hashmap->ifc == NULL) {
err2:;
free(buckets);
goto err1;
}
if (pthread_mutex_init(&(hashmap->resize_mutex), NULL) != 0) {
err3:;
ifc_free(hashmap->ifc);
goto err2;
}
if (pthread_cond_init(&(hashmap->resize_cond), NULL) != 0) {
err4:;
pthread_mutex_destroy(&(hashmap->resize_mutex));
goto err3;
}
if (pthread_mutex_init(&(hashmap->ensured_mutex), NULL) != 0) {
err5:;
pthread_cond_destroy(&(hashmap->resize_cond));
goto err4;
}
if (pthread_cond_init(&(hashmap->ensured_cond), NULL) != 0) {
err6:;
pthread_mutex_destroy(&(hashmap->ensured_mutex));
goto err5;
}
if (pthread_cond_init(&(hashmap->stop_resize_cond), NULL) != 0) {
pthread_cond_destroy(&(hashmap->ensured_cond));
goto err6;
}
hashmap->reference_count = 1; // the creator holds the first reference
*(hashmap_callback *)&(hashmap->callback) = callback;
hashmap->buckets = buckets;
hashmap->n_buckets = n_buckets;
hashmap->occupied_buckets = 0;
for (size_t idx = 0; idx < n_buckets; ++idx) {
buckets[idx].protected.kv = NULL;
__atomic_clear(&(buckets[idx].lock), __ATOMIC_RELAXED);
}
*(float *)&(hashmap->resize_percentage) = resize_percentage;
*(float *)&(hashmap->resize_multiply) = resize_multiply;
// resize
__atomic_clear(&(hashmap->resize_fail), __ATOMIC_RELAXED);
__atomic_clear(&(hashmap->resizing), __ATOMIC_RELAXED);
hashmap->threads_resizing = 0;
// ifc
ifc_iter(struct hashmap_area)(hashmap->ifc, area) {
area->reserved = 0;
area->lock = false;
}
return hashmap;
}
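// example.c constructs its map as:
// hashmap_create(N_THREADS, N_BUCKETS / 0.94, 1, 2, NULL);
// a resize_percentage of 1 defers resizing until the table is completely
// full, and the padded initial_size leaves room for N_BUCKETS entries
// below that threshold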
struct hashmap *hashmap_copy_ref(struct hashmap *hashmap) {
hashmap->reference_count += 1;
return hashmap;
}
void hashmap_destroy(struct hashmap *hashmap) {
if (--hashmap->reference_count == 0) {
pthread_cond_destroy(&(hashmap->stop_resize_cond));
pthread_cond_destroy(&(hashmap->resize_cond));
pthread_mutex_destroy(&(hashmap->ensured_mutex));
pthread_cond_destroy(&(hashmap->ensured_cond));
pthread_mutex_destroy(&(hashmap->resize_mutex));
free(hashmap->buckets);
ifc_free(hashmap->ifc);
free(hashmap);
}
return;
}
#endif

File diff suppressed because it is too large

1
src/ifc Submodule

@@ -0,0 +1 @@
Subproject commit ecf24f6e7605f0f90e99f5d3cebe207060bc8da7