
Commit ef42092e authored by Matthieu Schaller

Applied code formatting tool.

parent bca94340
......@@ -250,8 +250,8 @@ int main(int argc, char *argv[]) {
NULL, 0, 0),
OPT_INTEGER('T', "timers", &with_verbose_timers,
"Print timers every time-step.", NULL, 0, 0),
OPT_BOOLEAN('u', "fof", &with_fof,
"Run Friends-of-Friends algorithm.", NULL, 0, 0),
OPT_BOOLEAN('u', "fof", &with_fof, "Run Friends-of-Friends algorithm.",
NULL, 0, 0),
OPT_INTEGER('v', "verbose", &verbose,
"Run in verbose mode, in MPI mode 2 outputs from all ranks.",
NULL, 0, 0),
......@@ -1112,7 +1112,6 @@ int main(int argc, char *argv[]) {
/* Is there a dump before the end of the first time-step? */
engine_check_for_dumps(&e);
}
/* Legend */
......
......@@ -238,8 +238,8 @@ int main(int argc, char *argv[]) {
NULL, 0, 0),
OPT_INTEGER('T', "timers", &with_verbose_timers,
"Print timers every time-step.", NULL, 0, 0),
OPT_BOOLEAN('u', "fof", &with_fof,
"Run Friends-of-Friends algorithm.", NULL, 0, 0),
OPT_BOOLEAN('u', "fof", &with_fof, "Run Friends-of-Friends algorithm.",
NULL, 0, 0),
OPT_INTEGER('v', "verbose", &verbose,
"Run in verbose mode, in MPI mode 2 outputs from all ranks.",
NULL, 0, 0),
......
......@@ -89,7 +89,7 @@ __attribute__((always_inline)) INLINE static void atomic_min(
} while (test_val != old_val);
}
/**
/**
* @brief Atomic min operation on doubles.
*
* This is a text-book implementation based on an atomic CAS.
......
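The hunk above reflows the doc comment of the double-precision atomic min, which describes a text-book implementation based on an atomic CAS. A minimal sketch of that pattern is given below for reference; the helper name atomic_min_d_sketch and the use of the GCC/Clang __sync_val_compare_and_swap builtin are assumptions for illustration, not the actual SWIFT code in atomic.h.

```c
#include <string.h>

/* Sketch of a text-book CAS-based atomic min on a double.
 * Assumes GCC/Clang __sync builtins; the real SWIFT helper may differ. */
static inline void atomic_min_d_sketch(volatile double *const address,
                                       const double y) {

  volatile long long *const addr_as_ll = (volatile long long *)address;
  long long old_bits, new_bits;
  double old_val, new_val;

  do {
    /* Snapshot the current value and compute the candidate minimum. */
    old_bits = *addr_as_ll;
    memcpy(&old_val, &old_bits, sizeof(double));
    new_val = (y < old_val) ? y : old_val;
    memcpy(&new_bits, &new_val, sizeof(double));

    /* Commit only if no other thread changed the value since the snapshot;
     * otherwise loop and try again. */
  } while (__sync_val_compare_and_swap(addr_as_ll, old_bits, new_bits) !=
           old_bits);
}
```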
......@@ -20,15 +20,16 @@
void hashmap_allocate_chunks(hashmap_t *m, int num_chunks) {
/* Allocate a fresh set of chunks. */
hashmap_chunk_t *alloc;
if ((alloc = (hashmap_chunk_t *)swift_calloc("hashmap", num_chunks,
sizeof(hashmap_chunk_t))) == NULL) {
if ((alloc = (hashmap_chunk_t *)swift_calloc(
"hashmap", num_chunks, sizeof(hashmap_chunk_t))) == NULL) {
error("Unable to allocate chunks.");
}
/* Hook up the alloc, so that we can clean it up later. */
if (m->allocs_count == m->allocs_size) {
m->allocs_size *= 2;
void **new_allocs = (void **)swift_malloc("hashmap", sizeof(void *) * m->allocs_size);
void **new_allocs =
(void **)swift_malloc("hashmap", sizeof(void *) * m->allocs_size);
memcpy(new_allocs, m->allocs, sizeof(void *) * m->allocs_count);
swift_free("hashmap", m->allocs);
m->allocs = new_allocs;
......@@ -50,8 +51,8 @@ void hashmap_allocate_chunks(hashmap_t *m, int num_chunks) {
void hashmap_init(hashmap_t *m) {
/* Allocate the first (empty) list of chunks. */
m->nr_chunks = INITIAL_NUM_CHUNKS;
if ((m->chunks = (hashmap_chunk_t **)swift_calloc("hashmap",
m->nr_chunks, sizeof(hashmap_chunk_t *))) == NULL) {
if ((m->chunks = (hashmap_chunk_t **)swift_calloc(
"hashmap", m->nr_chunks, sizeof(hashmap_chunk_t *))) == NULL) {
error("Unable to allocate hashmap chunks.");
}
......@@ -61,7 +62,8 @@ void hashmap_init(hashmap_t *m) {
/* Init the array of allocations. */
m->allocs_size = HASHMAP_ALLOCS_INITIAL_SIZE;
if ((m->allocs = (void **)swift_malloc("hashmap", sizeof(void *) * m->allocs_size)) == NULL) {
if ((m->allocs = (void **)swift_malloc(
"hashmap", sizeof(void *) * m->allocs_size)) == NULL) {
error("Unable to allocate allocs pointer array.");
}
m->allocs_count = 0;
......@@ -190,7 +192,7 @@ hashmap_element_t *hashmap_find(hashmap_t *m, hashmap_key_t key, int create_new,
/* Mark this element as taken and increase the size counter. */
chunk->masks[mask_offset] |= search_mask;
m->size += 1;
if(created_new_element) *created_new_element = 1;
if (created_new_element) *created_new_element = 1;
/* Set the key. */
chunk->data[offset_in_chunk].key = key;
......@@ -234,12 +236,12 @@ void hashmap_grow(hashmap_t *m) {
if (HASHMAP_DEBUG_OUTPUT) {
message("Increasing hash table size from %zu (%zu kb) to %zu (%zu kb).",
old_table_size, old_table_size * sizeof(hashmap_element_t) / 1024,
m->table_size, m->table_size * sizeof(hashmap_element_t) / 1024);
old_table_size, old_table_size * sizeof(hashmap_element_t) / 1024,
m->table_size, m->table_size * sizeof(hashmap_element_t) / 1024);
}
if ((m->chunks = (hashmap_chunk_t **)swift_calloc("hashmap",
m->nr_chunks, sizeof(hashmap_chunk_t *))) == NULL) {
if ((m->chunks = (hashmap_chunk_t **)swift_calloc(
"hashmap", m->nr_chunks, sizeof(hashmap_chunk_t *))) == NULL) {
error("Unable to allocate hashmap chunks.");
}
......@@ -266,8 +268,9 @@ void hashmap_grow(hashmap_t *m) {
&chunk->data[mid * HASHMAP_BITS_PER_MASK + eid];
/* Copy the element over to the new hashmap. */
hashmap_element_t *new_element = hashmap_find(
m, element->key, /*create_new=*/1, /*chain_length=*/NULL, /*created_new_element=*/NULL);
hashmap_element_t *new_element =
hashmap_find(m, element->key, /*create_new=*/1,
/*chain_length=*/NULL, /*created_new_element=*/NULL);
if (!new_element) {
/* TODO(pedro): Deal with this type of failure more elegantly. */
error("Failed to re-hash element.");
......@@ -286,15 +289,17 @@ void hashmap_grow(hashmap_t *m) {
}
void hashmap_put(hashmap_t *m, hashmap_key_t key, hashmap_value_t value) {
/* Try to find an element for the given key. */
hashmap_element_t *element =
hashmap_find(m, key, /*create_new=*/1, /*chain_length=*/NULL, /*created_new_element=*/NULL);
hashmap_find(m, key, /*create_new=*/1, /*chain_length=*/NULL,
/*created_new_element=*/NULL);
/* Loop around, trying to find our place in the world. */
while (!element) {
hashmap_grow(m);
element = hashmap_find(m, key, /*create_new=*/1, /*chain_length=*/NULL, /*created_new_element=*/NULL);
element = hashmap_find(m, key, /*create_new=*/1, /*chain_length=*/NULL,
/*created_new_element=*/NULL);
}
/* Set the value. */
......@@ -302,31 +307,36 @@ void hashmap_put(hashmap_t *m, hashmap_key_t key, hashmap_value_t value) {
}
hashmap_value_t *hashmap_get(hashmap_t *m, hashmap_key_t key) {
/* Look for the given key. */
hashmap_element_t *element =
hashmap_find(m, key, /*create_new=*/1, /*chain_length=*/NULL, /*created_new_element=*/NULL);
hashmap_find(m, key, /*create_new=*/1, /*chain_length=*/NULL,
/*created_new_element=*/NULL);
while (!element) {
hashmap_grow(m);
element = hashmap_find(m, key, /*create_new=*/1, /*chain_length=*/NULL, /*created_new_element=*/NULL);
element = hashmap_find(m, key, /*create_new=*/1, /*chain_length=*/NULL,
/*created_new_element=*/NULL);
}
return &element->value;
}
hashmap_value_t *hashmap_get_new(hashmap_t *m, hashmap_key_t key, int *created_new_element) {
hashmap_value_t *hashmap_get_new(hashmap_t *m, hashmap_key_t key,
int *created_new_element) {
/* Look for the given key. */
hashmap_element_t *element =
hashmap_find(m, key, /*create_new=*/1, /*chain_length=*/NULL, created_new_element);
hashmap_element_t *element = hashmap_find(
m, key, /*create_new=*/1, /*chain_length=*/NULL, created_new_element);
while (!element) {
hashmap_grow(m);
element = hashmap_find(m, key, /*create_new=*/1, /*chain_length=*/NULL, created_new_element);
element = hashmap_find(m, key, /*create_new=*/1, /*chain_length=*/NULL,
created_new_element);
}
return &element->value;
}
hashmap_value_t *hashmap_lookup(hashmap_t *m, hashmap_key_t key) {
hashmap_element_t *element =
hashmap_find(m, key, /*create_new=*/0, /*chain_length=*/NULL, /*created_new_element=*/NULL);
hashmap_find(m, key, /*create_new=*/0, /*chain_length=*/NULL,
/*created_new_element=*/NULL);
return element ? &element->value : NULL;
}
......@@ -388,7 +398,8 @@ void hashmap_count_chain_lengths(hashmap_key_t key, hashmap_value_t *value,
void *data) {
hashmap_t *m = (hashmap_t *)data;
int count = 0;
hashmap_find(m, key, /*create_entry=*/0, &count, /*created_new_element=*/NULL);
hashmap_find(m, key, /*create_entry=*/0, &count,
/*created_new_element=*/NULL);
m->chain_length_counts[count] += 1;
}
#endif
......
......@@ -120,7 +120,8 @@ extern hashmap_value_t *hashmap_get(hashmap_t *m, hashmap_key_t key);
* Note that the returned pointer is volatile and will be invalidated if the
* hashmap is re-hashed!
*/
extern hashmap_value_t *hashmap_get_new(hashmap_t *m, hashmap_key_t key, int *created_new_element);
extern hashmap_value_t *hashmap_get_new(hashmap_t *m, hashmap_key_t key,
int *created_new_element);
/**
* @brief Look for the given key and return a pointer to its value or NULL if
......
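The header hunk above re-wraps the declaration of hashmap_get_new() and keeps the note that the returned pointer is invalidated when the map is re-hashed. A hypothetical usage sketch of that function follows, assuming a hashmap_t already set up with hashmap_init() and the value_st field used by the unit tests further down; the wrapper name count_key is illustrative only.

```c
#include "hashmap.h"

/* Hypothetical insert-or-update helper built on hashmap_get_new(). */
void count_key(hashmap_t *m, hashmap_key_t key) {

  int created_new_element = 0;
  hashmap_value_t *value = hashmap_get_new(m, key, &created_new_element);

  /* Initialise freshly created slots before using them. */
  if (created_new_element) value->value_st = 0;
  value->value_st += 1;

  /* Do not hold on to 'value' across further insertions: a re-hash
   * (hashmap_grow) would invalidate the pointer, as the comment above warns. */
}
```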
......@@ -2,9 +2,9 @@
* A unit test and example of how to use the simple C hashmap
*/
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include "hashmap.h"
......@@ -12,87 +12,85 @@
#define KEY_PREFIX ("somekey")
#define KEY_COUNT (26 * 1024 * 1024)
typedef struct data_struct_s
{
char key_string[KEY_MAX_LENGTH];
int number;
typedef struct data_struct_s {
char key_string[KEY_MAX_LENGTH];
int number;
} data_struct_t;
int main(char* argv, int argc)
{
int index;
int error;
map_t mymap;
char key_string[KEY_MAX_LENGTH];
data_struct_t* value;
mymap = hashmap_new();
/* First, populate the hash map with ascending values */
for (index=0; index<KEY_COUNT; index+=1)
{
/* Store the key string along side the numerical value so we can free it later */
value = malloc(sizeof(data_struct_t));
snprintf(value->key_string, KEY_MAX_LENGTH, "%s%d", KEY_PREFIX, index);
value->number = index;
error = hashmap_put(mymap, value->key_string, value);
assert(error==MAP_OK);
}
/* Now, check all of the expected values are there */
for (index=0; index<KEY_COUNT; index+=1)
{
snprintf(key_string, KEY_MAX_LENGTH, "%s%d", KEY_PREFIX, index);
error = hashmap_get(mymap, key_string, (void**)(&value));
/* Make sure the value was both found and the correct number */
assert(error==MAP_OK);
assert(value->number==index);
}
/* Make sure that a value that wasn't in the map can't be found */
snprintf(key_string, KEY_MAX_LENGTH, "%s%d", KEY_PREFIX, KEY_COUNT);
int main(char* argv, int argc) {
int index;
int error;
map_t mymap;
char key_string[KEY_MAX_LENGTH];
data_struct_t* value;
error = hashmap_get(mymap, key_string, (void**)(&value));
/* Make sure the value was not found */
assert(error==MAP_MISSING);
mymap = hashmap_new();
/* First, populate the hash map with ascending values */
for (index = 0; index < KEY_COUNT; index += 1) {
/* Store the key string along side the numerical value so we can free it
* later */
value = malloc(sizeof(data_struct_t));
snprintf(value->key_string, KEY_MAX_LENGTH, "%d", 666);
value->number = 1;
snprintf(value->key_string, KEY_MAX_LENGTH, "%s%d", KEY_PREFIX, index);
value->number = index;
error = hashmap_put(mymap, value->key_string, value);
assert(error == MAP_OK);
}
snprintf(key_string, KEY_MAX_LENGTH, "%d", 666);
/* Now, check all of the expected values are there */
for (index = 0; index < KEY_COUNT; index += 1) {
snprintf(key_string, KEY_MAX_LENGTH, "%s%d", KEY_PREFIX, index);
error = hashmap_get(mymap, key_string, (void**)(&value));
printf("Value: %d hash table size: %d\n", value->number, hashmap_length(mymap));
value->number = 2;
error = hashmap_get(mymap, key_string, (void**)(&value));
printf("Value: %d hash table size: %d\n", value->number, hashmap_length(mymap));
/* Make sure the value was both found and the correct number */
assert(error == MAP_OK);
assert(value->number == index);
}
/* Make sure that a value that wasn't in the map can't be found */
snprintf(key_string, KEY_MAX_LENGTH, "%s%d", KEY_PREFIX, KEY_COUNT);
error = hashmap_get(mymap, key_string, (void**)(&value));
/* Make sure the value was not found */
assert(error == MAP_MISSING);
/* Free all of the values we allocated and remove them from the map */
for (index=0; index<KEY_COUNT; index+=1)
{
snprintf(key_string, KEY_MAX_LENGTH, "%s%d", KEY_PREFIX, index);
value = malloc(sizeof(data_struct_t));
snprintf(value->key_string, KEY_MAX_LENGTH, "%d", 666);
value->number = 1;
error = hashmap_put(mymap, value->key_string, value);
snprintf(key_string, KEY_MAX_LENGTH, "%d", 666);
error = hashmap_get(mymap, key_string, (void**)(&value));
printf("Value: %d hash table size: %d\n", value->number,
hashmap_length(mymap));
value->number = 2;
error = hashmap_get(mymap, key_string, (void**)(&value));
printf("Value: %d hash table size: %d\n", value->number,
hashmap_length(mymap));
/* Free all of the values we allocated and remove them from the map */
for (index = 0; index < KEY_COUNT; index += 1) {
snprintf(key_string, KEY_MAX_LENGTH, "%s%d", KEY_PREFIX, index);
error = hashmap_get(mymap, key_string, (void**)(&value));
assert(error == MAP_OK);
error = hashmap_get(mymap, key_string, (void**)(&value));
assert(error==MAP_OK);
error = hashmap_remove(mymap, key_string);
assert(error == MAP_OK);
error = hashmap_remove(mymap, key_string);
assert(error==MAP_OK);
free(value);
}
free(value);
}
/* Now, destroy the map */
hashmap_free(mymap);
/* Now, destroy the map */
hashmap_free(mymap);
return 1;
return 1;
}
......@@ -1158,8 +1158,8 @@ void io_copy_temp_buffer(void* temp, const struct engine* e,
/* Copy the whole thing into a buffer */
threadpool_map((struct threadpool*)&e->threadpool,
io_convert_part_i_mapper, temp_i, N, copySize, 0,
(void*)&props);
io_convert_part_i_mapper, temp_i, N, copySize, 0,
(void*)&props);
} else if (props.convert_part_d != NULL) {
......
......@@ -2613,9 +2613,11 @@ void engine_prepare(struct engine *e) {
message("Communicating rebuild flag took %.3f %s.",
clocks_from_ticks(getticks() - tic3), clocks_getunit());
/* Perform FOF search to seed black holes. Only if there is a rebuild coming and no repartitioing. */
if (e->policy & engine_policy_fof && e->forcerebuild &&
!e->forcerepart && e->run_fof) engine_fof(e);
/* Perform FOF search to seed black holes. Only if there is a rebuild coming
* and no repartitioing. */
if (e->policy & engine_policy_fof && e->forcerebuild && !e->forcerepart &&
e->run_fof)
engine_fof(e);
/* Do we need repartitioning ? */
if (e->forcerepart) {
......@@ -3774,8 +3776,9 @@ void engine_step(struct engine *e) {
e->forcerebuild = 1;
/* Trigger a FOF search every N steps. */
if (e->policy & engine_policy_fof && !(e->step % e->s->fof_data.run_freq)) e->run_fof = 1;
if (e->policy & engine_policy_fof && !(e->step % e->s->fof_data.run_freq))
e->run_fof = 1;
#ifdef WITH_LOGGER
/* Mark the current time step in the particle logger file. */
logger_log_timestamp(e->logger, e->ti_current, e->time,
......@@ -5084,8 +5087,8 @@ void engine_config(int restart, struct engine *e, struct swift_params *params,
/* Read the FOF search frequency. */
if (e->policy & engine_policy_fof) {
e->s->fof_data.run_freq = parser_get_opt_param_int(
params, "FOF:run_freq", 2000);
e->s->fof_data.run_freq =
parser_get_opt_param_int(params, "FOF:run_freq", 2000);
}
/* Deal with affinity. For now, just figure out the number of cores. */
......@@ -6282,15 +6285,14 @@ void engine_fof(struct engine *e) {
/* Perform local FOF tasks. */
engine_launch(e);
/* Perform FOF search over foreign particles and
/* Perform FOF search over foreign particles and
* find groups which require black hole seeding. */
fof_search_tree(e->s);
/* Reset flag. */
e->run_fof = 0;
if(e->verbose && engine_rank == 0)
if (e->verbose && engine_rank == 0)
message("Complete FOF search took: %.3f %s.",
clocks_from_ticks(getticks() - tic), clocks_getunit());
clocks_from_ticks(getticks() - tic), clocks_getunit());
}
......@@ -2581,7 +2581,7 @@ void engine_make_fof_tasks(struct engine *e) {
clocks_from_ticks(getticks() - tic), clocks_getunit());
tic = getticks();
/* Split the tasks. */
scheduler_splittasks(sched);
......@@ -2590,14 +2590,16 @@ void engine_make_fof_tasks(struct engine *e) {
clocks_from_ticks(getticks() - tic), clocks_getunit());
tic = getticks();
/* Activate all FOF tasks by default. */
for(int i=0; i<sched->nr_tasks; i++) {
for (int i = 0; i < sched->nr_tasks; i++) {
struct task *t = &sched->tasks[i];
if (t->type == task_type_fof_self || t->type == task_type_fof_pair) scheduler_activate(sched, t);
else t->skip = 1;
if (t->type == task_type_fof_self || t->type == task_type_fof_pair)
scheduler_activate(sched, t);
else
t->skip = 1;
}
if (e->verbose)
......@@ -2642,7 +2644,6 @@ void engine_make_fof_tasks(struct engine *e) {
if (e->verbose)
message("took %.3f %s (including reweight).",
clocks_from_ticks(getticks() - tic), clocks_getunit());
}
/**
......
......@@ -871,7 +871,7 @@ void engine_marktasks_mapper(void *map_data, int num_elements,
scheduler_activate(s, t);
cell_activate_super_spart_drifts(t->ci, s);
}
}
}
}
}
......
......@@ -61,19 +61,19 @@ struct fof {
double *group_mass;
long long *max_part_density_index;
float *max_part_density;
/*! The extra no. of black holes to seed locally. */
int extra_bh_seed_count;
/*! The FOF linking length squared. */
double l_x2;
/*! The minimum halo mass for black hole seeding. */
double seed_halo_mass;
/*! The no. of steps between each FOF search. */
int run_freq;
int num_groups;
size_t min_group_size;
size_t group_id_default;
......@@ -110,10 +110,11 @@ struct fof_final_mass {
float max_part_density;
} SWIFT_STRUCT_ALIGN;
/* Struct used to iterate over the hash table and unpack the mass fragments of a group when using MPI */
/* Struct used to iterate over the hash table and unpack the mass fragments of a
* group when using MPI */
struct fof_mass_send_hashmap {
struct fof_final_mass *mass_send;
size_t nsend;
size_t nsend;
} SWIFT_STRUCT_ALIGN;
#endif
......@@ -133,8 +134,8 @@ void fof_search_pair_cells_foreign(struct space *s, struct cell *ci,
struct fof_mpi **group_links,
int *group_links_size);
void fof_search_tree(struct space *s);
void fof_dump_group_data(char *out_file, struct space *s,
int num_groups, struct group_length *group_sizes);
void fof_dump_group_data(char *out_file, struct space *s, int num_groups,
struct group_length *group_sizes);
void rec_fof_search_self(struct cell *c, struct space *s, const double dim[3],
const double search_r2);
void rec_fof_search_pair(struct cell *restrict ci, struct cell *restrict cj,
......
......@@ -178,10 +178,9 @@ INLINE static void hydro_write_particles(const struct part* parts,
list[9] = io_make_output_field_convert_part("Potential", FLOAT, 1,
UNIT_CONV_POTENTIAL, parts,
xparts, convert_part_potential);
list[10] = io_make_output_field_convert_part("GroupIDs", INT, 1,
UNIT_CONV_NO_UNITS, parts,
xparts, convert_part_group_id);
list[10] =
io_make_output_field_convert_part("GroupIDs", INT, 1, UNIT_CONV_NO_UNITS,
parts, xparts, convert_part_group_id);
#ifdef DEBUG_INTERACTIONS_SPH
......
......@@ -41,8 +41,8 @@ typedef void (*conversion_func_part_float)(const struct engine*,
const struct part*,
const struct xpart*, float*);
typedef void (*conversion_func_part_int)(const struct engine*,
const struct part*,
const struct xpart*, int*);
const struct part*,
const struct xpart*, int*);
typedef void (*conversion_func_part_double)(const struct engine*,
const struct part*,
const struct xpart*, double*);
......
......@@ -1178,7 +1178,7 @@ void write_output_serial(struct engine* e, const char* baseName,
xpart_align,
Ngas_written * sizeof(struct xpart)) != 0)
error("Error while allocating temporary memory for xparts");
/* Collect the particles we want to write */
io_collect_parts_to_write(parts, xparts, parts_written,
xparts_written, Ngas, Ngas_written);
......
......@@ -92,7 +92,7 @@ const char *taskID_names[task_type_count] = {"none",
"bh_in",
"bh_out",
"bh_ghost",
"fof_self",
"fof_self",
"fof_pair"};
/* Sub-task type names. */
......
......@@ -33,9 +33,9 @@ int main(int argc, char *argv[]) {
message("Initialising hash table...");
hashmap_init(&m);
message("Populating hash table...");
for(hashmap_key_t key=0; key<NUM_KEYS; key++) {
for (hashmap_key_t key = 0; key < NUM_KEYS; key++) {
hashmap_value_t value;
value.value_st = key;
hashmap_put(&m, key, value);
......@@ -45,20 +45,25 @@ int main(int argc, char *argv[]) {
hashmap_print_stats(&m);
message("Retrieving elements from the hash table...");
for(hashmap_key_t key=0; key<NUM_KEYS; key++) {
for (hashmap_key_t key = 0; key < NUM_KEYS; key++) {
hashmap_value_t value = *hashmap_lookup(&m, key);
if(value.value_st != key) error("Incorrect value (%zu) found for key: %zu", value.value_st, key);
//else message("Retrieved element, Key: %zu Value: %zu", key, value);
if (value.value_st != key)
error("Incorrect value (%zu) found for key: %zu", value.value_st, key);
// else message("Retrieved element, Key: %zu Value: %zu", key, value);
}
message("Checking for invalid key...");
if(hashmap_lookup(&m, NUM_KEYS + 1) != NULL) error("Key: %d shouldn't exist or be created.", NUM_KEYS + 1);
message("Checking for invalid key...");