Commit 085f82c1 authored by James Willis

Fixed casting for icpc when compiling with MPI.

parent a8d7b01e
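Background for this change: ISO C allows a `void *` value to convert implicitly to any object-pointer type, but C++ does not, and Intel's icpc applies the C++ rule. When the MPI build routes these sources through icpc, the implicit conversions in the hunks below become hard errors, hence the explicit casts. A minimal sketch of the pattern being fixed (variable names are illustrative, not taken from SWIFT):

```c
#include <stdlib.h>

int main(void) {
  void *temp = malloc(4 * sizeof(double));
  if (temp == NULL) return 1;

  /* Implicit conversion from void* is valid C but ill-formed C++:
   *   double *temp_d = temp;        -- icpc rejects this
   * The explicit cast is accepted under both sets of rules: */
  double *temp_d = (double *)temp;
  for (size_t i = 0; i < 4; ++i) temp_d[i] = 0.0;

  free(temp);
  return 0;
}
```

The same rule explains every hunk in this commit: each cast makes explicit a conversion that a plain C compiler would have accepted silently.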
@@ -128,16 +128,16 @@ void readArray_chunk(hid_t h_data, hid_t h_plist_id,
     /* message("Converting ! factor=%e", factor); */
     if (io_is_double_precision(props.type)) {
-      double* temp_d = temp;
+      double* temp_d = (double *)temp;
       for (size_t i = 0; i < num_elements; ++i) temp_d[i] *= factor;
     } else {
-      float* temp_f = temp;
+      float* temp_f = (float *)temp;
       for (size_t i = 0; i < num_elements; ++i) temp_f[i] *= factor;
     }
   }
 
   /* Copy temporary buffer to particle data */
-  char* temp_c = temp;
+  char* temp_c = (char *)temp;
   for (size_t i = 0; i < N; ++i)
     memcpy(props.field + i * props.partSize, &temp_c[i * copySize], copySize);
@@ -640,7 +640,7 @@ void read_ic_parallel(char* fileName, const struct unit_system* internal_units,
   H5Gclose(h_grp);
 
   /* Read the unit system used in the ICs */
-  struct unit_system* ic_units = malloc(sizeof(struct unit_system));
+  struct unit_system* ic_units = (struct unit_system *)malloc(sizeof(struct unit_system));
   if (ic_units == NULL) error("Unable to allocate memory for IC unit system");
   io_read_unit_system(h_file, ic_units, mpi_rank);
@@ -683,7 +683,7 @@ void read_ic_parallel(char* fileName, const struct unit_system* internal_units,
   /* Allocate memory to store SPH particles */
   if (with_hydro) {
     *Ngas = N[0];
-    if (posix_memalign((void*)parts, part_align,
+    if (posix_memalign((void**)parts, part_align,
                        (*Ngas) * sizeof(struct part)) != 0)
       error("Error while allocating memory for particles");
     bzero(*parts, *Ngas * sizeof(struct part));
@@ -692,7 +692,7 @@ void read_ic_parallel(char* fileName, const struct unit_system* internal_units,
   /* Allocate memory to store star particles */
   if (with_stars) {
     *Nstars = N[swift_type_star];
-    if (posix_memalign((void*)sparts, spart_align,
+    if (posix_memalign((void**)sparts, spart_align,
                        *Nstars * sizeof(struct spart)) != 0)
       error("Error while allocating memory for star particles");
     bzero(*sparts, *Nstars * sizeof(struct spart));
@@ -704,7 +704,7 @@ void read_ic_parallel(char* fileName, const struct unit_system* internal_units,
   *Ngparts = (with_hydro ? N[swift_type_gas] : 0) +
              N[swift_type_dark_matter] +
              (with_stars ? N[swift_type_star] : 0);
-  if (posix_memalign((void*)gparts, gpart_align,
+  if (posix_memalign((void**)gparts, gpart_align,
                      *Ngparts * sizeof(struct gpart)) != 0)
     error("Error while allocating memory for gravity particles");
   bzero(*gparts, *Ngparts * sizeof(struct gpart));
@@ -1199,7 +1199,7 @@ void write_output_parallel(struct engine* e, const char* baseName,
       case swift_type_dark_matter:
 
         /* Allocate temporary array */
-        if (posix_memalign((void*)&dmparts, gpart_align,
+        if (posix_memalign((void**)&dmparts, gpart_align,
                            Ndm * sizeof(struct gpart)) != 0)
           error(
               "Error while allocating temporart memory for "
......
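The `(void**)` changes in the hunks above follow the same logic applied to `posix_memalign`, whose prototype is `int posix_memalign(void **memptr, size_t alignment, size_t size)`. The old `(void*)` cast produced a `void *` argument that then required an implicit conversion back to `void **`, which icpc rejects. A small self-contained sketch of the corrected pattern (the `alloc_parts` helper and this `struct part` layout are hypothetical simplifications, not SWIFT's definitions):

```c
#define _POSIX_C_SOURCE 200112L
#include <stdlib.h>
#include <string.h>

/* Stand-in for SWIFT's particle type; the fields are illustrative only. */
struct part { double x[3]; };

/* Hypothetical helper mirroring the allocation pattern in the diff. */
static struct part *alloc_parts(size_t n, size_t align) {
  struct part *parts = NULL;
  /* posix_memalign expects void**; the address of the typed pointer is
   * cast explicitly, since (void *) would compile as C but not as C++. */
  if (posix_memalign((void **)&parts, align, n * sizeof(struct part)) != 0)
    return NULL;
  memset(parts, 0, n * sizeof(struct part));  /* same role as bzero() above */
  return parts;
}

int main(void) {
  struct part *p = alloc_parts(128, 32);
  free(p);
  return (p == NULL);
}
```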
@@ -442,7 +442,7 @@ static void pick_metis(struct space *s, int nregions, double *vertexw,
    * of old and new ranks. Each element of the array has a cell count and
    * an unique index so we can sort into decreasing counts. */
   int indmax = nregions * nregions;
-  struct indexval *ivs = malloc(sizeof(struct indexval) * indmax);
+  struct indexval *ivs = (struct indexval *)malloc(sizeof(struct indexval) * indmax);
   bzero(ivs, sizeof(struct indexval) * indmax);
   for (int k = 0; k < ncells; k++) {
     int index = regionid[k] + nregions * s->cells_top[k].nodeID;
@@ -453,8 +453,8 @@ static void pick_metis(struct space *s, int nregions, double *vertexw,
   /* Go through the ivs using the largest counts first, these are the
    * regions with the most cells in common, old partition to new. */
-  int *oldmap = malloc(sizeof(int) * nregions);
-  int *newmap = malloc(sizeof(int) * nregions);
+  int *oldmap = (int *)malloc(sizeof(int) * nregions);
+  int *newmap = (int *)malloc(sizeof(int) * nregions);
   for (int k = 0; k < nregions; k++) {
     oldmap[k] = -1;
     newmap[k] = -1;
......
@@ -2002,7 +2002,7 @@ void *runner_main(void *data) {
           break;
         case task_type_recv:
           if (t->subtype == task_subtype_tend) {
-            cell_unpack_end_step(ci, t->buff);
+            cell_unpack_end_step(ci, (struct pcell_step *)t->buff);
             free(t->buff);
           } else if (t->subtype == task_subtype_xv) {
             runner_do_recv_part(r, ci, 1, 1);
@@ -2015,7 +2015,7 @@ void *runner_main(void *data) {
           } else if (t->subtype == task_subtype_spart) {
             runner_do_recv_spart(r, ci, 1);
           } else if (t->subtype == task_subtype_multipole) {
-            cell_unpack_multipoles(ci, t->buff);
+            cell_unpack_multipoles(ci, (struct gravity_tensors *)t->buff);
             free(t->buff);
           } else {
             error("Unknown/invalid task subtype (%d).", t->subtype);
......
@@ -1501,7 +1501,7 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
 #ifdef WITH_MPI
       if (t->subtype == task_subtype_tend) {
         t->buff = (struct pcell_step *)malloc(sizeof(struct pcell_step) * t->ci->pcell_size);
-        cell_pack_end_step(t->ci, t->buff);
+        cell_pack_end_step(t->ci, (struct pcell_step *)t->buff);
         if ((t->ci->pcell_size * sizeof(struct pcell_step)) >
             s->mpi_message_limit)
           err = MPI_Isend(
@@ -1539,7 +1539,7 @@ void scheduler_enqueue(struct scheduler *s, struct task *t) {
               t->cj->nodeID, t->flags, MPI_COMM_WORLD, &t->req);
       } else if (t->subtype == task_subtype_multipole) {
         t->buff = (struct gravity_tensors *)malloc(sizeof(struct gravity_tensors) * t->ci->pcell_size);
-        cell_pack_multipoles(t->ci, t->buff);
+        cell_pack_multipoles(t->ci, (struct gravity_tensors *)t->buff);
         err = MPI_Isend(
             t->buff, t->ci->pcell_size * sizeof(struct gravity_tensors),
             MPI_BYTE, t->cj->nodeID, t->flags, MPI_COMM_WORLD, &t->req);
......
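In the runner_main and scheduler_enqueue hunks above, `t->buff` is evidently a `void *` member of the task struct (inferred from the casts; its declaration is not shown in this diff), so each typed pack/unpack call needs an explicit cast at the call site. A rough stand-alone sketch of that round trip, with hypothetical simplified types in place of SWIFT's real `pcell_step` and packing helpers:

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for SWIFT's pcell_step and its pack helper;
 * the real definitions differ from these. */
struct pcell_step { long long ti_end_min; };

static void pack_end_step(struct pcell_step *buf, int n) {
  for (int i = 0; i < n; ++i) buf[i].ti_end_min = i;
}

struct task_like { void *buff; int pcell_size; };  /* mimics the void* buffer */

int main(void) {
  struct task_like t = { NULL, 8 };
  t.buff = malloc(sizeof(struct pcell_step) * t.pcell_size);
  if (t.buff == NULL) return 1;

  /* As in scheduler_enqueue: the void* member is cast back to its real
   * type at the call; icpc rejects the implicit conversion C allows. */
  pack_end_step((struct pcell_step *)t.buff, t.pcell_size);

  /* ...in SWIFT the buffer would then be handed to MPI_Isend as MPI_BYTE
   * data and unpacked with the matching cast on the receiving rank. */
  printf("last ti_end_min = %lld\n",
         ((struct pcell_step *)t.buff)[t.pcell_size - 1].ti_end_min);
  free(t.buff);
  return 0;
}
```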