/*******************************************************************************
 * This file is part of SWIFT.
 * Copyright (c) 2013 Pedro Gonnet (pedro.gonnet@durham.ac.uk)
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 ******************************************************************************/

/* Config parameters. */
#include "../config.h"

/* Some standard headers. */
#include <float.h>
#include <limits.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* MPI headers. */
#ifdef WITH_MPI
#include <mpi.h>
#endif

/* This object's header. */
#include "proxy.h"

/* Local headers. */
#include "cell.h"
#include "engine.h"
#include "error.h"
#include "memuse.h"
#include "space.h"
#include "threadpool.h"

#ifdef WITH_MPI

/* MPI data type for the communications */
MPI_Datatype pcell_mpi_type;
#endif

/**
 * @brief Exchange tags between nodes.
 *
 * Note that this function assumes that the cell structures have already
 * been exchanged, e.g. via #proxy_cells_exchange.
 *
 * @param proxies The list of #proxy that will send/recv tags
 * @param num_proxies The number of proxies.
 * @param s The space into which the tags will be unpacked.
 */
void proxy_tags_exchange(struct proxy *proxies, int num_proxies,
                         struct space *s) {

#ifdef WITH_MPI

  ticks tic2 = getticks();

  /* Run through the cells and get the size of the tags that will be sent off.
   */
  int count_out = 0;
  int *offset_out =
      (int *)swift_malloc("tags_offsets_out", s->nr_cells * sizeof(int));
  if (offset_out == NULL) error("Error allocating memory for tag offsets");

  for (int k = 0; k < s->nr_cells; k++) {
    offset_out[k] = count_out;
    if (s->cells_top[k].mpi.sendto) {
      count_out += s->cells_top[k].mpi.pcell_size;
    }
  }

  /* Run through the proxies and get the count of incoming tags. */
  int count_in = 0;
  int *offset_in =
      (int *)swift_malloc("tags_offsets_in", s->nr_cells * sizeof(int));
  if (offset_in == NULL) error("Error allocating memory for tag offsets");

  for (int k = 0; k < num_proxies; k++) {
    for (int j = 0; j < proxies[k].nr_cells_in; j++) {
      offset_in[proxies[k].cells_in[j] - s->cells_top] = count_in;
      count_in += proxies[k].cells_in[j]->mpi.pcell_size;
    }
  }

  /* Allocate the tags. */
  int *tags_in = NULL;
  int *tags_out = NULL;
  if (swift_memalign("tags_in", (void **)&tags_in, SWIFT_CACHE_ALIGNMENT,
                     sizeof(int) * count_in) != 0 ||
      swift_memalign("tags_out", (void **)&tags_out, SWIFT_CACHE_ALIGNMENT,
                     sizeof(int) * count_out) != 0)
    error("Failed to allocate tags buffers.");

  /* Pack the local tags. */
  for (int k = 0; k < s->nr_cells; k++) {
    if (s->cells_top[k].mpi.sendto) {
      cell_pack_tags(&s->cells_top[k], &tags_out[offset_out[k]]);
    }
  }

  if (s->e->verbose)
    message("Cell pack tags took %.3f %s.",
            clocks_from_ticks(getticks() - tic2), clocks_getunit());

  /* Allocate the incoming and outgoing request handles. */
  int num_reqs_out = 0;
  int num_reqs_in = 0;
  for (int k = 0; k < num_proxies; k++) {
    num_reqs_in += proxies[k].nr_cells_in;
    num_reqs_out += proxies[k].nr_cells_out;
  }
  MPI_Request *reqs_in = NULL;
  int *cids_in = NULL;
  if ((reqs_in = (MPI_Request *)malloc(sizeof(MPI_Request) *
                                       (num_reqs_in + num_reqs_out))) == NULL ||
      (cids_in = (int *)malloc(sizeof(int) * (num_reqs_in + num_reqs_out))) ==
          NULL)
    error("Failed to allocate MPI_Request arrays.");
  MPI_Request *reqs_out = &reqs_in[num_reqs_in];
  int *cids_out = &cids_in[num_reqs_in];

  /* Emit the sends and recvs. */
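  /* The index of the top-level cell doubles as the MPI tag, so the tag
     messages for different cells of the same proxy cannot be mismatched. */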
  for (int send_rid = 0, recv_rid = 0, k = 0; k < num_proxies; k++) {
    for (int j = 0; j < proxies[k].nr_cells_in; j++) {
      const int cid = proxies[k].cells_in[j] - s->cells_top;
      cids_in[recv_rid] = cid;
      int err = MPI_Irecv(
          &tags_in[offset_in[cid]], proxies[k].cells_in[j]->mpi.pcell_size,
          MPI_INT, proxies[k].nodeID, cid, MPI_COMM_WORLD, &reqs_in[recv_rid]);
      if (err != MPI_SUCCESS) mpi_error(err, "Failed to irecv tags.");
      recv_rid += 1;
    }
    for (int j = 0; j < proxies[k].nr_cells_out; j++) {
      const int cid = proxies[k].cells_out[j] - s->cells_top;
      cids_out[send_rid] = cid;
      int err = MPI_Isend(
          &tags_out[offset_out[cid]], proxies[k].cells_out[j]->mpi.pcell_size,
          MPI_INT, proxies[k].nodeID, cid, MPI_COMM_WORLD, &reqs_out[send_rid]);
      if (err != MPI_SUCCESS) mpi_error(err, "Failed to isend tags.");
      send_rid += 1;
    }
  }

  tic2 = getticks();

  /* Wait for each recv and unpack the tags into the local cells. */
  for (int k = 0; k < num_reqs_in; k++) {
    int pid = MPI_UNDEFINED;
    MPI_Status status;
    if (MPI_Waitany(num_reqs_in, reqs_in, &pid, &status) != MPI_SUCCESS ||
        pid == MPI_UNDEFINED)
      error("MPI_Waitany failed.");
    const int cid = cids_in[pid];
    cell_unpack_tags(&tags_in[offset_in[cid]], &s->cells_top[cid]);
  }

  if (s->e->verbose)
    message("Cell unpack tags took %.3f %s.",
            clocks_from_ticks(getticks() - tic2), clocks_getunit());

  /* Wait for all the sends to have completed. */
  if (MPI_Waitall(num_reqs_out, reqs_out, MPI_STATUSES_IGNORE) != MPI_SUCCESS)
    error("MPI_Waitall on sends failed.");

  /* Clean up. */
  swift_free("tags_in", tags_in);
  swift_free("tags_out", tags_out);
  swift_free("tags_offsets_in", offset_in);
  swift_free("tags_offsets_out", offset_out);
  free(reqs_in);
  free(cids_in);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Exchange cells with a remote node, first part.
 *
 * The first part of the transaction sends the local cell count and the packed
 * #pcell array to the destination node, and enqueues an @c MPI_Irecv for
 * the foreign cell counts.
 *
 * @param p The #proxy.
 */
void proxy_cells_exchange_first(struct proxy *p) {

#ifdef WITH_MPI

  /* Get the number of pcells we will need to send. */
  p->size_pcells_out = 0;
  for (int k = 0; k < p->nr_cells_out; k++)
    p->size_pcells_out += p->cells_out[k]->mpi.pcell_size;

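  /* MPI tags encode the sending rank and the message type: a message of type
     t sent by rank r carries tag r * proxy_tag_shift + t. Hence the sends
     below use p->mynodeID while the matching receives use p->nodeID. */
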
  /* Send the number of pcells. */
  int err = MPI_Isend(&p->size_pcells_out, 1, MPI_INT, p->nodeID,
                      p->mynodeID * proxy_tag_shift + proxy_tag_count,
                      MPI_COMM_WORLD, &p->req_cells_count_out);
  if (err != MPI_SUCCESS) mpi_error(err, "Failed to isend nr of pcells.");
  // message( "isent pcell count (%i) from node %i to node %i." ,
  // p->size_pcells_out , p->mynodeID , p->nodeID ); fflush(stdout);

  /* Allocate and fill the pcell buffer. */
  if (p->pcells_out != NULL) swift_free("pcells_out", p->pcells_out);
  if (swift_memalign("pcells_out", (void **)&p->pcells_out,
                     SWIFT_STRUCT_ALIGNMENT,
                     sizeof(struct pcell) * p->size_pcells_out) != 0)
    error("Failed to allocate pcell_out buffer.");

  for (int ind = 0, k = 0; k < p->nr_cells_out; k++) {
    memcpy(&p->pcells_out[ind], p->cells_out[k]->mpi.pcell,
           sizeof(struct pcell) * p->cells_out[k]->mpi.pcell_size);
    ind += p->cells_out[k]->mpi.pcell_size;
  }

  /* Send the pcell buffer. */
  err = MPI_Isend(p->pcells_out, p->size_pcells_out, pcell_mpi_type, p->nodeID,
                  p->mynodeID * proxy_tag_shift + proxy_tag_cells,
                  MPI_COMM_WORLD, &p->req_cells_out);
  if (err != MPI_SUCCESS)
    mpi_error(err, "Failed to isend the pcell_out buffer.");
  // message( "isent pcells (%i) from node %i to node %i." , p->size_pcells_out
  // , p->mynodeID , p->nodeID ); fflush(stdout);

  /* Receive the number of pcells. */
  err = MPI_Irecv(&p->size_pcells_in, 1, MPI_INT, p->nodeID,
                  p->nodeID * proxy_tag_shift + proxy_tag_count, MPI_COMM_WORLD,
                  &p->req_cells_count_in);
  if (err != MPI_SUCCESS) mpi_error(err, "Failed to irecv nr of pcells.");
    // message( "irecv pcells count on node %i from node %i." , p->mynodeID ,
    // p->nodeID ); fflush(stdout);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Exchange cells with a remote node, second part.
 *
 * Once the incoming cell count has been received, allocate a buffer
 * for the foreign packed #pcell array and emit the @c MPI_Irecv for
 * it.
 *
 * @param p The #proxy.
 */
void proxy_cells_exchange_second(struct proxy *p) {

#ifdef WITH_MPI

  /* Re-allocate the pcell_in buffer. */
  if (p->pcells_in != NULL) swift_free("pcells_in", p->pcells_in);
  if (swift_memalign("pcells_in", (void **)&p->pcells_in,
                     SWIFT_STRUCT_ALIGNMENT,
                     sizeof(struct pcell) * p->size_pcells_in) != 0)
    error("Failed to allocate pcell_in buffer.");

  /* Receive the particle buffers. */
  int err = MPI_Irecv(p->pcells_in, p->size_pcells_in, pcell_mpi_type,
                      p->nodeID, p->nodeID * proxy_tag_shift + proxy_tag_cells,
                      MPI_COMM_WORLD, &p->req_cells_in);

  if (err != MPI_SUCCESS) mpi_error(err, "Failed to irecv part data.");
    // message( "irecv pcells (%i) on node %i from node %i." , p->size_pcells_in
    // , p->mynodeID , p->nodeID ); fflush(stdout);

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

#ifdef WITH_MPI

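/**
 * @brief #threadpool mapper function to compute the packed size of each
 * top-level cell flagged for sending to a foreign node.
 */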
void proxy_cells_count_mapper(void *map_data, int num_elements,
                              void *extra_data) {
  struct cell *cells = (struct cell *)map_data;

  for (int k = 0; k < num_elements; k++) {
    if (cells[k].mpi.sendto) cells[k].mpi.pcell_size = cell_getsize(&cells[k]);
  }
}

struct pack_mapper_data {
  struct space *s;
  int *offset;
  struct pcell *pcells;
  int with_gravity;
};

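/**
 * @brief #threadpool mapper function to pack each cell flagged for sending
 * into its slot of the shared pcell buffer.
 */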
void proxy_cells_pack_mapper(void *map_data, int num_elements,
                             void *extra_data) {
  struct cell *cells = (struct cell *)map_data;
  struct pack_mapper_data *data = (struct pack_mapper_data *)extra_data;

  for (int k = 0; k < num_elements; k++) {
    if (cells[k].mpi.sendto) {
      ptrdiff_t ind = &cells[k] - data->s->cells_top;
      cells[k].mpi.pcell = &data->pcells[data->offset[ind]];
      cell_pack(&cells[k], cells[k].mpi.pcell, data->with_gravity);
    }
  }
}

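/**
 * @brief #threadpool mapper function launching the first phase of the cell
 * exchange (proxy_cells_exchange_first()) for each proxy.
 */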
void proxy_cells_exchange_first_mapper(void *map_data, int num_elements,
                                       void *extra_data) {
  struct proxy *proxies = (struct proxy *)map_data;

  for (int k = 0; k < num_elements; k++) {
    proxy_cells_exchange_first(&proxies[k]);
  }
}

struct wait_and_unpack_mapper_data {
  struct space *s;
  int num_proxies;
  MPI_Request *reqs_in;
  struct proxy *proxies;
  int with_gravity;
  swift_lock_type lock;
};

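/**
 * @brief #threadpool mapper function waiting for incoming pcell messages and
 * unpacking them into the local #space. Currently unused, see the note in the
 * function body.
 */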
void proxy_cells_wait_and_unpack_mapper(void *unused_map_data, int num_elements,
                                        void *extra_data) {

  // MATTHIEU: This is currently unused. Scalar (non-threadpool) version is
  // faster but we still need to explore why this happens.

  struct wait_and_unpack_mapper_data *data =
      (struct wait_and_unpack_mapper_data *)extra_data;

  for (int k = 0; k < num_elements; k++) {
    int pid = MPI_UNDEFINED;
    MPI_Status status;
    int res;

    /* We need a lock to prevent concurrent calls to MPI_Waitany on
       the same array of requests since this is not supported in the MPI
       standard (v3.1). This is not really a problem since the threads
       would block inside MPI_Waitany anyway. */
    lock_lock(&data->lock);
    if ((res = MPI_Waitany(data->num_proxies, data->reqs_in, &pid, &status)) !=
            MPI_SUCCESS ||
        pid == MPI_UNDEFINED)
      mpi_error(res, "MPI_Waitany failed.");
    if (lock_unlock(&data->lock) != 0) {
      error("Failed to release lock.");
    }

    // message( "cell data from proxy %i has arrived." , pid );
    for (int count = 0, j = 0; j < data->proxies[pid].nr_cells_in; j++)
      count += cell_unpack(&data->proxies[pid].pcells_in[count],
                           data->proxies[pid].cells_in[j], data->s,
                           data->with_gravity);
  }
}

#endif  // WITH_MPI

/**
 * @brief Exchange the cell structures with all proxies.
 *
 * @param proxies The list of #proxy that will send/recv cells.
 * @param num_proxies The number of proxies.
 * @param s The space into which the cells will be unpacked.
 * @param with_gravity Are we running with gravity and hence need
 *      to exchange multipoles?
 */
void proxy_cells_exchange(struct proxy *proxies, int num_proxies,
                          struct space *s, const int with_gravity) {

#ifdef WITH_MPI

  MPI_Request *reqs;
  if ((reqs = (MPI_Request *)malloc(sizeof(MPI_Request) * 2 * num_proxies)) ==
      NULL)
    error("Failed to allocate request buffers.");
  MPI_Request *reqs_in = reqs;
  MPI_Request *reqs_out = &reqs[num_proxies];

  ticks tic2 = getticks();

  /* Run through the cells and get the size of the ones that will be sent off.
   */
  threadpool_map(&s->e->threadpool, proxy_cells_count_mapper, s->cells_top,
                 s->nr_cells, sizeof(struct cell), threadpool_auto_chunk_size,
                 /*extra_data=*/NULL);
  int count_out = 0;
  int *offset =
      (int *)swift_malloc("proxy_cell_offset", s->nr_cells * sizeof(int));
  if (offset == NULL) error("Error allocating memory for proxy cell offsets");

  for (int k = 0; k < s->nr_cells; k++) {
    offset[k] = count_out;
    if (s->cells_top[k].mpi.sendto) count_out += s->cells_top[k].mpi.pcell_size;
  }

  if (s->e->verbose)
    message("Counting cells to send took %.3f %s.",
            clocks_from_ticks(getticks() - tic2), clocks_getunit());

  /* Allocate the pcells. */
  struct pcell *pcells = NULL;
  if (swift_memalign("pcells", (void **)&pcells, SWIFT_CACHE_ALIGNMENT,
                     sizeof(struct pcell) * count_out) != 0)
    error("Failed to allocate pcell buffer.");

  tic2 = getticks();

  /* Pack the cells. */
  struct pack_mapper_data data = {s, offset, pcells, with_gravity};
  threadpool_map(&s->e->threadpool, proxy_cells_pack_mapper, s->cells_top,
                 s->nr_cells, sizeof(struct cell), threadpool_auto_chunk_size,
                 &data);

  if (s->e->verbose)
    message("Packing cells took %.3f %s.", clocks_from_ticks(getticks() - tic2),
            clocks_getunit());

  /* Launch the first part of the exchange. */
  threadpool_map(&s->e->threadpool, proxy_cells_exchange_first_mapper, proxies,
                 num_proxies, sizeof(struct proxy), threadpool_auto_chunk_size,
                 /*extra_data=*/NULL);
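
  /* Gather the pcell-count requests from all proxies into contiguous arrays
     so they can be waited on with a single MPI_Waitany / MPI_Waitall. */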
  for (int k = 0; k < num_proxies; k++) {
    reqs_in[k] = proxies[k].req_cells_count_in;
    reqs_out[k] = proxies[k].req_cells_count_out;
  }

  /* Wait for each count to come in and start the recv. */
  for (int k = 0; k < num_proxies; k++) {
    int pid = MPI_UNDEFINED;
    MPI_Status status;
    if (MPI_Waitany(num_proxies, reqs_in, &pid, &status) != MPI_SUCCESS ||
        pid == MPI_UNDEFINED)
      error("MPI_Waitany failed.");
    // message( "request from proxy %i has arrived." , pid );
    proxy_cells_exchange_second(&proxies[pid]);
  }

  /* Wait for all the sends to have finished too. */
  if (MPI_Waitall(num_proxies, reqs_out, MPI_STATUSES_IGNORE) != MPI_SUCCESS)
    error("MPI_Waitall on sends failed.");

  /* Set the requests for the cells. */
  for (int k = 0; k < num_proxies; k++) {
    reqs_in[k] = proxies[k].req_cells_in;
    reqs_out[k] = proxies[k].req_cells_out;
  }

  tic2 = getticks();

  /* Wait for each pcell array to come in from the proxies. */
  for (int k = 0; k < num_proxies; k++) {
    int pid = MPI_UNDEFINED;
    MPI_Status status;
    if (MPI_Waitany(num_proxies, reqs_in, &pid, &status) != MPI_SUCCESS ||
        pid == MPI_UNDEFINED)
      error("MPI_Waitany failed.");
    // message( "cell data from proxy %i has arrived." , pid );
    for (int count = 0, j = 0; j < proxies[pid].nr_cells_in; j++)
      count += cell_unpack(&proxies[pid].pcells_in[count],
                           proxies[pid].cells_in[j], s, with_gravity);
  }

  if (s->e->verbose)
    message("Un-packing cells took %.3f %s.",
            clocks_from_ticks(getticks() - tic2), clocks_getunit());

  /* Wait for all the sends to have finished too. */
  if (MPI_Waitall(num_proxies, reqs_out, MPI_STATUSES_IGNORE) != MPI_SUCCESS)
    error("MPI_Waitall on sends failed.");

  /* Clean up. */
  free(reqs);
  swift_free("pcells", pcells);
  swift_free("proxy_cell_offset", offset);
  for (int k = 0; k < num_proxies; k++) {
    swift_free("pcells_in", proxies[k].pcells_in);
    swift_free("pcells_out", proxies[k].pcells_out);
  }

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Add a cell to the given proxy's input list.
 *
 * @param p The #proxy.
 * @param c The #cell.
 * @param type Why is this cell in the proxy (hydro, gravity, ...)?
 */
void proxy_addcell_in(struct proxy *p, struct cell *c, int type) {

  if (type == proxy_cell_type_none) error("Invalid type for proxy");

  /* Check if the cell is already registered with the proxy. */
  for (int k = 0; k < p->nr_cells_in; k++)
    if (p->cells_in[k] == c) {

      /* Update the type */
      p->cells_in_type[k] |= type;
      return;
    }

  /* Do we need to grow the number of in cells? */
  if (p->nr_cells_in == p->size_cells_in) {

    p->size_cells_in *= proxy_buffgrow;

    struct cell **temp_cell;
    if ((temp_cell = (struct cell **)swift_malloc(
             "cells_in", sizeof(struct cell *) * p->size_cells_in)) == NULL)
      error("Failed to allocate incoming cell list.");
    memcpy(temp_cell, p->cells_in, sizeof(struct cell *) * p->nr_cells_in);
    swift_free("cells_in", p->cells_in);
    p->cells_in = temp_cell;

    int *temp_type;
    if ((temp_type = (int *)swift_malloc(
             "cells_in_type", sizeof(int) * p->size_cells_in)) == NULL)
      error("Failed to allocate incoming cell type list.");
    memcpy(temp_type, p->cells_in_type, sizeof(int) * p->nr_cells_in);
    swift_free("cells_in_type", p->cells_in_type);
    p->cells_in_type = temp_type;
  }

  /* Add the cell. */
  p->cells_in[p->nr_cells_in] = c;
  p->cells_in_type[p->nr_cells_in] = type;
  p->nr_cells_in += 1;
}

/**
 * @brief Add a cell to the given proxy's output list.
 *
 * @param p The #proxy.
 * @param c The #cell.
 * @param type Why is this cell in the proxy (hydro, gravity, ...)?
 */
void proxy_addcell_out(struct proxy *p, struct cell *c, int type) {

  if (type == proxy_cell_type_none) error("Invalid type for proxy");

  /* Check if the cell is already registered with the proxy. */
  for (int k = 0; k < p->nr_cells_out; k++)
    if (p->cells_out[k] == c) {

      /* Update the type */
      p->cells_out_type[k] |= type;
      return;
    }

  /* Do we need to grow the number of out cells? */
  if (p->nr_cells_out == p->size_cells_out) {
    p->size_cells_out *= proxy_buffgrow;

    struct cell **temp_cell;
    if ((temp_cell = (struct cell **)swift_malloc(
             "cells_out", sizeof(struct cell *) * p->size_cells_out)) == NULL)
      error("Failed to allocate outgoing cell list.");
    memcpy(temp_cell, p->cells_out, sizeof(struct cell *) * p->nr_cells_out);
    swift_free("cells_out", p->cells_out);
    p->cells_out = temp_cell;

    int *temp_type;
    if ((temp_type = (int *)swift_malloc(
             "cells_out_type", sizeof(int) * p->size_cells_out)) == NULL)
      error("Failed to allocate outgoing cell type list.");
    memcpy(temp_type, p->cells_out_type, sizeof(int) * p->nr_cells_out);
    swift_free("cells_out_type", p->cells_out_type);
    p->cells_out_type = temp_type;
  }

  /* Add the cell. */
  p->cells_out[p->nr_cells_out] = c;
  p->cells_out_type[p->nr_cells_out] = type;
  p->nr_cells_out += 1;
}

/**
 * @brief Exchange particles with a remote node.
 *
 * @param p The #proxy.
 */
void proxy_parts_exchange_first(struct proxy *p) {

#ifdef WITH_MPI

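  /* First half of the particle exchange: ship the particle counts and the
     particle payloads, and post the receive for the remote counts. The
     matching receives for the payloads are posted in
     proxy_parts_exchange_second() once those counts have arrived. */
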
  /* Send the number of particles. */
  p->buff_out[0] = p->nr_parts_out;
  p->buff_out[1] = p->nr_gparts_out;
  p->buff_out[2] = p->nr_sparts_out;
  p->buff_out[3] = p->nr_bparts_out;
  if (MPI_Isend(p->buff_out, 4, MPI_INT, p->nodeID,
                p->mynodeID * proxy_tag_shift + proxy_tag_count, MPI_COMM_WORLD,
                &p->req_parts_count_out) != MPI_SUCCESS)
    error("Failed to isend nr of parts.");

  /* message( "isent particle counts [%i, %i] from node %i to node %i." ,
  p->buff_out[0], p->buff_out[1], p->mynodeID , p->nodeID ); fflush(stdout); */

  /* Send the particle buffers. */
  if (p->nr_parts_out > 0) {
    if (MPI_Isend(p->parts_out, p->nr_parts_out, part_mpi_type, p->nodeID,
                  p->mynodeID * proxy_tag_shift + proxy_tag_parts,
                  MPI_COMM_WORLD, &p->req_parts_out) != MPI_SUCCESS ||
        MPI_Isend(p->xparts_out, p->nr_parts_out, xpart_mpi_type, p->nodeID,
                  p->mynodeID * proxy_tag_shift + proxy_tag_xparts,
                  MPI_COMM_WORLD, &p->req_xparts_out) != MPI_SUCCESS)
      error("Failed to isend part data.");
    // message( "isent particle data (%i) to node %i." , p->nr_parts_out ,
    // p->nodeID ); fflush(stdout);
    /*for (int k = 0; k < p->nr_parts_out; k++)
      message("sending particle %lli, x=[%.3e %.3e %.3e], h=%.3e, to node %i.",
              p->parts_out[k].id, p->parts_out[k].x[0], p->parts_out[k].x[1],
              p->parts_out[k].x[2], p->parts_out[k].h, p->nodeID);*/
  }
  if (p->nr_gparts_out > 0) {
    if (MPI_Isend(p->gparts_out, p->nr_gparts_out, gpart_mpi_type, p->nodeID,
                  p->mynodeID * proxy_tag_shift + proxy_tag_gparts,
                  MPI_COMM_WORLD, &p->req_gparts_out) != MPI_SUCCESS)
      error("Failed to isend gpart data.");
    // message( "isent gpart data (%i) to node %i." , p->nr_gparts_out ,
    // p->nodeID ); fflush(stdout);
  }

  if (p->nr_sparts_out > 0) {
    if (MPI_Isend(p->sparts_out, p->nr_sparts_out, spart_mpi_type, p->nodeID,
                  p->mynodeID * proxy_tag_shift + proxy_tag_sparts,
                  MPI_COMM_WORLD, &p->req_sparts_out) != MPI_SUCCESS)
      error("Failed to isend spart data.");
    // message( "isent spart data (%i) to node %i." , p->nr_sparts_out ,
    // p->nodeID ); fflush(stdout);
  }
  if (p->nr_bparts_out > 0) {
    if (MPI_Isend(p->bparts_out, p->nr_bparts_out, bpart_mpi_type, p->nodeID,
                  p->mynodeID * proxy_tag_shift + proxy_tag_bparts,
                  MPI_COMM_WORLD, &p->req_bparts_out) != MPI_SUCCESS)
      error("Failed to isend bpart data.");
    // message( "isent bpart data (%i) to node %i." , p->nr_bparts_out ,
    // p->nodeID ); fflush(stdout);
  }

  /* Receive the number of particles. */
  if (MPI_Irecv(p->buff_in, 4, MPI_INT, p->nodeID,
                p->nodeID * proxy_tag_shift + proxy_tag_count, MPI_COMM_WORLD,
                &p->req_parts_count_in) != MPI_SUCCESS)
    error("Failed to irecv nr of parts.");

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

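/**
 * @brief Exchange particles with a remote node, second part.
 *
 * Once the incoming particle counts have arrived, grow the receive buffers
 * if needed and post the receives for the particle data.
 *
 * @param p The #proxy.
 */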
void proxy_parts_exchange_second(struct proxy *p) {

#ifdef WITH_MPI

  /* Unpack the incoming particle counts. */
  p->nr_parts_in = p->buff_in[0];
  p->nr_gparts_in = p->buff_in[1];
  p->nr_sparts_in = p->buff_in[2];
  p->nr_bparts_in = p->buff_in[3];

  /* Is there enough space in the buffers? */
  if (p->nr_parts_in > p->size_parts_in) {
    do {
      p->size_parts_in *= proxy_buffgrow;
    } while (p->nr_parts_in > p->size_parts_in);
    swift_free("parts_in", p->parts_in);
    swift_free("xparts_in", p->xparts_in);
    if ((p->parts_in = (struct part *)swift_malloc(
             "parts_in", sizeof(struct part) * p->size_parts_in)) == NULL ||
        (p->xparts_in = (struct xpart *)swift_malloc(
             "xparts_in", sizeof(struct xpart) * p->size_parts_in)) == NULL)
      error("Failed to re-allocate parts_in buffers.");
  }
  if (p->nr_gparts_in > p->size_gparts_in) {
    do {
      p->size_gparts_in *= proxy_buffgrow;
    } while (p->nr_gparts_in > p->size_gparts_in);
    swift_free("gparts_in", p->gparts_in);
    if ((p->gparts_in = (struct gpart *)swift_malloc(
             "gparts_in", sizeof(struct gpart) * p->size_gparts_in)) == NULL)
      error("Failed to re-allocate gparts_in buffers.");
  }
  if (p->nr_sparts_in > p->size_sparts_in) {
    do {
      p->size_sparts_in *= proxy_buffgrow;
    } while (p->nr_sparts_in > p->size_sparts_in);
    swift_free("sparts_in", p->sparts_in);
    if ((p->sparts_in = (struct spart *)swift_malloc(
             "sparts_in", sizeof(struct spart) * p->size_sparts_in)) == NULL)
      error("Failed to re-allocate sparts_in buffers.");
  }
  if (p->nr_bparts_in > p->size_bparts_in) {
    do {
      p->size_bparts_in *= proxy_buffgrow;
    } while (p->nr_bparts_in > p->size_bparts_in);
    swift_free("bparts_in", p->bparts_in);
    if ((p->bparts_in = (struct bpart *)swift_malloc(
             "bparts_in", sizeof(struct bpart) * p->size_bparts_in)) == NULL)
      error("Failed to re-allocate bparts_in buffers.");
  }

  /* Receive the particle buffers. */
  if (p->nr_parts_in > 0) {
    if (MPI_Irecv(p->parts_in, p->nr_parts_in, part_mpi_type, p->nodeID,
                  p->nodeID * proxy_tag_shift + proxy_tag_parts, MPI_COMM_WORLD,
                  &p->req_parts_in) != MPI_SUCCESS ||
        MPI_Irecv(p->xparts_in, p->nr_parts_in, xpart_mpi_type, p->nodeID,
                  p->nodeID * proxy_tag_shift + proxy_tag_xparts,
                  MPI_COMM_WORLD, &p->req_xparts_in) != MPI_SUCCESS)
      error("Failed to irecv part data.");
    // message( "irecv particle data (%i) from node %i." , p->nr_parts_in ,
    // p->nodeID ); fflush(stdout);
  }
  if (p->nr_gparts_in > 0) {
    if (MPI_Irecv(p->gparts_in, p->nr_gparts_in, gpart_mpi_type, p->nodeID,
                  p->nodeID * proxy_tag_shift + proxy_tag_gparts,
                  MPI_COMM_WORLD, &p->req_gparts_in) != MPI_SUCCESS)
      error("Failed to irecv gpart data.");
    // message( "irecv gpart data (%i) from node %i." , p->nr_gparts_in ,
    // p->nodeID ); fflush(stdout);
  }
  if (p->nr_sparts_in > 0) {
    if (MPI_Irecv(p->sparts_in, p->nr_sparts_in, spart_mpi_type, p->nodeID,
                  p->nodeID * proxy_tag_shift + proxy_tag_sparts,
                  MPI_COMM_WORLD, &p->req_sparts_in) != MPI_SUCCESS)
      error("Failed to irecv spart data.");
    // message( "irecv spart data (%i) from node %i." , p->nr_sparts_in ,
    // p->nodeID ); fflush(stdout);
  }
  if (p->nr_bparts_in > 0) {
    if (MPI_Irecv(p->bparts_in, p->nr_bparts_in, bpart_mpi_type, p->nodeID,
                  p->nodeID * proxy_tag_shift + proxy_tag_bparts,
                  MPI_COMM_WORLD, &p->req_bparts_in) != MPI_SUCCESS)
      error("Failed to irecv bpart data.");
    // message( "irecv bpart data (%i) from node %i." , p->nr_bparts_in ,
    // p->nodeID ); fflush(stdout);
  }

#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

/**
 * @brief Load parts onto a proxy for exchange.
 *
 * @param p The #proxy.
 * @param parts Pointer to an array of #part to send.
 * @param xparts Pointer to an array of #xpart to send.
 * @param N The number of parts.
 */
void proxy_parts_load(struct proxy *p, const struct part *parts,
                      const struct xpart *xparts, int N) {

  /* Is there enough space in the buffer? */
  if (p->nr_parts_out + N > p->size_parts_out) {
    do {
      p->size_parts_out *= proxy_buffgrow;
    } while (p->nr_parts_out + N > p->size_parts_out);
    struct part *tp = NULL;
    struct xpart *txp = NULL;
    if ((tp = (struct part *)swift_malloc(
             "parts_out", sizeof(struct part) * p->size_parts_out)) == NULL ||
        (txp = (struct xpart *)swift_malloc(
             "xparts_out", sizeof(struct xpart) * p->size_parts_out)) == NULL)
      error("Failed to re-allocate parts_out buffers.");
    memcpy(tp, p->parts_out, sizeof(struct part) * p->nr_parts_out);
    memcpy(txp, p->xparts_out, sizeof(struct xpart) * p->nr_parts_out);
    swift_free("parts_out", p->parts_out);
    swift_free("xparts_out", p->xparts_out);
    p->parts_out = tp;
    p->xparts_out = txp;
  }

  /* Copy the parts and xparts data to the buffer. */
  memcpy(&p->parts_out[p->nr_parts_out], parts, sizeof(struct part) * N);
  memcpy(&p->xparts_out[p->nr_parts_out], xparts, sizeof(struct xpart) * N);

  /* Increase the counters. */
  p->nr_parts_out += N;
}

/**
 * @brief Load gparts onto a proxy for exchange.
 *
 * @param p The #proxy.
 * @param gparts Pointer to an array of #gpart to send.
 * @param N The number of gparts.
 */
void proxy_gparts_load(struct proxy *p, const struct gpart *gparts, int N) {

  /* Is there enough space in the buffer? */
  if (p->nr_gparts_out + N > p->size_gparts_out) {
    do {
      p->size_gparts_out *= proxy_buffgrow;
    } while (p->nr_gparts_out + N > p->size_gparts_out);
    struct gpart *tp;
    if ((tp = (struct gpart *)swift_malloc(
             "gparts_out", sizeof(struct gpart) * p->size_gparts_out)) == NULL)
      error("Failed to re-allocate gparts_out buffers.");
    memcpy(tp, p->gparts_out, sizeof(struct gpart) * p->nr_gparts_out);
    swift_free("gparts_out", p->gparts_out);
    p->gparts_out = tp;
  }

  /* Copy the gparts data to the buffer. */
  memcpy(&p->gparts_out[p->nr_gparts_out], gparts, sizeof(struct gpart) * N);

  /* Increase the counters. */
  p->nr_gparts_out += N;
}

/**
 * @brief Load sparts onto a proxy for exchange.
 *
 * @param p The #proxy.
 * @param sparts Pointer to an array of #spart to send.
 * @param N The number of sparts.
 */
void proxy_sparts_load(struct proxy *p, const struct spart *sparts, int N) {

  /* Is there enough space in the buffer? */
  if (p->nr_sparts_out + N > p->size_sparts_out) {
    do {
      p->size_sparts_out *= proxy_buffgrow;
    } while (p->nr_sparts_out + N > p->size_sparts_out);
    struct spart *tp;
    if ((tp = (struct spart *)swift_malloc(
             "sparts_out", sizeof(struct spart) * p->size_sparts_out)) == NULL)
      error("Failed to re-allocate sparts_out buffers.");
    memcpy(tp, p->sparts_out, sizeof(struct spart) * p->nr_sparts_out);
    swift_free("sparts_out", p->sparts_out);
    p->sparts_out = tp;
  }

  /* Copy the sparts data to the buffer. */
  memcpy(&p->sparts_out[p->nr_sparts_out], sparts, sizeof(struct spart) * N);

  /* Increase the counters. */
  p->nr_sparts_out += N;
}

/**
 * @brief Load bparts onto a proxy for exchange.
 *
 * @param p The #proxy.
 * @param bparts Pointer to an array of #bpart to send.
 * @param N The number of bparts.
 */
void proxy_bparts_load(struct proxy *p, const struct bpart *bparts, int N) {

  /* Is there enough space in the buffer? */
  if (p->nr_bparts_out + N > p->size_bparts_out) {
    do {
      p->size_bparts_out *= proxy_buffgrow;
    } while (p->nr_bparts_out + N > p->size_bparts_out);
    struct bpart *tp;
    if ((tp = (struct bpart *)swift_malloc(
             "bparts_out", sizeof(struct bpart) * p->size_bparts_out)) == NULL)
      error("Failed to re-allocate bparts_out buffers.");
    memcpy(tp, p->bparts_out, sizeof(struct bpart) * p->nr_bparts_out);
    swift_free("bparts_out", p->bparts_out);
    p->bparts_out = tp;
  }

  /* Copy the bparts data to the buffer. */
  memcpy(&p->bparts_out[p->nr_bparts_out], bparts, sizeof(struct bpart) * N);

  /* Increase the counters. */
  p->nr_bparts_out += N;
}

/**
 * @brief Frees the memory allocated for the particle proxies and sets their
 * size back to the initial state.
 *
 * @param p The #proxy.
 */
void proxy_free_particle_buffers(struct proxy *p) {

  if (p->size_parts_out > proxy_buffinit) {
    swift_free("parts_out", p->parts_out);
    p->size_parts_out = proxy_buffinit;
    if ((p->parts_out = (struct part *)swift_malloc(
             "parts_out", sizeof(struct part) * p->size_parts_out)) == NULL)
      error("Failed to allocate parts_out buffers.");
    if ((p->xparts_out = (struct xpart *)swift_malloc(
             "xparts_out", sizeof(struct xpart) * p->size_parts_out)) == NULL)
      error("Failed to allocate xparts_out buffers.");
  }
  if (p->size_parts_in > proxy_buffinit) {
    swift_free("parts_in", p->parts_in);
    p->size_parts_in = proxy_buffinit;
    if ((p->parts_in = (struct part *)swift_malloc(
             "parts_in", sizeof(struct part) * p->size_parts_in)) == NULL)
      error("Failed to allocate parts_in buffers.");
    if ((p->xparts_in = (struct xpart *)swift_malloc(
             "xparts_in", sizeof(struct xpart) * p->size_parts_in)) == NULL)
      error("Failed to allocate xparts_in buffers.");
  }

  if (p->size_gparts_out > proxy_buffinit) {
    swift_free("gparts_out", p->gparts_out);
    p->size_gparts_out = proxy_buffinit;
    if ((p->gparts_out = (struct gpart *)swift_malloc(
             "gparts_out", sizeof(struct gpart) * p->size_gparts_out)) == NULL)
      error("Failed to allocate gparts_out buffers.");
  }
  if (p->size_gparts_in > proxy_buffinit) {
    swift_free("gparts_in", p->gparts_in);
    p->size_gparts_in = proxy_buffinit;
    if ((p->gparts_in = (struct gpart *)swift_malloc(
             "gparts_in", sizeof(struct gpart) * p->size_gparts_in)) == NULL)
      error("Failed to allocate gparts_in buffers.");
  }

  if (p->size_sparts_out > proxy_buffinit) {
    swift_free("sparts_out", p->sparts_out);
    p->size_sparts_out = proxy_buffinit;
    if ((p->sparts_out = (struct spart *)swift_malloc(
             "sparts_out", sizeof(struct spart) * p->size_sparts_out)) == NULL)
      error("Failed to allocate sparts_out buffers.");
  }
  if (p->size_sparts_in > proxy_buffinit) {
    swift_free("sparts_in", p->sparts_in);
    p->size_sparts_in = proxy_buffinit;
    if ((p->sparts_in = (struct spart *)swift_malloc(
             "sparts_in", sizeof(struct spart) * p->size_sparts_in)) == NULL)
      error("Failed to allocate sparts_in buffers.");
  }

  if (p->size_bparts_out > proxy_buffinit) {
    swift_free("bparts_out", p->bparts_out);
    p->size_bparts_out = proxy_buffinit;
    if ((p->bparts_out = (struct bpart *)swift_malloc(
             "bparts_out", sizeof(struct bpart) * p->size_bparts_out)) == NULL)
      error("Failed to allocate bparts_out buffers.");
  }
  if (p->size_bparts_in > proxy_buffinit) {
    swift_free("bparts_in", p->bparts_in);
    p->size_bparts_in = proxy_buffinit;
    if ((p->bparts_in = (struct bpart *)swift_malloc(
             "bparts_in", sizeof(struct bpart) * p->size_bparts_in)) == NULL)
      error("Failed to allocate bparts_in buffers.");
  }
}

/**
 * @brief Initialize the given proxy.
 *
 * @param p The #proxy.
 * @param mynodeID The node this proxy is running on.
 * @param nodeID The node with which this proxy will communicate.
 */
void proxy_init(struct proxy *p, int mynodeID, int nodeID) {

  /* Set the nodeID. */
  p->mynodeID = mynodeID;
  p->nodeID = nodeID;

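  /* This function may be called on an already-initialised proxy: buffers are
     only allocated if they do not exist yet and the counters are reset. */
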
  /* Allocate the cell send and receive buffers, if needed. */
  if (p->cells_in == NULL) {
    p->size_cells_in = proxy_buffinit;
    if ((p->cells_in = (struct cell **)swift_malloc(
             "cells_in", sizeof(void *) * p->size_cells_in)) == NULL)
      error("Failed to allocate cells_in buffer.");
    if ((p->cells_in_type = (int *)swift_malloc(
             "cells_in_type", sizeof(int) * p->size_cells_in)) == NULL)
      error("Failed to allocate cells_in_type buffer.");
  }
  p->nr_cells_in = 0;
  if (p->cells_out == NULL) {
    p->size_cells_out = proxy_buffinit;
    if ((p->cells_out = (struct cell **)swift_malloc(
             "cells_out", sizeof(void *) * p->size_cells_out)) == NULL)
      error("Failed to allocate cells_out buffer.");
    if ((p->cells_out_type = (int *)swift_malloc(
             "cells_out_type", sizeof(int) * p->size_cells_out)) == NULL)
      error("Failed to allocate cells_out_type buffer.");
  }
  p->nr_cells_out = 0;

  /* Allocate the part send and receive buffers, if needed. */
  if (p->parts_in == NULL) {
    p->size_parts_in = proxy_buffinit;
    if ((p->parts_in = (struct part *)swift_malloc(
             "parts_in", sizeof(struct part) * p->size_parts_in)) == NULL ||
        (p->xparts_in = (struct xpart *)swift_malloc(
             "xparts_in", sizeof(struct xpart) * p->size_parts_in)) == NULL)
      error("Failed to allocate parts_in buffers.");
  }
  p->nr_parts_in = 0;
  if (p->parts_out == NULL) {
    p->size_parts_out = proxy_buffinit;
    if ((p->parts_out = (struct part *)swift_malloc(
             "parts_out", sizeof(struct part) * p->size_parts_out)) == NULL ||
        (p->xparts_out = (struct xpart *)swift_malloc(
             "xparts_out", sizeof(struct xpart) * p->size_parts_out)) == NULL)
      error("Failed to allocate parts_out buffers.");
  }
  p->nr_parts_out = 0;

  /* Allocate the gpart send and receive buffers, if needed. */
  if (p->gparts_in == NULL) {
    p->size_gparts_in = proxy_buffinit;
    if ((p->gparts_in = (struct gpart *)swift_malloc(
             "gparts_in", sizeof(struct gpart) * p->size_gparts_in)) == NULL)
      error("Failed to allocate gparts_in buffers.");
  }
  p->nr_gparts_in = 0;
  if (p->gparts_out == NULL) {
    p->size_gparts_out = proxy_buffinit;
    if ((p->gparts_out = (struct gpart *)swift_malloc(
             "gparts_out", sizeof(struct gpart) * p->size_gparts_out)) == NULL)
      error("Failed to allocate gparts_out buffers.");
  }
  p->nr_gparts_out = 0;

  /* Allocate the spart send and receive buffers, if needed. */
  if (p->sparts_in == NULL) {
    p->size_sparts_in = proxy_buffinit;
    if ((p->sparts_in = (struct spart *)swift_malloc(
             "sparts_in", sizeof(struct spart) * p->size_sparts_in)) == NULL)
      error("Failed to allocate sparts_in buffers.");
  }
  p->nr_sparts_in = 0;
  if (p->sparts_out == NULL) {
    p->size_sparts_out = proxy_buffinit;
    if ((p->sparts_out = (struct spart *)swift_malloc(
             "sparts_out", sizeof(struct spart) * p->size_sparts_out)) == NULL)
      error("Failed to allocate sparts_out buffers.");
  }
  p->nr_sparts_out = 0;

  /* Allocate the bpart send and receive buffers, if needed. */
  if (p->bparts_in == NULL) {
    p->size_bparts_in = proxy_buffinit;
    if ((p->bparts_in = (struct bpart *)swift_malloc(
             "bparts_in", sizeof(struct bpart) * p->size_bparts_in)) == NULL)
      error("Failed to allocate bparts_in buffers.");
  }
  p->nr_bparts_in = 0;
  if (p->bparts_out == NULL) {
    p->size_bparts_out = proxy_buffinit;
    if ((p->bparts_out = (struct bpart *)swift_malloc(
             "bparts_out", sizeof(struct bpart) * p->size_bparts_out)) == NULL)
      error("Failed to allocate bparts_out buffers.");
  }
  p->nr_bparts_out = 0;
}

/**
 * @brief Free the memory allocated by a #proxy
 */
void proxy_clean(struct proxy *p) {

  swift_free("cells_in", p->cells_in);
  swift_free("cells_out", p->cells_out);
  swift_free("cells_in_type", p->cells_in_type);
  swift_free("cells_out_type", p->cells_out_type);
  swift_free("pcells_in", p->pcells_in);
  swift_free("pcells_out", p->pcells_out);
  swift_free("parts_out", p->parts_out);
  swift_free("xparts_out", p->xparts_out);
  swift_free("gparts_out", p->gparts_out);
  swift_free("sparts_out", p->sparts_out);
  swift_free("bparts_out", p->bparts_out);
  swift_free("parts_in", p->parts_in);
  swift_free("xparts_in", p->xparts_in);
  swift_free("gparts_in", p->gparts_in);
  swift_free("sparts_in", p->sparts_in);
  swift_free("bparts_in", p->bparts_in);
}

/**
 * @brief Registers the MPI types for the proxy cells.
 */
void proxy_create_mpi_type(void) {

#ifdef WITH_MPI
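
  /* The pcell messages are typed as a contiguous block of bytes, which
     assumes struct pcell has the same layout on every rank; this holds when
     all ranks run the same binary. */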
  if (MPI_Type_contiguous(sizeof(struct pcell) / sizeof(unsigned char),
                          MPI_BYTE, &pcell_mpi_type) != MPI_SUCCESS ||
      MPI_Type_commit(&pcell_mpi_type) != MPI_SUCCESS) {
    error("Failed to create MPI type for parts.");
  }
#else
  error("SWIFT was not compiled with MPI support.");
#endif
}

void proxy_free_mpi_type(void) {
#ifdef WITH_MPI
  MPI_Type_free(&pcell_mpi_type);
#else
  error("SWIFT was not compiled with MPI support.");
#endif
}