diff --git a/src/Makefile.am b/src/Makefile.am index 0e7fc5f39d48fec89d43656be21106022b3dc2a1..e041246dd3f64c84045387e84f14274ec555cae5 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -29,12 +29,12 @@ include_HEADERS += interpolation.hpp parameters.hpp cosmology.hpp single_field.h include_HEADERS += error.hpp definitions.hpp field.hpp include_HEADERS += parser.hpp inline.hpp particle_type.hpp openmp.hpp include_HEADERS += logfile_writer.h version.hpp cache.hpp csds_array.hpp -include_HEADERS += mapped_tracking_element.hpp mapped_logfile.hpp +include_HEADERS += mapped_tracking_element.hpp # Source files for the reader AM_SOURCES = header.cpp mapped_file.cpp time_array.cpp tools.cpp reader.cpp AM_SOURCES += logfile.cpp index_file.cpp parameters.cpp reader_generate_index.cpp -AM_SOURCES += cosmology.cpp single_field.cpp mapped_logfile.cpp +AM_SOURCES += cosmology.cpp single_field.cpp AM_SOURCES += parser.cpp particle_type.cpp if HAVEPYTHON diff --git a/src/header.cpp b/src/header.cpp index fbaf0decd29fd019e945117347e0885fa55b22c5..d0c27424f2c22d695eab99b6e582aba12e259d4c 100644 --- a/src/header.cpp +++ b/src/header.cpp @@ -31,28 +31,22 @@ #include "reader.hpp" #include "tools.hpp" -/* Name of each offset direction. */ -const char *csds_offset_name[csds_offset_count] = { +const std::string Header::sOffsetName[] = { "Backward", "Forward", "Corrupted", }; -/** - * @brief Print the properties of the header to stdout. - * - * @param h The #header. - */ -void header_print(const struct header *h) { +void Header::Print() const { #ifdef CSDS_DEBUG_CHECKS message("Debug checks enabled."); #endif - message("First Offset: " << h->offset_first_record); - message("Offset direction: " << csds_offset_name[h->offset_direction]); + message("First Offset: " << mOffsetFirstRecord); + message("Offset direction: " << sOffsetName[mOffsetDirection]); for (int type = 0; type < csds_type_count; type++) { message("Masks for particle type " << type << ":"); - for (auto const &field : h->fields[type]) { + for (auto const &field : mFields[type]) { const SingleField &tmp = field.GetField(); /* Do the field */ message(std::setfill(' ') @@ -76,49 +70,31 @@ void header_print(const struct header *h) { } }; -/** - * @brief Update the offset direction in the structure and - * write it to the logfile. - * - * @param h #header file structure. - * @param new_value The new value to write. - * - */ -void header_change_offset_direction(struct header *h, - enum csds_offset_direction new_value) { - h->offset_direction = new_value; +void Header::WriteOffsetDirection(enum csds_offset_direction new_value, + LogFile &log) { + mOffsetDirection = new_value; /* Skip file format and version numbers. */ size_t offset = CSDS_STRING_SIZE + 2 * sizeof(int); - h->log->log->WriteData(offset, sizeof(unsigned int), &new_value); + log.WriteData(offset, sizeof(unsigned int), &new_value); } -/** - * @brief read the csds header. - * - * @param h out: The #header. - * @param log The #csds_logfile. - * @param verbose The verbose level - */ -void header_read(struct header *h, LogFile *log, int verbose) { - - /* Set pointer to log. */ - h->log = log; +void Header::Read(LogFile &log, int verbose) { /* Position in the file */ size_t offset = 0; /* read the file format. 
*/ char file_format[CSDS_STRING_SIZE]; - offset = log->log->ReadData(offset, CSDS_STRING_SIZE, &file_format); + offset = log.ReadData(offset, CSDS_STRING_SIZE, &file_format); if (strcmp(file_format, CSDS_FORMAT_STRING)) csds_error("Wrong file format: " << file_format); /* Read the major version number. */ - offset = log->log->ReadData(offset, sizeof(int), &h->major_version); + offset = log.ReadData(offset, sizeof(int), &mMajorVersion); /* Read the minor version number. */ - offset = log->log->ReadData(offset, sizeof(int), &h->minor_version); + offset = log.ReadData(offset, sizeof(int), &mMinorVersion); /* Check the mask size */ if (sizeof(mask_type) != CSDS_MASK_SIZE) @@ -126,23 +102,21 @@ void header_read(struct header *h, LogFile *log, int verbose) { << sizeof(mask_type) << " != " << CSDS_MASK_SIZE << ")"); if (verbose > 0) - message("File version " << h->major_version << "." << h->minor_version); + message("File version " << mMajorVersion << "." << mMinorVersion); /* Read the offset directions. */ - offset = log->log->ReadData(offset, sizeof(int), &h->offset_direction); + offset = log.ReadData(offset, sizeof(int), &mOffsetDirection); - if (!header_is_forward(h) && !header_is_backward(h) && - !header_is_corrupted(h)) - csds_error("Wrong offset value in the header:" << h->offset_direction); + if (!OffsetsAreForward() && !OffsetsAreBackward() && !OffsetsAreCorrupted()) + csds_error("Wrong offset value in the header:" << mOffsetDirection); /* Read offset to first record. */ - h->offset_first_record = 0; - offset = - log->log->ReadData(offset, CSDS_OFFSET_SIZE, &h->offset_first_record); + mOffsetFirstRecord = 0; + offset = log.ReadData(offset, CSDS_OFFSET_SIZE, &mOffsetFirstRecord); /* Read the size of the strings. */ unsigned int string_length = 0; - offset = log->log->ReadData(offset, sizeof(unsigned int), &string_length); + offset = log.ReadData(offset, sizeof(unsigned int), &string_length); /* Check if value defined in this file is large enough. */ if (CSDS_STRING_SIZE < string_length) { @@ -151,7 +125,7 @@ void header_read(struct header *h, LogFile *log, int verbose) { /* Read the number of masks. */ unsigned int masks_count = 0; - offset = log->log->ReadData(offset, sizeof(unsigned int), &masks_count); + offset = log.ReadData(offset, sizeof(unsigned int), &masks_count); /* Allocate the masks memory. */ std::vector<struct mask_data> masks(masks_count); @@ -159,13 +133,13 @@ void header_read(struct header *h, LogFile *log, int verbose) { /* Loop over all masks. */ for (unsigned int i = 0; i < masks_count; i++) { /* Read the mask name. */ - offset = log->log->ReadData(offset, string_length, masks[i].name); + offset = log.ReadData(offset, string_length, masks[i].name); /* Set the mask value. */ masks[i].mask = 1 << i; /* Read the mask data size. */ - offset = log->log->ReadData(offset, sizeof(unsigned int), &masks[i].size); + offset = log.ReadData(offset, sizeof(unsigned int), &masks[i].size); /* Print the information. 
*/ if (verbose > 1) { @@ -211,7 +185,7 @@ void header_read(struct header *h, LogFile *log, int verbose) { /* Read the number of fields per particle */ int number_fields[csds_type_count]; - offset = log->log->ReadData(offset, sizeof(number_fields), number_fields); + offset = log.ReadData(offset, sizeof(number_fields), number_fields); /* Read the order of the fields */ for (int type = 0; type < csds_type_count; type++) { @@ -222,35 +196,33 @@ void header_read(struct header *h, LogFile *log, int verbose) { /* Allocate and read the order */ size_t size = number_fields[type] * sizeof(int); std::vector<int> order(number_fields[type]); - offset = log->log->ReadData(offset, size, order.data()); + offset = log.ReadData(offset, size, order.data()); /* Set the special flag */ - h->fields[type].emplace_back(CSDS_SPECIAL_FLAGS_NAME, masks); + mFields[type].emplace_back(CSDS_SPECIAL_FLAGS_NAME, masks); /* Set the fields */ for (int k = 0; k < number_fields[type]; k++) { size_t index = order[k]; - h->fields[type].emplace_back(masks[index].name, masks); + mFields[type].emplace_back(masks[index].name, masks); } } /* Check the logfile header's size. */ - if (offset != h->offset_first_record) { - csds_error("Wrong header size (in header " << h->offset_first_record + if (offset != mOffsetFirstRecord) { + csds_error("Wrong header size (in header " << mOffsetFirstRecord << ", current" << offset << ")"); } + + /* Ensures that the first offset is a timestep */ + struct record_header header; + log.ReadRecordHeader(mOffsetFirstRecord, header); + + if (header.mask != CSDS_TIMESTAMP_MASK) + csds_error("Log file should begin by timestep."); }; -/** - * @brief Count number of bits in a given mask (without the record header). - * - * @param h #header file structure. - * @param mask Mask to compute. - * - * @return number of bits in mask. - */ -size_t header_get_record_size_from_mask(const struct header *h, - mask_type mask) { +size_t Header::GetRecordSizeFromMask(mask_type mask) const { size_t count = 0; /* Do the time stamp */ @@ -261,7 +233,7 @@ size_t header_get_record_size_from_mask(const struct header *h, /* Loop over each masks. */ for (int part_type = 0; part_type < csds_type_count; part_type++) { - for (auto const &field : h->fields[part_type]) { + for (auto const &field : mFields[part_type]) { if (mask & field.GetMask()) { count += field.GetFieldSize(); @@ -283,20 +255,10 @@ size_t header_get_record_size_from_mask(const struct header *h, return count; } -/** - * @brief Find the #field_information from the field name. - * - * @param h The #header - * @param name The name of the field. - * @param part_type The particle type. - * - * @return The first field found. - */ -Field const &header_get_field_from_name(const struct header *h, - const std::string name, - enum part_type part_type) { +Field const &Header::GetFieldFromName(const std::string name, + enum part_type part_type) const { - for (auto const &field : h->fields[part_type]) { + for (auto const &field : mFields[part_type]) { if (name.compare(field.GetName()) == 0) { return field; @@ -307,25 +269,3 @@ Field const &header_get_field_from_name(const struct header *h, csds_error("The logfile does not contain the field " << name << " for particle type " << part_type); } - -/** - * @brief get offset of first time record - * - * @param h file #header - * @return offset of first time record - * - */ -size_t header_get_first_record(const struct header *h) { - - /* Initialize a few variables. 
*/ - size_t offset = h->offset_first_record; - MappedLogFile &map = *h->log->log; - - struct record_header header; - map.ReadRecordHeader(offset, header); - - if (header.mask != CSDS_TIMESTAMP_MASK) - csds_error("Log file should begin by timestep."); - - return h->offset_first_record; -} diff --git a/src/header.hpp b/src/header.hpp index ffaedea424260c487cd79534b394826af821ffd5..542b68fcc1981585320332d8e48081d0960f1fa8 100644 --- a/src/header.hpp +++ b/src/header.hpp @@ -28,75 +28,93 @@ #include "particle_type.hpp" #include "tools.hpp" -/** - * @brief Names of the offset directions. - */ -extern const char *csds_offset_name[]; +/* Forward declaration */ +class LogFile; + +/* Quick typedef as a shortcut in this file */ +typedef std::array<std::vector<Field>, csds_type_count> FieldArray; /** * @brief This structure contains everything from the file header. - * - * This structure is initialized by #header_read and need to be freed - * with #header_free. - * - * The information contained by the header can be easily access with - * the functions #header_get_record_size_from_mask and #header_get_field_index. - * - * The only function that modify the file is #header_change_offset_direction. + * This structure is initialized by #Read. */ -struct header { - /* Dump's major version. */ - int major_version; +class Header { + public: + /** @brief Print the header. */ + void Print() const; + + /** @brief Read the header from the #LogFile + * @param log The #LogFile. + * @param verbose The verbosity level + */ + void Read(LogFile &log, int verbose); + + /** @brief Compute the size of a record from its mask + * @param mask The mask to use. + */ + size_t GetRecordSizeFromMask(mask_type mask) const; + + /** @brief Write the new offset direction into the logfile. + * @param new_value The new value for the direction. + * @param log The #LogFile. + */ + void WriteOffsetDirection(enum csds_offset_direction new_value, LogFile &log); + + /** @brief Get a field from its name and the particle type. + * @param name The name of the field. + * @param part_type The type of particle. + */ + Field const &GetFieldFromName(const std::string name, + enum part_type part_type) const; + + /** @brief Check if the offsets are forward. */ + INLINE bool OffsetsAreForward() const { + return mOffsetDirection == csds_offset_forward; + } + + /** @brief Check if the offset are backward. */ + INLINE bool OffsetsAreBackward() const { + return mOffsetDirection == csds_offset_backward; + } + + /** @brief Check if the offset are corrupted. */ + INLINE bool OffsetsAreCorrupted() const { + return mOffsetDirection == csds_offset_corrupted; + } + + /** @brief Returns the array of all the fields defined in the header */ + FieldArray const &GetFields() const { return mFields; } - /* Dump's minor version. */ - int minor_version; + /** + * @brief Get the offset of first time record + */ + size_t GetOffsetFirstRecord() const { return mOffsetFirstRecord; } + + int GetMajorVersion() const { return mMajorVersion; } + int GetMinorVersion() const { return mMinorVersion; } + enum csds_offset_direction GetOffsetDirection() const { + return mOffsetDirection; + } + + protected: + /* The major version. */ + int mMajorVersion; + + /* The minor version. */ + int mMinorVersion; /* Offset of the first record. */ - size_t offset_first_record; + size_t mOffsetFirstRecord; /* Direction of the offset in the records. */ - enum csds_offset_direction offset_direction; - - /* The corresponding log. 
*/ - // TODO Remove this dependency - LogFile *log; + enum csds_offset_direction mOffsetDirection; /* The fields for each particle type in the correct order. By construction, the special flag is the first element in each array. */ - std::vector<Field> fields[csds_type_count]; -}; - -void header_print(const struct header *h); -void header_read(struct header *h, LogFile *log, int verbose); -size_t header_get_record_size_from_mask(const struct header *h, mask_type mask); -void header_change_offset_direction(struct header *h, - enum csds_offset_direction new_value); -Field const &header_get_field_from_name(const struct header *h, - const std::string name, - enum part_type part_type); -size_t header_get_first_record(const struct header *h); -/** - * @brief Check if the offset are forward. - * @param h The #header. - */ -INLINE static int header_is_forward(const struct header *h) { - return h->offset_direction == csds_offset_forward; -} - -/** - * @brief Check if the offset are backward. - * @param h The #header. - */ -INLINE static int header_is_backward(const struct header *h) { - return h->offset_direction == csds_offset_backward; -} + FieldArray mFields; -/** - * @brief Check if the offset are corrupted. - * @param h The #header. - */ -INLINE static int header_is_corrupted(const struct header *h) { - return h->offset_direction == csds_offset_corrupted; -} + /* The name corresponding to the different offset directions */ + static const std::string sOffsetName[csds_offset_count]; +}; #endif // CSDS_HEADER_H diff --git a/src/logfile.cpp b/src/logfile.cpp index fc552af5a607a8edf03c5ed7110a420eabb770f6..85b5124bb806dc53a3972859c769b090dd057869 100644 --- a/src/logfile.cpp +++ b/src/logfile.cpp @@ -33,7 +33,7 @@ void LogFile::PopulateTimeArray(const string &basename, int verbose) { string filename = basename + "_0000.index"; /* Try restoring the time array */ - if (this->times.Load(filename, verbose)) { + if (mTimes.Load(filename, verbose)) { return; } @@ -42,19 +42,19 @@ void LogFile::PopulateTimeArray(const string &basename, int verbose) { } /* get file size. */ - size_t file_size = this->log->GetFileSize(); + size_t file_size = GetFileSize(); /* get first timestamp. */ - size_t offset = header_get_first_record(&this->header); + size_t offset = mHeader.GetOffsetFirstRecord(); while (offset < file_size) { /* read current time record and store it. */ size_t tmp_offset = offset; struct time_record time; - this->log->ReadTimeRecord(time, tmp_offset); - this->times.Append(time); + ReadTimeRecord(time, tmp_offset); + mTimes.Append(time); /* get next record. */ - bool next_present = this->log->GetNextRecord(this->header, offset); + bool next_present = GetNextRecord(offset); if (!next_present) break; } } @@ -67,23 +67,20 @@ void LogFile::PopulateTimeArray(const string &basename, int verbose) { * @param verbose The verbose level. * @param only_header Read only the header. */ -LogFile::LogFile(const string basename, int only_header, int verbose) { +LogFile::LogFile(const string basename, bool only_header, int verbose) + : MappedFile(basename + ".dump", /* read_only */ true, + /* track_mmap */ true) { /* Generate the logfile filename */ string logfile_name = basename + ".dump"; - /* Open file, map it and get its size. */ - if (verbose > 1) message("Mapping the log file."); - this->log = new MappedLogFile(logfile_name, /* read_only */ true, - /* track_mmap */ true); - /* Read the header. 
*/ if (verbose > 1) message("Reading the header."); - header_read(&this->header, this, verbose); + mHeader.Read(*this, verbose); /* Print the header. */ if (verbose > 0) { - header_print(&this->header); + mHeader.Print(); } /* No need to continue if only the @@ -91,12 +88,12 @@ LogFile::LogFile(const string basename, int only_header, int verbose) { if (only_header) return; /* Check if the offset are corrupted. */ - if (header_is_corrupted(&this->header)) { + if (mHeader.OffsetsAreCorrupted()) { csds_error("The offsets have been corrupted."); } /* Reverse the offsets direction. */ - if (header_is_backward(&this->header)) { + if (mHeader.OffsetsAreBackward()) { ReverseOffset(logfile_name, verbose); } @@ -106,31 +103,21 @@ LogFile::LogFile(const string basename, int only_header, int verbose) { /* Print the time array. */ if (verbose > 0) { - this->times.Print(); + mTimes.Print(); } } -/** - * @brief Free the allocated memory and unmap the file. - * - * @param log The #csds_logfile. - */ -LogFile::~LogFile() { delete this->log; } - /** * @brief debugging function checking the offset and the mask of all the * records. * * Compare the mask with the one pointed by the header. * if the record is a particle, check the id too. - * - * @param log The #csds_logfile * @param verbose The verbose level */ -void csds_logfile_check_record_consistency(ATTR_UNUSED LogFile &log, - ATTR_UNUSED int verbose) { +void LogFile::CheckRecordConsistency(ATTR_UNUSED int verbose) { #ifdef CSDS_DEBUG_CHECKS - struct header *header = &log.header; + const Header &header = GetHeader(); if (verbose > 0) { message("Check record's headers..."); @@ -142,12 +129,12 @@ void csds_logfile_check_record_consistency(ATTR_UNUSED LogFile &log, const high_resolution_clock::time_point init = high_resolution_clock::now(); /* check that the record offset points to another record. */ - for (size_t offset_debug = header->offset_first_record; - offset_debug < log.log->GetFileSize(); - offset_debug = tools_check_record_consistency(&log, offset_debug)) { + for (size_t offset_debug = header.GetOffsetFirstRecord(); + offset_debug < GetFileSize(); + offset_debug = CheckCurrentRecordConsistency(offset_debug)) { /* Check if we should update the progress bar. */ - float current = 100 * ((float)offset_debug) / log.log->GetFileSize(); + float current = 100 * ((float)offset_debug) / GetFileSize(); if (verbose > 0 && current > next_percentage) { /* Print the bar */ tools_print_progress(current, init, "Checking offsets"); @@ -178,23 +165,22 @@ void csds_logfile_check_record_consistency(ATTR_UNUSED LogFile &log, void LogFile::ReverseOffset(std::string filename, int verbose) { /* Close and reopen the file in write mode. */ - delete this->log; - this->log = - new MappedLogFile(filename, /* read_only */ 0, /* track_mmap */ 0); + Close(); + Open(filename, /* read_only */ false, /* track_mmap */ false); /* Check if the offsets need to be reversed. */ - if (!header_is_backward(&header)) { + if (!mHeader.OffsetsAreBackward()) { csds_error("The offsets are already reversed."); } #ifdef CSDS_DEBUG_CHECKS - csds_logfile_check_record_consistency(*this, verbose); + CheckRecordConsistency(verbose); #endif message("WARNING: Modifying the logfile, do not kill the job!"); /* Set the offset direction to a corrupted status. 
*/ - header_change_offset_direction(&header, csds_offset_corrupted); + mHeader.WriteOffsetDirection(csds_offset_corrupted, *this); if (verbose > 0) { message("Reversing offsets..."); @@ -206,11 +192,10 @@ void LogFile::ReverseOffset(std::string filename, int verbose) { const high_resolution_clock::time_point init = high_resolution_clock::now(); /* reverse the record's offset. */ - for (size_t offset = header.offset_first_record; - offset < this->log->GetFileSize(); - offset = this->log->ReverseOffset(header, offset)) { + for (size_t offset = mHeader.GetOffsetFirstRecord(); offset < GetFileSize(); + offset = ReverseCurrentOffset(offset)) { /* Check if we should update the progress. */ - float current = 100 * ((float)offset) / this->log->GetFileSize(); + float current = 100 * ((float)offset) / GetFileSize(); if (verbose > 0 && current > next_percentage) { /* Print the remaining time */ @@ -229,16 +214,190 @@ void LogFile::ReverseOffset(std::string filename, int verbose) { /* Now that the offset are effectively reversed, can set the direction to forward. */ - header_change_offset_direction(&header, csds_offset_forward); + mHeader.WriteOffsetDirection(csds_offset_forward, *this); message("WARNING: Modification done, you can now safely kill the job."); #ifdef CSDS_DEBUG_CHECKS - csds_logfile_check_record_consistency(*this, verbose); + CheckRecordConsistency(verbose); #endif - /* Close and reopen the file in read mode. */ - delete this->log; - this->log = new MappedLogFile(filename, /* read_only */ 1, - /* track_mmap */ 1); + /* Close and reopen the file in read only mode. */ + Close(); + Open(filename, /* read_only */ true, /* track_mmap */ true); +} + +/** + * @brief get the offset of the next corresponding record. + * @param offset In: initial offset, Out: offset of the next record + * @return Is a next record present? + */ +bool LogFile::GetNextRecord(size_t &offset) { + if (mHeader.OffsetsAreForward()) return GetNextRecordForward(offset); + if (mHeader.OffsetsAreBackward()) + return GetNextRecordBackward(offset); + else + csds_error("Offsets are corrupted."); +} + +/** + * @brief internal function of #tools_get_next_record. Should not be used + * outside. + * + * @param offset (Out) offset of the next record + * + * @return Is a next record present? + */ +bool LogFile::GetNextRecordForward(size_t &offset) { + + /* Read the offset. */ + struct record_header header; + ReadRecordHeader(offset, header); + + if (header.offset == 0) return false; + + /* Set the absolute offset. */ + offset += header.offset; + return true; +} + +/** + * @brief internal function of #tools_get_next_record. Should not be used (very + * slow) + * + * @param h #header structure of the file + * @param offset In: initial offset, Out: offset of the next record + * + * @return Is a next record present? + */ +bool LogFile::GetNextRecordBackward(size_t &offset) { +#ifndef CSDS_DEBUG_CHECKS + csds_error("Should not be used, method too slow"); +#endif + size_t current_offset = offset; + size_t record_header = CSDS_MASK_SIZE + CSDS_OFFSET_SIZE; + + while (current_offset < mMapSize) { + struct record_header header; + ReadRecordHeader(current_offset, header); + + header.offset = current_offset - header.offset - record_header; + if (offset == header.offset) { + offset = current_offset - record_header; + return true; + } + + current_offset += mHeader.GetRecordSizeFromMask(header.mask); + } + + return false; +} + +/** + * @brief switch side offset. + * + * From current record, switch side of the offset of the previous one. 
+ * @param offset position of the record. + * + * @return position after the record. + */ +size_t LogFile::ReverseCurrentOffset(size_t offset) { + const size_t cur_offset = offset; + + /* read mask + offset. */ + struct record_header header; + offset = ReadRecordHeader(offset, header); + + /* write offset of zero (in case it is the last record). */ + const size_t zero = 0; + offset -= CSDS_OFFSET_SIZE; + offset = WriteData(offset, CSDS_OFFSET_SIZE, &zero); + + /* set offset after current record. */ + offset += mHeader.GetRecordSizeFromMask(header.mask); + const size_t after_current_record = offset; + + /* first records do not have a previous partner. */ + if (header.offset == cur_offset) return after_current_record; + if (header.offset > cur_offset) + csds_error("Unexpected offset: header " << header.offset << ", current " + << cur_offset); + + /* modify previous offset. */ + offset = cur_offset - header.offset + CSDS_MASK_SIZE; + offset = WriteData(offset, CSDS_OFFSET_SIZE, &header.offset); + +#ifdef CSDS_DEBUG_CHECKS + struct record_header prev_header; + offset -= CSDS_MASK_SIZE + CSDS_OFFSET_SIZE; + ReadRecordHeader(offset, prev_header); + + /* Check if we are not mixing timestamp and particles */ + if ((prev_header.mask != CSDS_TIMESTAMP_MASK && + header.mask == CSDS_TIMESTAMP_MASK) || + (prev_header.mask == CSDS_TIMESTAMP_MASK && + header.mask != CSDS_TIMESTAMP_MASK)) + csds_error("Unexpected mask: " << header.mask << " got " + << prev_header.mask); + +#endif // CSDS_DEBUG_CHECKS + + return after_current_record; +} + +/** + * @brief debugging function checking the offset and the mask of a record. + * + * Compare the mask with the one pointed by the header. + * if the record is a particle, check the id too. + * + * @param offset position of the record. + * + * @return position after the record. + */ +size_t LogFile::CheckCurrentRecordConsistency(size_t offset) { +#ifndef CSDS_DEBUG_CHECKS + csds_error("Should not check in non debug mode."); +#endif + + const size_t init_offset = offset; + + /* read mask + offset. */ + struct record_header header; + offset = ReadRecordHeader(offset, header); + + /* set offset after current record. */ + offset += mHeader.GetRecordSizeFromMask(header.mask); + const size_t offset_ret = offset; + + /* If something happened, skip the check. */ + if (header.mask & CSDS_SPECIAL_FLAGS_MASK) { + return offset_ret; + } + + /* get absolute offset. */ + if (mHeader.OffsetsAreForward()) + header.offset += init_offset; + else if (mHeader.OffsetsAreBackward()) { + if (init_offset < header.offset) + csds_error("Offset too large for mask: " << header.mask); + header.offset = init_offset - header.offset; + } else { + csds_error("Offset are corrupted."); + } + + if (header.offset == init_offset || header.offset == 0) return offset_ret; + + /* read mask of the pointed record. */ + struct record_header pointed_header; + ReadRecordHeader(header.offset, pointed_header); + + /* check if not mixing timestamp and particles. 
*/ + if ((pointed_header.mask != CSDS_TIMESTAMP_MASK && + header.mask == CSDS_TIMESTAMP_MASK) || + (pointed_header.mask == CSDS_TIMESTAMP_MASK && + header.mask != CSDS_TIMESTAMP_MASK)) + csds_error("Error in the offset for mask: " << header.mask); + + return offset_ret; } diff --git a/src/logfile.hpp b/src/logfile.hpp index 63757b2d84c7d92f8a7e9aa46b40ce349acb1f03..f1351ef9811ada3b0fb3aa972de1810cb12e4f10 100644 --- a/src/logfile.hpp +++ b/src/logfile.hpp @@ -24,30 +24,98 @@ #define CSDS_LOGFILE_H #include "header.hpp" -#include "mapped_logfile.hpp" +#include "mapped_file.hpp" #include "time_array.hpp" /** * @brief This class deals with the log file. */ -class LogFile { +class LogFile : public MappedFile { public: - LogFile(const std::string basename, int only_header, int verbose); - ~LogFile(); + LogFile(const std::string basename, bool only_header, int verbose); - /* Information contained in the file header. */ - struct header header; + /** + * @brief read a time record. + * + * @param time_record (output) The time read. + * @param offset The offset to read + * + * @return The offset after the time record + */ + size_t ReadTimeRecord(struct time_record &time_record, size_t offset) { - /* Information about the time records. */ - TimeArray times; + /* Initialize variables. */ + time_record.int_time = 0; + time_record.time = 0; + time_record.offset = offset; + + /* read record header. */ + struct record_header header; + offset = ReadRecordHeader(offset, header); + + /* check if reading a time record. */ + if (CSDS_TIMESTAMP_MASK != header.mask) csds_error("Not a time record."); + + /* read the record. */ + offset = + ReadData(offset, sizeof(unsigned long long int), &time_record.int_time); + offset = ReadData(offset, sizeof(double), &time_record.time); + + return offset; + } + + /** + * @brief read a record header. + * + * @param offset The offset in the file + * @param header (output) header read from the file. + * + * @return The offset after the record header. + */ + INLINE size_t ReadRecordHeader(size_t offset, record_header &header) { +#ifdef CSDS_MMAP_TRACKING + MMapTrackingElement el(offset); +#endif + + /* read mask */ + header.mask = 0; + memcpy(&header.mask, this->mMap + offset, CSDS_MASK_SIZE); + offset += CSDS_MASK_SIZE; - /* The file. */ - MappedLogFile *log; + /* read offset */ + header.offset = 0; + memcpy(&header.offset, this->mMap + offset, CSDS_OFFSET_SIZE); + offset += CSDS_OFFSET_SIZE; + +#ifdef CSDS_MMAP_TRACKING + /* Write the result into the file */ + if (this->mTracking.use_tracking) { + this->WriteTracking(el); + } +#endif + + return offset; + } + + bool GetNextRecord(size_t &offset); + void SaveTimeArray(std::string filename) const { mTimes.Save(filename); } + TimeArray const &GetTimeArray() const { return mTimes; } + Header const &GetHeader() const { return mHeader; } protected: + bool GetNextRecordBackward(size_t &offset); + bool GetNextRecordForward(size_t &offset); void PopulateTimeArray(const std::string &basename, int verbose); - void ReverseOffset(std::string filename, int verbose); + size_t CheckCurrentRecordConsistency(size_t offset); + size_t ReverseCurrentOffset(size_t offset); + void CheckRecordConsistency(int verbose); + + /* Information contained in the file header. */ + Header mHeader; + + /* Information about the time records. 
*/ + TimeArray mTimes; }; #endif // CSDS_LOGFILE_H diff --git a/src/mapped_file.cpp b/src/mapped_file.cpp index e14e013b4ad2d3792423f79887b55cee277e0581..0bf17a912c8feac1a442643a95bae4af95e4e629 100644 --- a/src/mapped_file.cpp +++ b/src/mapped_file.cpp @@ -64,8 +64,9 @@ string strip_ext(string fname) { * @param track_mmap Should we track the memory reading? * */ -MappedFile::MappedFile(const string filename, bool read_only, - ATTR_UNUSED bool track_mmap) { +void MappedFile::Open(const string filename, bool read_only, + ATTR_UNUSED bool trackl_mmap) { + /* open the file. */ int fd; @@ -121,7 +122,7 @@ MappedFile::MappedFile(const string filename, bool read_only, * * @param map The #mapped_file. */ -MappedFile::~MappedFile() { +void MappedFile::Close() { /* unmap the file. */ if (munmap(this->mMap, this->mMapSize) != 0) { message("Unable to unmap the file :" << strerror(errno)); diff --git a/src/mapped_file.hpp b/src/mapped_file.hpp index 093a254431ccb5eea2e3b0307f8f4db24f622da8..42db161e50cf79b05efa2ebcae4dec9de1423bc8 100644 --- a/src/mapped_file.hpp +++ b/src/mapped_file.hpp @@ -51,8 +51,12 @@ INLINE static bool csds_file_exist(const std::string &filename) { class MappedFile { public: - MappedFile(const std::string filename, bool read_only, bool track_mmap); - ~MappedFile(); + MappedFile(const std::string filename, bool read_only, bool track_mmap) { + Open(filename, read_only, track_mmap); + }; + ~MappedFile() { Close(); }; + void Close(); + void Open(const std::string filename, bool read_only, bool track_mmap); /** * @brief read a single value from a file. diff --git a/src/mapped_logfile.cpp b/src/mapped_logfile.cpp deleted file mode 100644 index 458a04ff25b95c7955ed9eb786754e4b30e2d965..0000000000000000000000000000000000000000 --- a/src/mapped_logfile.cpp +++ /dev/null @@ -1,144 +0,0 @@ -/******************************************************************************* - * This file is part of CSDS. - * Copyright (c) 2021 Loic Hausammann (loic.hausammann@epfl.ch) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published - * by the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - * - ******************************************************************************/ - -/* This file header */ -#include "mapped_logfile.hpp" - -/** - * @brief get the offset of the next corresponding record. - * - * @param h #header structure of the file - * @param offset In: initial offset, Out: offset of the next record - * - * @return Is a next record present? - */ -bool MappedLogFile::GetNextRecord(const struct header &h, size_t &offset) { - if (header_is_forward(&h)) return GetNextRecordForward(offset); - if (header_is_backward(&h)) - return GetNextRecordBackward(h, offset); - else - csds_error("Offsets are corrupted."); -} - -/** - * @brief internal function of #tools_get_next_record. Should not be used - * outside. - * - * @param offset (Out) offset of the next record - * - * @return Is a next record present? 
- */ -bool MappedLogFile::GetNextRecordForward(size_t &offset) { - - /* Read the offset. */ - struct record_header header; - ReadRecordHeader(offset, header); - - if (header.offset == 0) return false; - - /* Set the absolute offset. */ - offset += header.offset; - return true; -} - -/** - * @brief internal function of #tools_get_next_record. Should not be used (very - * slow) - * - * @param h #header structure of the file - * @param offset In: initial offset, Out: offset of the next record - * - * @return Is a next record present? - */ -bool MappedLogFile::GetNextRecordBackward(const struct header &h, - size_t &offset) { -#ifndef CSDS_DEBUG_CHECKS - csds_error("Should not be used, method too slow"); -#endif - size_t current_offset = offset; - size_t record_header = CSDS_MASK_SIZE + CSDS_OFFSET_SIZE; - - while (current_offset < mMapSize) { - struct record_header header; - ReadRecordHeader(current_offset, header); - - header.offset = current_offset - header.offset - record_header; - if (offset == header.offset) { - offset = current_offset - record_header; - return true; - } - - current_offset += header_get_record_size_from_mask(&h, header.mask); - } - - return false; -} - -/** - * @brief switch side offset. - * - * From current record, switch side of the offset of the previous one. - * @param h #header structure of the file. - * @param offset position of the record. - * - * @return position after the record. - */ -size_t MappedLogFile::ReverseOffset(const struct header &h, size_t offset) { - const size_t cur_offset = offset; - - /* read mask + offset. */ - struct record_header header; - offset = ReadRecordHeader(offset, header); - - /* write offset of zero (in case it is the last record). */ - const size_t zero = 0; - offset -= CSDS_OFFSET_SIZE; - offset = WriteData(offset, CSDS_OFFSET_SIZE, &zero); - - /* set offset after current record. */ - offset += header_get_record_size_from_mask(&h, header.mask); - const size_t after_current_record = offset; - - /* first records do not have a previous partner. */ - if (header.offset == cur_offset) return after_current_record; - if (header.offset > cur_offset) - csds_error("Unexpected offset: header " << header.offset << ", current " - << cur_offset); - - /* modify previous offset. */ - offset = cur_offset - header.offset + CSDS_MASK_SIZE; - offset = WriteData(offset, CSDS_OFFSET_SIZE, &header.offset); - -#ifdef CSDS_DEBUG_CHECKS - struct record_header prev_header; - offset -= CSDS_MASK_SIZE + CSDS_OFFSET_SIZE; - ReadRecordHeader(offset, prev_header); - - /* Check if we are not mixing timestamp and particles */ - if ((prev_header.mask != CSDS_TIMESTAMP_MASK && - header.mask == CSDS_TIMESTAMP_MASK) || - (prev_header.mask == CSDS_TIMESTAMP_MASK && - header.mask != CSDS_TIMESTAMP_MASK)) - csds_error("Unexpected mask: " << header.mask << " got " - << prev_header.mask); - -#endif // CSDS_DEBUG_CHECKS - - return after_current_record; -} diff --git a/src/mapped_logfile.hpp b/src/mapped_logfile.hpp deleted file mode 100644 index 51ddedd49e5bd03cd50644e1f30a65ea4aeef40d..0000000000000000000000000000000000000000 --- a/src/mapped_logfile.hpp +++ /dev/null @@ -1,106 +0,0 @@ -/******************************************************************************* - * This file is part of CSDS. 
- * Copyright (c) 2019 Loic Hausammann (loic.hausammann@epfl.ch) - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU Lesser General Public License as published - * by the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - * - ******************************************************************************/ -/** - * @file mapped_logfile.hpp - * @brief This file extends the class MappedFile for the logfile - */ -#ifndef MAPPED_LOGFILE_HPP -#define MAPPED_LOGFILE_HPP - -/* Local header */ -#include "mapped_file.hpp" - -// TODO merge with LogFile -class MappedLogFile : public MappedFile { - public: - MappedLogFile(const std::string filename, bool read_only, bool track_mmap) - : MappedFile(filename, read_only, track_mmap){}; - - /** - * @brief read a time record. - * - * @param time_record (output) The time read. - * @param offset The offset to read - * - * @return The offset after the time record - */ - size_t ReadTimeRecord(struct time_record &time_record, size_t offset) { - - /* Initialize variables. */ - time_record.int_time = 0; - time_record.time = 0; - time_record.offset = offset; - - /* read record header. */ - struct record_header header; - offset = ReadRecordHeader(offset, header); - - /* check if reading a time record. */ - if (CSDS_TIMESTAMP_MASK != header.mask) csds_error("Not a time record."); - - /* read the record. */ - offset = - ReadData(offset, sizeof(unsigned long long int), &time_record.int_time); - offset = ReadData(offset, sizeof(double), &time_record.time); - - return offset; - } - - /** - * @brief read a record header. - * - * @param offset The offset in the file - * @param header (output) header read from the file. - * - * @return The offset after the record header. - */ - INLINE size_t ReadRecordHeader(size_t offset, record_header &header) { -#ifdef CSDS_MMAP_TRACKING - MMapTrackingElement el(offset); -#endif - - /* read mask */ - header.mask = 0; - memcpy(&header.mask, this->mMap + offset, CSDS_MASK_SIZE); - offset += CSDS_MASK_SIZE; - - /* read offset */ - header.offset = 0; - memcpy(&header.offset, this->mMap + offset, CSDS_OFFSET_SIZE); - offset += CSDS_OFFSET_SIZE; - -#ifdef CSDS_MMAP_TRACKING - /* Write the result into the file */ - if (this->mTracking.use_tracking) { - this->WriteTracking(el); - } -#endif - - return offset; - } - - bool GetNextRecord(const struct header &h, size_t &offset); - size_t ReverseOffset(const struct header &h, size_t offset); - - protected: - bool GetNextRecordBackward(const struct header &h, size_t &offset); - bool GetNextRecordForward(size_t &offset); -}; - -#endif // MAPPED_LOGFILE_HPP diff --git a/src/particle.hpp b/src/particle.hpp index 46badfd85b4ee1f4d49221318b5a0350752ba95c..7bde238cd184b83b53a7022e756ce7f2e0978fbe 100644 --- a/src/particle.hpp +++ b/src/particle.hpp @@ -24,9 +24,8 @@ /* Include the other local files. 
*/ #include "definitions.hpp" -#include "header.hpp" #include "interpolation.hpp" -#include "mapped_logfile.hpp" +#include "logfile.hpp" #include "parameters.hpp" #include "time_array.hpp" @@ -52,7 +51,7 @@ INLINE static size_t csds_particle_read_field(size_t offset, void *output, const int derivative, struct record_header &header, const std::vector<Field> &fields, - MappedLogFile &log_map) { + LogFile &log_map) { /* Read the record's mask. */ offset = log_map.ReadRecordHeader(offset, header); @@ -138,7 +137,7 @@ INLINE static enum csds_special_flags csds_unpack_flags_and_data( */ INLINE static enum csds_special_flags csds_particle_read_special_flag( size_t offset, struct record_header &header, int *data, int *part_type, - MappedLogFile &map) { + LogFile &map) { /* Read the record's mask. */ offset = map.ReadRecordHeader(offset, header); diff --git a/src/particle_type.hpp b/src/particle_type.hpp index e9a3572d0c97b43cae6307fd355c0c47647d8217..3ef6889a21c3ec220693407f799b654956619e48 100644 --- a/src/particle_type.hpp +++ b/src/particle_type.hpp @@ -24,6 +24,7 @@ * * Note we use the historical values from Gadget for these fields. */ +// TODO Should move within a structure enum part_type { csds_type_gas = 0, csds_type_dark_matter = 1, diff --git a/src/python_wrapper.cpp b/src/python_wrapper.cpp index f0c36c797f77cdc99d07e32c8a1778357d68fd1e..8aabf662bbfc065a2ec1ddbb420817343157a90d 100644 --- a/src/python_wrapper.cpp +++ b/src/python_wrapper.cpp @@ -405,7 +405,7 @@ bp::list PyReader::GetListFields(bp::object &part_type) { GetParticleTypesFromObject(part_type); /* Inputs are done, now get the fields */ - const struct header *h = &mReader->mLog->header; + const Header &h = mReader->mLog->GetHeader(); /* Find if the fields are present in all the types. */ std::vector<int> field_present(field_enum_count, 1); @@ -418,7 +418,7 @@ bp::list PyReader::GetListFields(bp::object &part_type) { /* Search among all the fields of the particle type */ int found = 0; - for (auto const &field : h->fields[type]) { + for (auto const &field : h.GetFields()[type]) { if (field.GetField() == (field_enum)i) { found = 1; break; @@ -459,6 +459,7 @@ bp::list PyReader::GetListFields(bp::object &part_type) { * @return The list of fields to read as enum. */ std::vector<Field> PyReader::GetFieldsFromNames(bp::list &fields) { + const Header &header = mReader->mLog->GetHeader(); /* Get the field enum from the header. 
*/ std::vector<Field> out; @@ -474,7 +475,7 @@ std::vector<Field> PyReader::GetFieldsFromNames(bp::list &fields) { /* Get the field */ bool found = false; for (int type = 0; type < csds_type_count; type++) { - for (auto const &field : mReader->mLog->header.fields[type]) { + for (auto const &field : header.GetFields()[type]) { if (field.GetName().compare(get_string()) == 0) { out.emplace_back(field); found = true; diff --git a/src/reader.cpp b/src/reader.cpp index bca92e0f4919b92e1e49c71b976611e1526e4ea4..5b64ef0225835e604a537f10214c1598872c03f8 100644 --- a/src/reader.cpp +++ b/src/reader.cpp @@ -73,7 +73,7 @@ Reader::Reader(const string basename, int verbose, int number_index, /* Save the time array */ string filename = basename + "_0000.index"; - this->mLog->times.Save(filename); + this->mLog->SaveTimeArray(filename); if (verbose > 1) message("Initialization done."); } @@ -168,6 +168,8 @@ void Reader::SetTime(double time) { unsigned int left = 0; unsigned int right = this->mIndex.n_files - 1; + const TimeArray × = mLog->GetTimeArray(); + /* Check if the time is meaningful */ if (this->mIndex.times[left] > time || this->mIndex.times[right] < time) csds_error("The requested time " @@ -211,19 +213,19 @@ void Reader::SetTime(double time) { } /* Get the offset of the time chunk */ - size_t ind = this->mLog->times.GetIndexFromTime(time); + size_t ind = times.GetIndexFromTime(time); /* We need to time record just above the current time */ ind++; /* For the final time, we wish to use the time record just before writing all the particles */ - if (ind == this->mLog->times.Size() - 1) { + if (ind == times.Size() - 1) { ind--; } /* Save the values */ - this->mTime.int_time = this->mLog->times[ind].int_time; - this->mTime.time_offset = this->mLog->times[ind].offset; + this->mTime.int_time = times[ind].int_time; + this->mTime.time_offset = times[ind].offset; } /** @@ -418,9 +420,11 @@ int Reader::ReadParticle(double time, std::vector<CsdsArray> &output, enum part_type type, int *number_jumps) { /* Get global variables */ - const struct header *h = &this->mLog->header; + const Header &h = mLog->GetHeader(); size_t offset_time = this->mTime.time_offset; + const TimeArray × = mLog->GetTimeArray(); + /* Index in the cache */ int64_t cache_index = -1; @@ -443,14 +447,14 @@ int Reader::ReadParticle(double time, std::vector<CsdsArray> &output, while (offset < offset_time) { *number_jumps += 1; /* Read the particle. */ - this->mLog->log->ReadRecordHeader(offset, header); + this->mLog->ReadRecordHeader(offset, header); /* Is the particle removed from the logfile? */ if (header.mask & CSDS_SPECIAL_FLAGS_MASK) { int data = 0; int part_type = 0; enum csds_special_flags flag = csds_particle_read_special_flag( - offset, header, &data, &part_type, *this->mLog->log); + offset, header, &data, &part_type, *this->mLog); #ifdef CSDS_DEBUG_CHECKS if (part_type != type) { @@ -492,8 +496,8 @@ int Reader::ReadParticle(double time, std::vector<CsdsArray> &output, /* Read the field */ csds_particle_read_field(offset, current_output, full_field, - /* derivative */ 0, header, h->fields[type], - *this->mLog->log); + /* derivative */ 0, header, h.GetFields()[type], + *this->mLog); /* Deal with the first derivative. 
*/ int first_found = @@ -503,8 +507,8 @@ int Reader::ReadParticle(double time, std::vector<CsdsArray> &output, if (first_found) { /* Read the first derivative */ csds_particle_read_field(offset, first_deriv, full_field, - /* derivative */ 1, header, h->fields[type], - *this->mLog->log); + /* derivative */ 1, header, h.GetFields()[type], + *this->mLog); } /* Deal with the second derivative. */ @@ -515,24 +519,24 @@ int Reader::ReadParticle(double time, std::vector<CsdsArray> &output, if (second_found) { /* Read the first derivative */ csds_particle_read_field(offset, second_deriv, full_field, - /* derivative */ 2, header, h->fields[type], - *this->mLog->log); + /* derivative */ 2, header, h.GetFields()[type], + *this->mLog); } /* Get the time. */ // TODO reduce search interval - double time_before = this->mLog->times.GetRecordFromOffset(offset).time; + double time_before = times.GetRecordFromOffset(offset).time; /* Get the mask */ - this->mLog->log->ReadRecordHeader(offset_next, header); + this->mLog->ReadRecordHeader(offset_next, header); /* Output after the requested time. */ char output_after[current_field.GetSize()]; /* Read the field */ csds_particle_read_field(offset_next, output_after, full_field, - /* derivative */ 0, header, h->fields[type], - *this->mLog->log); + /* derivative */ 0, header, h.GetFields()[type], + *this->mLog); /* Deal with the first derivative. */ char first_deriv_after[size_first]; @@ -543,8 +547,8 @@ int Reader::ReadParticle(double time, std::vector<CsdsArray> &output, if (first_found) { /* Read the first derivative */ csds_particle_read_field(offset_next, first_deriv_after, full_field, - /*derivative*/ 1, header, h->fields[type], - *this->mLog->log); + /*derivative*/ 1, header, h.GetFields()[type], + *this->mLog); } /* Deal with the second derivative. */ @@ -556,13 +560,13 @@ int Reader::ReadParticle(double time, std::vector<CsdsArray> &output, if (second_found) { /* Read the second derivative */ csds_particle_read_field(offset_next, second_deriv_after, full_field, - /* derivative */ 2, header, h->fields[type], - *this->mLog->log); + /* derivative */ 2, header, h.GetFields()[type], + *this->mLog); } /* Get the time. */ // TODO reduce search interval - double time_after = this->mLog->times.GetRecordFromOffset(offset_next).time; + double time_after = times.GetRecordFromOffset(offset_next).time; /* Deal with the derivatives */ struct csds_reader_field before; @@ -844,7 +848,7 @@ void Reader::ReadParticlesFromIds( * * @return The initial time */ -double Reader::GetTimeBegin() { return this->mLog->times[0].time; } +double Reader::GetTimeBegin() { return this->mLog->GetTimeArray()[0].time; } /** * @brief Get the simulation final time. @@ -852,8 +856,9 @@ double Reader::GetTimeBegin() { return this->mLog->times[0].time; } * @return The final time */ double Reader::GetTimeEnd() { - const size_t ind = this->mLog->times.Size(); - return this->mLog->times[ind - 1].time; + const TimeArray × = mLog->GetTimeArray(); + const size_t ind = times.Size(); + return times[ind - 1].time; } /** @@ -864,12 +869,13 @@ double Reader::GetTimeEnd() { * @return The offset of the timestamp. 
*/ size_t Reader::GetNextOffsetFromTime(double time) { - size_t ind = this->mLog->times.GetIndexFromTime(time); + const TimeArray × = mLog->GetTimeArray(); + size_t ind = times.GetIndexFromTime(time); /* We do not want to have the sentiel */ - if (this->mLog->times.Size() - 2 == ind) { + if (times.Size() - 2 == ind) { ind -= 1; } - return this->mLog->times[ind + 1].offset; + return times[ind + 1].offset; } /** @@ -890,6 +896,7 @@ int Reader::UpdateSingleParticle(size_t index, enum part_type type, double time, void *output) { Cache &cache = this->mCache[type]; + const TimeArray × = mLog->GetTimeArray(); /* Get the time */ double time_before = cache.GetTimeBefore(index); @@ -903,14 +910,14 @@ int Reader::UpdateSingleParticle(size_t index, enum part_type type, double time, /* Find the correct record */ while (offset_next < offset_time) { /* Read the particle. */ - this->mLog->log->ReadRecordHeader(offset_next, header); + this->mLog->ReadRecordHeader(offset_next, header); /* Is the particle removed? */ if (header.mask & CSDS_SPECIAL_FLAGS_MASK) { int data = 0; int part_type = 0; enum csds_special_flags flag = csds_particle_read_special_flag( - offset_next, header, &data, &part_type, *this->mLog->log); + offset_next, header, &data, &part_type, *this->mLog); #ifdef CSDS_DEBUG_CHECKS if (part_type != type) { @@ -933,14 +940,14 @@ int Reader::UpdateSingleParticle(size_t index, enum part_type type, double time, /* Get a few variables for later */ size_t offset_before = offset_next - header.offset; - time_before = this->mLog->times.GetRecordFromOffset(offset_before).time; - time_after = this->mLog->times.GetRecordFromOffset(offset_next).time; + time_before = times.GetRecordFromOffset(offset_before).time; + time_after = times.GetRecordFromOffset(offset_next).time; /* Update the cache */ cache.UpdateTime(index, time_before, time_after, offset_next); /* Get the fields for the next record */ - const struct header &h = this->mLog->header; + const Header &h = this->mLog->GetHeader(); for (int i = 0; i < cache.GetNumberFields(); i++) { const Field &field = cache.GetFieldFromIndex(i); struct csds_reader_field after = cache.GetFieldAfter(index, i); @@ -948,30 +955,30 @@ int Reader::UpdateSingleParticle(size_t index, enum part_type type, double time, /* Read the field */ csds_particle_read_field(offset_before, before.field, field, - /* derivative */ 0, header, h.fields[type], - *this->mLog->log); + /* derivative */ 0, header, h.GetFields()[type], + *this->mLog); csds_particle_read_field(offset_next, after.field, field, - /* derivative */ 0, header, h.fields[type], - *this->mLog->log); + /* derivative */ 0, header, h.GetFields()[type], + *this->mLog); /* Read the first derivative */ if (field.HasFirstDerivative()) { csds_particle_read_field(offset_before, before.first_deriv, field, - /* derivative */ 1, header, h.fields[type], - *this->mLog->log); + /* derivative */ 1, header, + h.GetFields()[type], *this->mLog); csds_particle_read_field(offset_next, after.first_deriv, field, - /* derivative */ 1, header, h.fields[type], - *this->mLog->log); + /* derivative */ 1, header, + h.GetFields()[type], *this->mLog); } /* Read the second derivative */ if (field.HasSecondDerivative()) { csds_particle_read_field(offset_before, before.second_deriv, field, - /* derivative */ 2, header, h.fields[type], - *this->mLog->log); + /* derivative */ 2, header, + h.GetFields()[type], *this->mLog); csds_particle_read_field(offset_next, after.second_deriv, field, - /* derivative */ 2, header, h.fields[type], - *this->mLog->log); + /* 
derivative */ 2, header, + h.GetFields()[type], *this->mLog); } } } @@ -1094,9 +1101,10 @@ void Reader::UpdateParticlesSingleType( /* Now look for the new particles */ /* Check if we need to update the cache with the previous index file */ + const TimeArray × = mLog->GetTimeArray(); size_t offset_index = - this->mLog->times.GetIndexFromTime(this->mIndex.index_prev->GetTime()); - offset_index = this->mLog->times[offset_index].offset; + times.GetIndexFromTime(this->mIndex.index_prev->GetTime()); + offset_index = times[offset_index].offset; const bool do_previous = this->mCache[type].GetLastTimeOffset() < offset_index; @@ -1211,6 +1219,8 @@ void Reader::UpdateParticles( const chrono::high_resolution_clock::time_point init = chrono::high_resolution_clock::now(); + const TimeArray × = mLog->GetTimeArray(); + /* Check if the cache is enabled */ if (!this->mUseCache) { csds_error("Cannot update the particles without cache"); @@ -1221,8 +1231,7 @@ void Reader::UpdateParticles( if (this->mIndex.times[i] == this->mIndex.index_prev->GetTime()) { if (i == 0) break; - const size_t offset = - this->mLog->times.GetIndexFromTime(this->mIndex.times[i - 1]); + const size_t offset = times.GetIndexFromTime(this->mIndex.times[i - 1]); if (this->mCache[0].GetLastTimeOffset() < offset) { csds_error( "Trying to update the particles over more than one" diff --git a/src/reader_generate_index.cpp b/src/reader_generate_index.cpp index 2adf4ca77c1459c2c12a18aa4932a2f877746ad2..aef538ff2298ffed49ff85c4f53358184ad2e05a 100644 --- a/src/reader_generate_index.cpp +++ b/src/reader_generate_index.cpp @@ -201,7 +201,7 @@ void Reader::WriteIndex(CsdsUnorderedMap ¤t_state, const struct time_record *time, int file_number) { /* Get the filename */ - string filename = this->GetIndexName(file_number); + string filename = GetIndexName(file_number); /* Open file */ std::ofstream f(filename, std::ofstream::out | std::ofstream::binary); @@ -272,34 +272,34 @@ size_t Reader::GetInitialState(CsdsUnorderedMap ¤t_state, const high_resolution_clock::time_point init = high_resolution_clock::now(); /* Get a few variables. */ - LogFile *log = this->mLog; - const struct header *h = &log->header; const int size_record_header = CSDS_MASK_SIZE + CSDS_OFFSET_SIZE; + const TimeArray × = mLog->GetTimeArray(); + const Header &h = mLog->GetHeader(); /* Warn the OS that we will read in a sequential way */ - log->log->AdviceSequentialReading(); + mLog->AdviceSequentialReading(); /* Get the offset after the dump of all the particles and the time information of the first time record. */ - if (log->times.Size() < 2) { + if (times.Size() < 2) { csds_error("The time array is not large enough"); } - const size_t offset_max = log->times[1].offset; + const size_t offset_max = times[1].offset; /* Here we cheat a bit by using the 0. * The index files provide the last position known at a given offset. * For the first index at t=0, the particles are not written yet. * In order to be able to read t=0, we change a bit the behavior * for the first index file. 
*/ - *time_record = log->times[0]; + *time_record = times[0]; /* Get the offset of the first particle record */ - size_t offset_first = h->offset_first_record; + size_t offset_first = h.GetOffsetFirstRecord(); /* Skip the time record */ struct record_header header; - log->log->ReadRecordHeader(offset_first, header); - const int time_size = header_get_record_size_from_mask(h, header.mask); + mLog->ReadRecordHeader(offset_first, header); + const int time_size = h.GetRecordSizeFromMask(header.mask); offset_first += time_size + size_record_header; /* Get the initial state */ @@ -310,14 +310,14 @@ size_t Reader::GetInitialState(CsdsUnorderedMap ¤t_state, int data = 0; enum csds_special_flags flag = csds_particle_read_special_flag( - offset, header, &data, &part_type, *log->log); + offset, header, &data, &part_type, *mLog); if (flag != csds_flag_create) { csds_error("Reading a particle from ICs without the created flag."); } /* Get the mask for the IDs */ const Field &field_id = - header_get_field_from_name(h, "ParticleIDs", (enum part_type)part_type); + h.GetFieldFromName("ParticleIDs", (enum part_type)part_type); /* Get the particle ID */ if (!(field_id.GetField() & header.mask)) { @@ -327,8 +327,8 @@ size_t Reader::GetInitialState(CsdsUnorderedMap ¤t_state, /* Read the particle ID */ int64_t id = 0; csds_particle_read_field(offset, &id, field_id, - /* derivative */ 0, header, h->fields[part_type], - *this->mLog->log); + /* derivative */ 0, header, + h.GetFields()[part_type], *mLog); /* Log the particle */ if (current_state[part_type].count(id) != 0) { @@ -337,11 +337,11 @@ size_t Reader::GetInitialState(CsdsUnorderedMap ¤t_state, current_state[part_type][id] = offset; /* Increment the offset */ - const int record_size = header_get_record_size_from_mask(h, header.mask); + const int record_size = h.GetRecordSizeFromMask(header.mask); offset += record_size + size_record_header; /* Print the progress */ - if (this->mVerbose > 0) { + if (mVerbose > 0) { float percent = (float)(offset - offset_first) / (float)(offset_max - offset_first); percent *= 100; @@ -353,16 +353,16 @@ size_t Reader::GetInitialState(CsdsUnorderedMap ¤t_state, } /* Close progressbar */ - if (this->mVerbose > 0) { + if (mVerbose > 0) { printf("\n"); } /* Print the time */ - if (this->mVerbose > 0 || this->mVerbose == CSDS_VERBOSE_TIMERS) + if (mVerbose > 0 || mVerbose == CSDS_VERBOSE_TIMERS) message("took " << GetDeltaTime(init) << "ms"); /* Go back to normal */ - log->log->AdviceNormalReading(); + mLog->AdviceNormalReading(); return offset_max; } @@ -379,14 +379,11 @@ size_t Reader::GetInitialState(CsdsUnorderedMap ¤t_state, size_t Reader::GetLastOffsetBefore(const struct index_data &data, size_t offset_limit) { - /* Get a the logfile */ - LogFile *log = this->mLog; - size_t current_offset = data.offset; /* Get the full mask */ struct record_header last_header; - log->log->ReadRecordHeader(current_offset, last_header); + mLog->ReadRecordHeader(current_offset, last_header); /* Ensures that a special flag is present in the mask */ last_header.mask |= CSDS_SPECIAL_FLAGS_MASK; @@ -400,7 +397,7 @@ size_t Reader::GetLastOffsetBefore(const struct index_data &data, while (1) { /* Get the mask */ struct record_header cur_header; - log->log->ReadRecordHeader(current_offset, cur_header); + mLog->ReadRecordHeader(current_offset, cur_header); /* update the offset */ current_offset += cur_header.offset; @@ -449,8 +446,7 @@ size_t Reader::UpdateStateToNextIndex(size_t init_offset, CsdsUnorderedMap ¤t_state, struct index_writer 
                                           *parts_created,
                                       struct index_writer *parts_removed) {
-  LogFile *log = this->mLog;
-  const struct header *h = &log->header;
+  const Header &h = mLog->GetHeader();
   const int size_record_header = CSDS_MASK_SIZE + CSDS_OFFSET_SIZE;

   /* Look for all the created / removed particles */
@@ -461,12 +457,12 @@ size_t Reader::UpdateStateToNextIndex(size_t init_offset,
   const high_resolution_clock::time_point init = high_resolution_clock::now();

   /* Warn the OS that we will read in a sequential way */
-  log->log->AdviceSequentialReading();
+  mLog->AdviceSequentialReading();

   while (offset < time_record.offset) {
     /* Print status */
-    if (this->mVerbose > 0) {
+    if (mVerbose > 0) {
       step += 1;
       if (step % 100 == 0) {
         step = 0;
@@ -485,11 +481,11 @@ size_t Reader::UpdateStateToNextIndex(size_t init_offset,

     /* Get the mask */
     struct record_header header;
-    log->log->ReadRecordHeader(offset, header);
+    mLog->ReadRecordHeader(offset, header);

     /* Go to the next record */
     const size_t old_offset = offset;
-    offset += header_get_record_size_from_mask(h, header.mask);
+    offset += h.GetRecordSizeFromMask(header.mask);
     offset += size_record_header;

     /* Check if we have a particle with a flag */
@@ -500,7 +496,7 @@ size_t Reader::UpdateStateToNextIndex(size_t init_offset,

     /* Get the special flag */
     enum csds_special_flags flag = csds_particle_read_special_flag(
-        old_offset, header, &data, &part_type, *log->log);
+        old_offset, header, &data, &part_type, *mLog);

 #ifdef CSDS_DEBUG_CHECKS
     if (flag == csds_flag_none) {
@@ -517,13 +513,13 @@ size_t Reader::UpdateStateToNextIndex(size_t init_offset,
     /* Get the mask for the IDs */
     // TODO create an array outside the loop
     const Field &field_id =
-        header_get_field_from_name(h, "ParticleIDs", (enum part_type)part_type);
+        h.GetFieldFromName("ParticleIDs", (enum part_type)part_type);

     /* Read the ID */
     int64_t id = 0;
     csds_particle_read_field(old_offset, &id, field_id,
-                             /* derivative */ 0, header, h->fields[part_type],
-                             *this->mLog->log);
+                             /* derivative */ 0, header,
+                             h.GetFields()[part_type], *mLog);

     /* Add the particle to the arrays */
     if (flag == csds_flag_change_type || flag == csds_flag_mpi_exit ||
@@ -543,15 +539,15 @@ size_t Reader::UpdateStateToNextIndex(size_t init_offset,
   }

   /* Go back to normal */
-  log->log->AdviceNormalReading();
+  mLog->AdviceNormalReading();

   /* Cleanup output */
-  if (this->mVerbose > 0) {
+  if (mVerbose > 0) {
     printf("\n");
   }

   /* Print the time */
-  if (this->mVerbose > 0 || this->mVerbose == CSDS_VERBOSE_TIMERS)
+  if (mVerbose > 0 || mVerbose == CSDS_VERBOSE_TIMERS)
     message("Finding new/removed particles took " << GetDeltaTime(init) << "ms");
@@ -583,10 +579,10 @@ size_t Reader::UpdateStateToNextIndex(size_t init_offset,
     };

     /* Update the offset */
-    cur->second = this->GetLastOffsetBefore(index_data, time_record.offset);
+    cur->second = GetLastOffsetBefore(index_data, time_record.offset);

     /* Are we done or should we print something?
      */
-    if (!(this->mVerbose > 0)) continue;
+    if (!(mVerbose > 0)) continue;

     /* Update the counters */
     local_counter++;
@@ -610,10 +606,10 @@ size_t Reader::UpdateStateToNextIndex(size_t init_offset,
   }

   /* Cleanup the output */
-  if (this->mVerbose > 0) printf("\n");
+  if (mVerbose > 0) printf("\n");

   /* Print the time */
-  if (this->mVerbose > 0 || this->mVerbose == CSDS_VERBOSE_TIMERS)
+  if (mVerbose > 0 || mVerbose == CSDS_VERBOSE_TIMERS)
     message("Updating particles took " << GetDeltaTime(init2) << "ms");

   return offset;
@@ -628,11 +624,10 @@ size_t Reader::UpdateStateToNextIndex(size_t init_offset,
  */
 void Reader::GenerateIndexFiles(int number_index, int current_index) {
   /* Get a few pointers */
-  LogFile *log = this->mLog;
-  const struct header *h = &log->header;
+  const Header &h = mLog->GetHeader();

   /* Write a quick message */
-  if (this->mVerbose > 0) {
+  if (mVerbose > 0) {
     message("Generating " << number_index << " index files");
     if (current_index) {
       message("Restarting from index: " << current_index);
@@ -645,7 +640,7 @@ void Reader::GenerateIndexFiles(int number_index, int current_index) {
   }

   /* Ensure that the offset are in the assumed direction */
-  if (!header_is_forward(h)) {
+  if (!h.OffsetsAreForward()) {
     csds_error("The offset are not in the expected direction");
   }

@@ -665,8 +660,9 @@ void Reader::GenerateIndexFiles(int number_index, int current_index) {
   size_t offset = 0;

   /* Variables for the time of each index files */
-  const double t_min = log->times[0].time;
-  const double t_max = log->times[log->times.Size() - 1].time;
+  const TimeArray &times = mLog->GetTimeArray();
+  const double t_min = times[0].time;
+  const double t_max = times[times.Size() - 1].time;
   const double dt = (t_max - t_min) / (number_index - 1);

   /* Are we restarting? If not allocate and get the initial state */
@@ -680,19 +676,19 @@ void Reader::GenerateIndexFiles(int number_index, int current_index) {

     /* Get the initial state */
     struct time_record time_record;
-    offset = this->GetInitialState(current_state, &time_record);
+    offset = GetInitialState(current_state, &time_record);

     /* Write the first index file */
-    this->WriteIndex(current_state, parts_created, parts_removed, &time_record,
-                     /* file_number */ 0);
+    WriteIndex(current_state, parts_created, parts_removed, &time_record,
+               /* file_number */ 0);
   }
   /* We are restarting => read state from file */
   else {
     /* Get the index file name */
-    string filename = this->GetIndexName(current_index - 1);
+    string filename = GetIndexName(current_index - 1);

     /* Initialize the index file */
-    IndexFile index(filename, this->mVerbose);
+    IndexFile index(filename, mVerbose);
     index.MapFile(filename, /* sorted */ 1, this->mVerbose);

     /* Loop over all the particle types */
@@ -715,12 +711,12 @@ void Reader::GenerateIndexFiles(int number_index, int current_index) {
     if (current_index == 1) {
       /* In this case, we need to cheat a bit (see
        * GetInitialState) */
-      offset = log->times[1].offset;
+      offset = times[1].offset;
     } else {
       const double current_approximate_time = t_min + (current_index - 1) * dt;
       const size_t index_time =
-          log->times.GetIndexFromTime(current_approximate_time);
-      struct time_record index_time_record = log->times[index_time];
+          times.GetIndexFromTime(current_approximate_time);
+      struct time_record index_time_record = times[index_time];
       offset = index_time_record.offset;

       /* Check if we are reading the correct file */
@@ -741,22 +737,21 @@ void Reader::GenerateIndexFiles(int number_index, int current_index) {
      * The index files are only here to speedup the code,
     * no need to have the exact time. */
    const double current_approximate_time = t_min + file_number * dt;
-    const size_t index_time =
-        log->times.GetIndexFromTime(current_approximate_time);
-    struct time_record time_record = log->times[index_time];
+    const size_t index_time = times.GetIndexFromTime(current_approximate_time);
+    struct time_record time_record = times[index_time];

    /* Ensure that we really have the final time (rounding error). */
    if (file_number == number_index - 1) {
-      time_record = log->times[log->times.Size() - 1];
+      time_record = times[times.Size() - 1];
    }

    /* Update the state until the next index file. */
-    offset = this->UpdateStateToNextIndex(offset, time_record, current_state,
-                                          parts_created, parts_removed);
+    offset = UpdateStateToNextIndex(offset, time_record, current_state,
+                                    parts_created, parts_removed);

    /* Write the index file */
-    this->WriteIndex(current_state, parts_created, parts_removed, &time_record,
-                     file_number);
+    WriteIndex(current_state, parts_created, parts_removed, &time_record,
+               file_number);
  }

  /* Free the memory */
diff --git a/src/time_array.hpp b/src/time_array.hpp
index d714a2e8859390e9fe7fa8c06c38e53ffd293b45..579f3d7fb1ffd90ad6559b466d910a3c3efcde61 100644
--- a/src/time_array.hpp
+++ b/src/time_array.hpp
@@ -165,7 +165,7 @@ class TimeArray {
   /**
    * @brief Access an element
    */
-  INLINE struct time_record const &operator[](std::size_t i) {
+  INLINE struct time_record const &operator[](std::size_t i) const {
 #ifdef CSDS_DEBUG_CHECKS
     if (i >= Size()) {
       csds_error(
diff --git a/src/tools.cpp b/src/tools.cpp
index 7e83063c21bb85e491d427fa588a42c5c2ee61e4..8527cd0203b746802ef7b6ad953dc80f72240a8c 100644
--- a/src/tools.cpp
+++ b/src/tools.cpp
@@ -29,66 +29,6 @@
 #include "particle.hpp"
 #include "reader.hpp"

-/**
- * @brief debugging function checking the offset and the mask of a record.
- *
- * Compare the mask with the one pointed by the header.
- * if the record is a particle, check the id too.
- *
- * @param reader The #csds_reader.
- * @param offset position of the record.
- *
- * @return position after the record.
- */
-size_t tools_check_record_consistency(LogFile *log, size_t offset) {
-#ifndef CSDS_DEBUG_CHECKS
-  csds_error("Should not check in non debug mode.");
-#endif
-
-  const struct header *h = &log->header;
-  MappedLogFile &map = *log->log;
-  const size_t init_offset = offset;
-
-  /* read mask + offset. */
-  struct record_header header;
-  offset = map.ReadRecordHeader(offset, header);
-
-  /* set offset after current record. */
-  offset += header_get_record_size_from_mask(h, header.mask);
-  const size_t offset_ret = offset;
-
-  /* If something happened, skip the check. */
-  if (header.mask & CSDS_SPECIAL_FLAGS_MASK) {
-    return offset_ret;
-  }
-
-  /* get absolute offset. */
-  if (header_is_forward(h))
-    header.offset += init_offset;
-  else if (header_is_backward(h)) {
-    if (init_offset < header.offset)
-      csds_error("Offset too large for mask: " << header.mask);
-    header.offset = init_offset - header.offset;
-  } else {
-    csds_error("Offset are corrupted.");
-  }
-
-  if (header.offset == init_offset || header.offset == 0) return offset_ret;
-
-  /* read mask of the pointed record. */
-  struct record_header pointed_header;
-  map.ReadRecordHeader(header.offset, pointed_header);
-
-  /* check if not mixing timestamp and particles.
-   */
-  if ((pointed_header.mask != CSDS_TIMESTAMP_MASK &&
-       header.mask == CSDS_TIMESTAMP_MASK) ||
-      (pointed_header.mask == CSDS_TIMESTAMP_MASK &&
-       header.mask != CSDS_TIMESTAMP_MASK))
-    csds_error("Error in the offset for mask: " << header.mask);
-
-  return offset_ret;
-}
-
 using namespace std::chrono;

 /**
diff --git a/src/tools.hpp b/src/tools.hpp
index 94e407814ffd740452d9eaf0d81c5a7cbb0b70b3..76c5ee20a7da7485053ffec45e2d7f06a23b6338 100644
--- a/src/tools.hpp
+++ b/src/tools.hpp
@@ -47,8 +47,6 @@ struct csds_reader_field {
   void *second_deriv;
 };

-size_t tools_check_record_consistency(LogFile *log, size_t offset);
-
 void tools_print_progress(
     float percentage, const std::chrono::high_resolution_clock::time_point init,
     const std::string message);
diff --git a/tests/testLogfileHeader.cpp b/tests/testLogfileHeader.cpp
index d48c83c6ae7decbbf636d2194adc58e69d608371..ffac94d65f53627ec0a98fe94e8e9c3c4f4c480e 100644
--- a/tests/testLogfileHeader.cpp
+++ b/tests/testLogfileHeader.cpp
@@ -55,24 +55,24 @@ int main(void) {
      Finally check everything. */

-  struct header *h = &logfile.header;
+  const Header &h = logfile.GetHeader();

   message("Checking versions.");
-  assert(h->major_version == CSDS_MAJOR_VERSION);
-  assert(h->minor_version == CSDS_MINOR_VERSION);
+  assert(h.GetMajorVersion() == CSDS_MAJOR_VERSION);
+  assert(h.GetMinorVersion() == CSDS_MINOR_VERSION);

   message("Checking number of masks");
   /* Compute the number of masks */
   int count_fields = 2;  // Timestamp + special flags
   for (int type = 0; type < csds_type_count; type++) {
-    count_fields += h->fields[type].size();
+    count_fields += h.GetFields()[type].size();
     /* Remove the copies of the special flags */
-    if (h->fields[type].size() != 0) count_fields -= 1;
+    if (h.GetFields()[type].size() != 0) count_fields -= 1;
   }
   const int number_masks_ref = csds_type_count * TEST_NUMBER_MASKS + 2;
   assert(number_masks_ref == count_fields);

   message("Checking offset direction");
-  assert(h->offset_direction == csds_offset_backward);
+  assert(h.GetOffsetDirection() == csds_offset_backward);

   return 0;
 }
diff --git a/tests/testLogfileReader.cpp b/tests/testLogfileReader.cpp
index 3d75de432871fb166d0dffc8cb40abbc4e3ab005..ca42622b02395e8f81a52cbbca161729ab412601 100644
--- a/tests/testLogfileReader.cpp
+++ b/tests/testLogfileReader.cpp
@@ -50,7 +50,7 @@ void check_data(Reader &reader, struct csds_part *parts) {

   /* Get required structures.
   */
   LogFile &logfile = *reader.mLog;
-  struct header *h = &reader.mLog->header;
+  const Header &h = reader.mLog->GetHeader();

   /* Index of the fields */
   int index_ids = -1;
@@ -58,13 +58,14 @@ void check_data(Reader &reader, struct csds_part *parts) {

   /* Create a particle */
   std::vector<CsdsArray> output;
-  output.reserve(h->fields[csds_type_gas].size());
-  for (size_t i = 0; i < h->fields[csds_type_gas].size(); i++) {
-    output.emplace_back(1, h->fields[csds_type_gas][i]);
-    if (h->fields[csds_type_gas][i].GetField() == field_enum_particles_ids) {
+  output.reserve(h.GetFields()[csds_type_gas].size());
+  for (size_t i = 0; i < h.GetFields()[csds_type_gas].size(); i++) {
+    output.emplace_back(1, h.GetFields()[csds_type_gas][i]);
+    if (h.GetFields()[csds_type_gas][i].GetField() ==
+        field_enum_particles_ids) {
       index_ids = i;
     }
-    if (h->fields[csds_type_gas][i].GetField() == field_enum_coordinates) {
+    if (h.GetFields()[csds_type_gas][i].GetField() == field_enum_coordinates) {
       index_coordinates = i;
     }
   }
@@ -77,16 +78,16 @@ void check_data(Reader &reader, struct csds_part *parts) {
   const uint64_t id_flag = 5 * number_parts;
   uint64_t previous_id = id_flag;

-  size_t offset = logfile.header.offset_first_record;
+  size_t offset = h.GetOffsetFirstRecord();
   size_t offset_time = offset;
   int step = -1;

   /* Loop over each record. */
-  while (offset < logfile.log->GetFileSize()) {
+  while (offset < logfile.GetFileSize()) {
     const bool is_particle = offset != offset_time;
     struct time_record record;
     struct record_header header;
-    logfile.log->ReadRecordHeader(offset, header);
+    logfile.ReadRecordHeader(offset, header);

     /* Read the particle */
     if (is_particle) {
@@ -94,13 +95,13 @@ void check_data(Reader &reader, struct csds_part *parts) {
       size_t offset_after = 0;
       for (auto &current : output) {
         offset_after = csds_particle_read_field(
             offset, current[0], current.GetField(),
-            /* derivative */ 0, header, h->fields[csds_type_gas], *logfile.log);
+            /* derivative */ 0, header, h.GetFields()[csds_type_gas], logfile);
       }
       offset = offset_after;
     }
     /* Read the time */
     else {
-      offset = logfile.log->ReadTimeRecord(record, offset);
+      offset = logfile.ReadTimeRecord(record, offset);
       offset_time += header.offset;
     }
diff --git a/tests/testVirtualReality.cpp b/tests/testVirtualReality.cpp
index b7fa8f28b6d1ca4611a63ed4adf7d24babeba253..8026e69399a70a99becf17a2b3d219531260e552 100644
--- a/tests/testVirtualReality.cpp
+++ b/tests/testVirtualReality.cpp
@@ -91,7 +91,7 @@ int main(void) {

   /* Create the output */
   std::vector<CsdsArray> output;
-  for (auto field : reader.mLog->header.fields[csds_type_gas]) {
+  for (auto field : reader.mLog->GetHeader().GetFields()[csds_type_gas]) {
     if (field.GetField() == field_enum_coordinates ||
         field.GetField() == field_enum_particles_ids) {
       output.emplace_back(n_tot, field);