Merge pull request #37203 from rosswhitfield/log_compress
Add Logarithmic event grouping to CompressEvents
SilkeSchomann committed Apr 30, 2024
2 parents 83f425f + 2af185d commit 1d33ef0
Showing 7 changed files with 433 additions and 14 deletions.
29 changes: 26 additions & 3 deletions Framework/DataHandling/src/CompressEvents.cpp
@@ -12,6 +12,8 @@
#include "MantidKernel/BoundedValidator.h"
#include "MantidKernel/DateAndTimeHelpers.h"
#include "MantidKernel/DateTimeValidator.h"
#include "MantidKernel/EnumeratedString.h"
#include "MantidKernel/ListValidator.h"

#include "tbb/parallel_for.h"

@@ -26,6 +28,12 @@ using namespace Kernel;
using namespace API;
using namespace DataObjects;

namespace {
const std::vector<std::string> binningModeNames{"Default", "Linear", "Logarithmic"};
enum class BinningMode { DEFAULT, LINEAR, LOGARITHMIC, enum_count };
typedef Mantid::Kernel::EnumeratedString<BinningMode, &binningModeNames> BINMODE;
} // namespace

void CompressEvents::init() {
declareProperty(std::make_unique<WorkspaceProperty<EventWorkspace>>("InputWorkspace", "", Direction::Input),
"The name of the EventWorkspace on which to perform the algorithm");
@@ -36,10 +44,12 @@ void CompressEvents::init() {
// Tolerance must be >= 0.0
auto mustBePositive = std::make_shared<BoundedValidator<double>>();
mustBePositive->setLower(0.0);
declareProperty(std::make_unique<PropertyWithValue<double>>("Tolerance", 1e-5, mustBePositive, Direction::Input),
declareProperty(std::make_unique<PropertyWithValue<double>>("Tolerance", 1e-5, Direction::Input),
"The tolerance on each event's X value (normally TOF, but may be a "
"different unit if you have used ConvertUnits).\n"
"Any events within Tolerance will be summed into a single event.");
"Any events within Tolerance will be summed into a single event. When compressing where positive is "
"linear tolerance, negative is logorithmic tolerance, and zero indicates that time-of-flight must be "
"identical to compress.");

declareProperty(
std::make_unique<PropertyWithValue<double>>("WallClockTolerance", EMPTY_DBL(), mustBePositive, Direction::Input),
@@ -54,15 +64,28 @@
"starting filtering. Ignored if WallClockTolerance is not specified. "
"Default is start of run",
Direction::Input);

declareProperty("BinningMode", binningModeNames[size_t(BinningMode::DEFAULT)],
std::make_shared<Mantid::Kernel::StringListValidator>(binningModeNames),
"Binning behavior can be specified in the usual way through sign of tolerance and other properties "
"('Default'); or can be set to one of the allowed binning modes. This will override all other "
"specification or default behavior.");
}

void CompressEvents::exec() {
// Get the input workspace
EventWorkspace_sptr inputWS = getProperty("InputWorkspace");
EventWorkspace_sptr outputWS = getProperty("OutputWorkspace");
const double toleranceTof = getProperty("Tolerance");
double toleranceTof = getProperty("Tolerance");
const double toleranceWallClock = getProperty("WallClockTolerance");
const bool compressFat = !isEmpty(toleranceWallClock);

BINMODE mode = getPropertyValue("BinningMode");
if (mode == BinningMode::LINEAR)
toleranceTof = std::fabs(toleranceTof);
else if (mode == BinningMode::LOGARITHMIC)
toleranceTof = -1. * std::fabs(toleranceTof);

Types::Core::DateAndTime startTime;

if (compressFat) {
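As a rough usage sketch of how the new properties combine (mirroring the pattern of the unit tests below; the workspace names are placeholders and an EventWorkspace called "events_in" is assumed to already exist in the AnalysisDataService), the two calls here should produce the same logarithmic compression: in "Default" mode the sign of Tolerance selects the behaviour, while an explicit "Logarithmic" mode overrides the sign.

// Hedged sketch, not part of the commit: two equivalent ways to request log grouping.
CompressEvents algDefault;
algDefault.initialize();
algDefault.setPropertyValue("InputWorkspace", "events_in");
algDefault.setPropertyValue("OutputWorkspace", "events_out_a");
algDefault.setProperty("Tolerance", -0.01); // Default mode: negative means logarithmic
algDefault.execute();

CompressEvents algExplicit;
algExplicit.initialize();
algExplicit.setPropertyValue("InputWorkspace", "events_in");
algExplicit.setPropertyValue("OutputWorkspace", "events_out_b");
algExplicit.setProperty("Tolerance", 0.01);                 // the sign is ignored here...
algExplicit.setPropertyValue("BinningMode", "Logarithmic"); // ...because the mode is explicit
algExplicit.execute();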
73 changes: 72 additions & 1 deletion Framework/DataHandling/test/CompressEventsTest.h
@@ -20,6 +20,7 @@ using namespace Mantid::DataHandling
using namespace Mantid::API;
using namespace Mantid::Geometry;
using namespace Mantid::DataObjects;
using namespace Mantid::Types::Core;

class CompressEventsTest : public CxxTest::TestSuite {
public:
@@ -32,7 +33,6 @@ class CompressEventsTest : public CxxTest::TestSuite {
void test_InvalidInputs() {
CompressEvents alg;
TS_ASSERT_THROWS_NOTHING(alg.initialize());
TS_ASSERT_THROWS(alg.setPropertyValue("Tolerance", "-1.0"), const std::invalid_argument &);
TS_ASSERT_THROWS_NOTHING(alg.setPropertyValue("Tolerance", "0.0"));
}

@@ -128,4 +128,75 @@
void test_InPlace_ZeroTolerance_WithPulseTime() {
doTest("CompressEvents_input", "CompressEvents_input", 0.0, 50, .001);
}

void doLogarithmicTest(const std::string &binningMode, const double tolerance, double wallClockTolerance = 0.) {
EventWorkspace_sptr input, output;
EventType eventType = WEIGHTED_NOTIME;
if (wallClockTolerance > 0.)
eventType = WEIGHTED;

/** Create event workspace with:
* 1 pixel
* 64 histogrammed bins from 0.0 in steps of 1.0
* 128 events; two in each bin, at time 1.0, 2.0, etc.
* PulseTime = 1 second, 2 seconds, etc.
*/
input = WorkspaceCreationHelper::createEventWorkspace(1, 64, 64, 0, 1, 2);
AnalysisDataService::Instance().addOrReplace("CompressEvents_input", input);

TS_ASSERT_EQUALS(input->getNumberEvents(), 128);
const double inputIntegral = input->getSpectrum(0).integrate(0., 100., true);

CompressEvents alg;
alg.initialize();
alg.setPropertyValue("InputWorkspace", "CompressEvents_input");
alg.setPropertyValue("OutputWorkspace", "CompressEvents_output");
alg.setProperty("Tolerance", tolerance);
alg.setPropertyValue("BinningMode", binningMode);
if (wallClockTolerance > 0.) {
alg.setProperty("WallClockTolerance", wallClockTolerance);
alg.setProperty("StartTime",
"2010-01-01T00:00:00"); // copied from createEventWorkspace
}

TS_ASSERT_THROWS_NOTHING(alg.execute());
TS_ASSERT(alg.isExecuted());

output = AnalysisDataService::Instance().retrieveWS<EventWorkspace>("CompressEvents_output");

TS_ASSERT_EQUALS(output->getNumberEvents(), 7);
TS_ASSERT_EQUALS(output->getEventType(), eventType);
TS_ASSERT_DELTA(output->getSpectrum(0).integrate(0., 100., true), inputIntegral, 1.e-6);

EventList el = output->getSpectrum(0);
TS_ASSERT_DELTA(el.getEvent(0).weight(), 2.0, 1e-6);
TS_ASSERT_DELTA(el.getEvent(0).errorSquared(), 2.0, 1e-6);
TS_ASSERT_DELTA(el.getEvent(0).tof(), 0.5, 1e-6);
for (int i = 1; i < 7; i++) {
TS_ASSERT_DELTA(el.getEvent(i).weight(), 1.0 * pow(2, i), 1e-6);
TS_ASSERT_DELTA(el.getEvent(i).errorSquared(), 1.0 * pow(2, i), 1e-6);
TS_ASSERT_DELTA(el.getEvent(i).tof(), 0.75 * pow(2, i), 1e-6);
}

if (wallClockTolerance > 0.) {
const auto startTime = DateAndTime("2010-01-01T00:00:00");
TS_ASSERT_EQUALS(el.getEvent(0).pulseTime(), startTime);
TS_ASSERT_EQUALS(el.getEvent(1).pulseTime(), startTime + 1.0);
TS_ASSERT_EQUALS(el.getEvent(2).pulseTime(), startTime + 2.5);
TS_ASSERT_EQUALS(el.getEvent(3).pulseTime(), startTime + 5.5);
TS_ASSERT_EQUALS(el.getEvent(4).pulseTime(), startTime + 11.5);
TS_ASSERT_EQUALS(el.getEvent(5).pulseTime(), startTime + 23.5);
TS_ASSERT_EQUALS(el.getEvent(6).pulseTime(), startTime + 47.5);
} else {
const auto timeZero = DateAndTime{0};
for (int i = 0; i < 7; i++) {
TS_ASSERT_EQUALS(el.getEvent(i).pulseTime(), timeZero);
}
}
}

void test_Logarithmic_binning() { doLogarithmicTest("Logarithmic", 1.); }
void test_Logarithmic_binning_default() { doLogarithmicTest("Default", -1.); }
void test_Logarithmic_binning_WithPulseTime() { doLogarithmicTest("Logarithmic", 1., 64); }
void test_Logarithmic_binning_default_WithPulseTime() { doLogarithmicTest("Default", -1., 64); }
};
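For reference, the values asserted in doLogarithmicTest follow from the doubling log bins, assuming (as the TOF assertions imply) that the helper workspace holds two unit-weight events at each TOF 0.5, 1.5, ..., 63.5. With |Tolerance| = 1 the bin end grows by a factor of (1 - tolerance) = 2 starting from the first TOF of 0.5, giving edges 1, 2, 4, 8, 16, 32, 64 and therefore seven groups:

[0.5, 1)  ->  2 events, weight  2, mean TOF  0.5
[1, 2)    ->  2 events, weight  2, mean TOF  1.5 = 0.75 * 2
[2, 4)    ->  4 events, weight  4, mean TOF  3.0 = 0.75 * 4
[4, 8)    ->  8 events, weight  8, mean TOF  6.0 = 0.75 * 8
[8, 16)   -> 16 events, weight 16, mean TOF 12.0 = 0.75 * 16
[16, 32)  -> 32 events, weight 32, mean TOF 24.0 = 0.75 * 32
[32, 64)  -> 64 events, weight 64, mean TOF 48.0 = 0.75 * 64

In the WithPulseTime variants the asserted pulse times are the average pulse time of each group, e.g. the [2, 4) group combines events with pulses at 2 s and 3 s, giving startTime + 2.5.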
82 changes: 77 additions & 5 deletions Framework/DataObjects/src/EventList.cpp
@@ -1450,7 +1450,7 @@ inline double calcNorm(const double errorSquared) {
* @param events :: input event list.
* @param out :: output WeightedEventNoTime vector.
* @param tolerance :: how close two events' TOFs have to be to be considered
*the same.
*the same. Negative implies log grouping.
*/

template <class T>
@@ -1462,7 +1462,7 @@ inline void EventList::compressEventsHelper(const std::vector<T> &events, std::v
out.reserve(events.size() / 20);

// The last TOF to which we are comparing.
double lastTof = std::numeric_limits<double>::lowest();
double lastTof = events.front().m_tof;
// For getting an accurate average TOF
double totalTof = 0;
int num = 0;
@@ -1471,8 +1471,36 @@ inline void EventList::compressEventsHelper(const std::vector<T> &events, std::v
double errorSquared = 0;
double normalization = 0.;

double bin_end = lastTof;
std::function<bool(const double, const double)> compareTof;
std::function<double(const double, double)> next_bin;

if (tolerance < 0) { // log
if (lastTof < 0)
throw std::runtime_error("compressEvents with log binning doesn't work with negative TOF");

if (lastTof == 0)
bin_end = fabs(tolerance);

// for log we do "less than" so that it matches the log binning of the Rebin algorithm
compareTof = [](const double lhs, const double rhs) { return lhs < rhs; };
next_bin = [tolerance](const double lastTof, double bin_end) {
// advance the bin_end until we find the one that this next event falls into
while (lastTof >= bin_end)
bin_end = bin_end * (1 - tolerance);
return bin_end;
};
} else { // linear
// for linear we do "less than or equals" because that is how it was originally implemented
compareTof = [](const double lhs, const double rhs) { return lhs <= rhs; };
next_bin = [tolerance](const double lastTof, double) { return lastTof + tolerance; };
}

// get first bin_end
bin_end = next_bin(lastTof, bin_end);

for (auto it = events.cbegin(); it != events.cend(); it++) {
if ((it->m_tof - lastTof) <= tolerance) {
if (compareTof(it->m_tof, bin_end)) {
// Carry the error and weight
weight += it->weight();
errorSquared += it->errorSquared();
@@ -1499,6 +1527,8 @@ inline void EventList::compressEventsHelper(const std::vector<T> &events, std::v
weight = it->weight();
errorSquared = it->errorSquared();
lastTof = it->m_tof;

bin_end = next_bin(lastTof, bin_end);
}
}
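A minimal standalone sketch of the bin-end advancement that the next_bin lambda performs for log grouping, assuming tolerance = -1 and a first event TOF of 0.5 (the values exercised in CompressEventsTest): each bin end is multiplied by (1 - tolerance), i.e. doubled.

#include <cstdio>

int main() {
  const double tolerance = -1.0; // negative tolerance selects logarithmic grouping
  double lastTof = 0.5;          // TOF of the first (sorted) event
  double bin_end = lastTof;
  for (int i = 0; i < 7; ++i) {
    // same rule as the lambda: grow bin_end geometrically until the current TOF falls below it
    while (lastTof >= bin_end)
      bin_end *= (1.0 - tolerance);
    std::printf("bin %d ends at %g\n", i, bin_end); // prints 1, 2, 4, ..., 64
    lastTof = bin_end; // pretend the next event sits exactly on the next bin edge
  }
  return 0;
}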

@@ -1528,7 +1558,7 @@ inline void EventList::compressFatEventsHelper(const std::vector<T> &events, std
out.reserve(events.size() / 20);

// The last TOF to which we are comparing.
double lastTof = std::numeric_limits<double>::lowest();
double lastTof = events.front().m_tof;
// For getting an accurate average TOF
double totalTof = 0;

@@ -1558,10 +1588,46 @@ inline void EventList::compressFatEventsHelper(const std::vector<T> &events, std

// bin if the pulses are histogrammed
int64_t lastPulseBin = (it->m_pulsetime.totalNanoseconds() - pulsetimeStart) / pulsetimeDelta;

double bin_end = lastTof;
double tof_min{0};
std::function<bool(const double, const double)> compareTof;
std::function<double(const double, double)> next_bin;

if (tolerance < 0) { // log
// for log we do "less than" so that it matches the log binning of the Rebin algorithm
compareTof = [](const double lhs, const double rhs) { return lhs < rhs; };
next_bin = [tolerance](const double lastTof, double bin_end) {
// advance the bin_end until we find the one that this next event falls into
while (lastTof >= bin_end)
bin_end = bin_end * (1 - tolerance);
return bin_end;
};

// get minimum Tof so that binning is consistent across all pulses
const auto event_min = std::min_element(
events.cbegin(), events.cend(), [](const auto &left, const auto &right) { return left.tof() < right.tof(); });
bin_end = tof_min = event_min->tof();

if (tof_min < 0)
throw std::runtime_error("compressEvents with log binning doesn't work with negative TOF");

if (tof_min == 0)
bin_end = fabs(tolerance);

} else { // linear
// for linear we do "less than or equals" because that is how it was originally implemented
compareTof = [](const double lhs, const double rhs) { return lhs <= rhs; };
next_bin = [tolerance](const double lastTof, double) { return lastTof + tolerance; };
}

// get first bin_end
bin_end = next_bin(lastTof, bin_end);

// loop through events and accumulate weight
for (; it != events.cend(); ++it) {
const int64_t eventPulseBin = (it->m_pulsetime.totalNanoseconds() - pulsetimeStart) / pulsetimeDelta;
if ((eventPulseBin <= lastPulseBin) && (std::fabs(it->m_tof - lastTof) <= tolerance)) {
if ((eventPulseBin <= lastPulseBin) && compareTof(it->m_tof, bin_end)) {
// Carry the error and weight
weight += it->weight();
errorSquared += it->errorSquared();
@@ -1585,6 +1651,10 @@ inline void EventList::compressFatEventsHelper(const std::vector<T> &events, std
errorSquared);
}
}
if (tolerance < 0 && eventPulseBin != lastPulseBin)
// reset the bin_end for the new pulse bin
bin_end = tof_min;

// Start a new combined object
double norm = calcNorm(it->errorSquared());
totalTof = it->m_tof * norm;
@@ -1597,6 +1667,8 @@ inline void EventList::compressFatEventsHelper(const std::vector<T> &events, std
pulsetimes.emplace_back(it->m_pulsetime);
pulsetimeWeights.clear();
pulsetimeWeights.emplace_back(norm);

bin_end = next_bin(lastTof, bin_end);
}
}
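A hedged sketch, not Mantid code, of the grouping that compressFatEventsHelper implements for negative tolerance: each event effectively gets a (pulse bin, log TOF bin) pair, with the TOF bin edges anchored at the global minimum TOF so that they are identical in every pulse bin; resetting bin_end to tof_min at each new pulse bin is what keeps the edges aligned. The constants below are taken from the new test (wall-clock tolerance of 64 s, minimum TOF 0.5), and the closed-form bin index assumes exact arithmetic.

#include <cmath>
#include <cstdio>

int main() {
  const double tolerance = -1.0; // negative => logarithmic TOF grouping
  const double wallClock = 64.0; // seconds per pulse bin (test value)
  const double tofMin = 0.5;     // global minimum TOF of the input (assumed)

  // Closed-form equivalent of repeatedly multiplying bin_end by (1 - tolerance), starting at tofMin.
  auto tofBin = [&](double tof) {
    return static_cast<int>(std::floor(std::log(tof / tofMin) / std::log(1.0 - tolerance)));
  };
  // pulseSeconds is measured from the filter start time.
  auto pulseBin = [&](double pulseSeconds) { return static_cast<int>(pulseSeconds / wallClock); };

  // Events sharing the same (pulse bin, TOF bin) pair are compressed together.
  std::printf("tof 2.5, pulse  2 s -> (%d, %d)\n", pulseBin(2.0), tofBin(2.5));  // (0, 2)
  std::printf("tof 3.5, pulse  3 s -> (%d, %d)\n", pulseBin(3.0), tofBin(3.5));  // (0, 2) -> same group
  std::printf("tof 3.5, pulse 70 s -> (%d, %d)\n", pulseBin(70.0), tofBin(3.5)); // (1, 2) -> new pulse bin
  return 0;
}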
