diff --git a/.aiexclude b/.aiexclude new file mode 100644 index 000000000..8dc182dc9 --- /dev/null +++ b/.aiexclude @@ -0,0 +1,2 @@ +**/docs/ +**/test/ diff --git a/.clang-format b/.clang-format index 9810f0cad..52d613099 100644 --- a/.clang-format +++ b/.clang-format @@ -1,21 +1,13 @@ --- Language: Cpp -# BasedOnStyle: sphenix AccessModifierOffset: -1 AlignAfterOpenBracket: Align -AlignConsecutiveAssignments: false -AlignConsecutiveDeclarations: false -AlignEscapedNewlinesLeft: true AlignOperands: true AlignTrailingComments: true AllowAllParametersOfDeclarationOnNextLine: true -AllowShortBlocksOnASingleLine: false -AllowShortCaseLabelsOnASingleLine: false AllowShortFunctionsOnASingleLine: All AllowShortIfStatementsOnASingleLine: true AllowShortLoopsOnASingleLine: true -AlwaysBreakAfterDefinitionReturnType: None -AlwaysBreakAfterReturnType: None AlwaysBreakBeforeMultilineStrings: true AlwaysBreakTemplateDeclarations: true BinPackArguments: true @@ -23,68 +15,22 @@ BinPackParameters: true BraceWrapping: AfterClass: true AfterControlStatement: true - AfterEnum: false AfterFunction: true - AfterNamespace: false - AfterObjCDeclaration: false - AfterStruct: false - AfterUnion: false - BeforeCatch: false - BeforeElse: false - IndentBraces: false BreakBeforeBinaryOperators: None BreakBeforeBraces: Stroustrup BreakBeforeTernaryOperators: true -BreakConstructorInitializersBeforeComma: true ColumnLimit: 120 -CommentPragmas: '^ IWYU pragma:' -ConstructorInitializerAllOnOneLineOrOnePerLine: false ConstructorInitializerIndentWidth: 2 ContinuationIndentWidth: 4 Cpp11BracedListStyle: true -DerivePointerAlignment: true -DisableFormat: true -ExperimentalAutoDetectBinPacking: false -ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ] -IncludeCategories: - - Regex: '^<.*\.h>' - Priority: 1 - - Regex: '^<.*' - Priority: 2 - - Regex: '.*' - Priority: 3 -IndentCaseLabels: false IndentWidth: 4 -IndentWrappedFunctionNames: false -KeepEmptyLinesAtTheStartOfBlocks: false -MacroBlockBegin: 
'' -MacroBlockEnd: '' -MaxEmptyLinesToKeep: 1 NamespaceIndentation: All -ObjCBlockIndentWidth: 2 -ObjCSpaceAfterProperty: false -ObjCSpaceBeforeProtocolList: false -PenaltyBreakBeforeFirstCallParameter: 1 -PenaltyBreakComment: 300 -PenaltyBreakFirstLessLess: 120 -PenaltyBreakString: 1000 -PenaltyExcessCharacter: 1000000 -PenaltyReturnTypeOnItsOwnLine: 200 PointerAlignment: Right ReflowComments: true -SortIncludes: false -SpaceAfterCStyleCast: true -SpaceBeforeAssignmentOperators: true -SpaceBeforeParens: ControlStatements -SpaceInEmptyParentheses: false SpacesBeforeTrailingComments: 2 -SpacesInAngles: false -SpacesInContainerLiterals: true -SpacesInCStyleCastParentheses: false -SpacesInParentheses: false -SpacesInSquareBrackets: false Standard: Auto TabWidth: 8 UseTab: Never +DisableFormat: true ... diff --git a/.gitignore b/.gitignore index c90dfd606..00784fa94 100644 --- a/.gitignore +++ b/.gitignore @@ -87,3 +87,11 @@ spng .cache .clangd junk* +downloads +tmp* + +doxy +*.bak +doxy_quick +html +latex diff --git a/Doxyfile b/Doxyfile index 2d6163354..3f30b514e 100644 --- a/Doxyfile +++ b/Doxyfile @@ -177,9 +177,11 @@ SHORT_NAMES = NO # description. If set to NO, the Javadoc-style will behave just like regular Qt- # style comments (thus requiring an explicit @brief command for a brief # description.) +### this is comments like /** ... */ # The default value is: NO. -JAVADOC_AUTOBRIEF = NO +JAVADOC_AUTOBRIEF = YES +CSharp_comments = YES # If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first # line (until the first dot) of a Qt-style comment as the brief description. If @@ -280,7 +282,7 @@ OPTIMIZE_OUTPUT_VHDL = NO # Note that for custom extensions you also need to set FILE_PATTERNS otherwise # the files are not read by doxygen. 
-EXTENSION_MAPPING = +EXTENSION_MAPPING = org=md # If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments # according to the Markdown format, which allows for more readable @@ -753,7 +755,7 @@ WARN_LOGFILE = # spaces. # Note: If this tag is empty the current directory is searched. -INPUT = README.org apps util iface gen sigproc sio sst tbb +INPUT = README.org apps aux cfg clus docs gen hio iface img pgraph pytorch root sigproc sio tbb test util zio # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses @@ -773,7 +775,8 @@ INPUT_ENCODING = UTF-8 # *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, # *.qsf, *.as and *.js. -FILE_PATTERNS = *.c \ +FILE_PATTERNS = *.org \ + *.c \ *.cc \ *.cxx \ *.cpp \ @@ -845,7 +848,7 @@ EXCLUDE_SYMLINKS = NO # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories for example use the pattern */test/* -EXCLUDE_PATTERNS = */test/* +EXCLUDE_PATTERNS = */test/* **/talks/* # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the @@ -908,7 +911,9 @@ INPUT_FILTER = # filters are used. If the FILTER_PATTERNS tag is empty or if none of the # patterns match the file name, INPUT_FILTER is applied. -FILTER_PATTERNS = +## Run "./run-doxygen" instead of bare "doxygen" to make this line work +## Note, the filter takes the source file as first arg and prints result to stdout +FILTER_PATTERNS = "*.org=run-doxygen org2md" # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER ) will also be used to filter the input files that are used for @@ -930,7 +935,7 @@ FILTER_SOURCE_PATTERNS = # (index.html). 
This can be useful if you have a project on for instance GitHub # and want to reuse the introduction page also for the doxygen output. -USE_MDFILE_AS_MAINPAGE = docs/README.org +USE_MDFILE_AS_MAINPAGE = README.org #--------------------------------------------------------------------------- # Configuration options related to source browsing diff --git a/README.org b/README.org index aef208370..92ddadd0b 100644 --- a/README.org +++ b/README.org @@ -1,7 +1,12 @@ +#+BEGIN_COMMENT +\mainpage +#+END_COMMENT + #+TITLE: Wire-Cell Toolkit #+SETUPFILE: docs/setup-readme.org + Welcome to the Wire-Cell Toolkit (WCT) source repository at https://github.com/wirecell/wire-cell-toolkit. * Overview diff --git a/apps/src/ConfigDumper.cxx b/apps/src/ConfigDumper.cxx index ef20fe81a..08be6fa8d 100644 --- a/apps/src/ConfigDumper.cxx +++ b/apps/src/ConfigDumper.cxx @@ -63,4 +63,5 @@ void ConfigDumper::execute() } Persist::dump(get(m_cfg, "filename"), cm.all()); + (void)nfailed; // unused, but useful for debugging } diff --git a/aux/inc/WireCellAux/LinterpFunction.h b/aux/inc/WireCellAux/LinterpFunction.h new file mode 100644 index 000000000..a55267a22 --- /dev/null +++ b/aux/inc/WireCellAux/LinterpFunction.h @@ -0,0 +1,46 @@ +#include "WireCellIface/IScalarFunction.h" +#include "WireCellIface/IConfigurable.h" +#include "WireCellUtil/Interpolate.h" + +namespace WireCell::Aux { + + /** A scalar function implemented as linear interpolation on regularly or + * irregularly spaced points. + * + * Extrapolation returns end point values. + */ + class LinterpFunction : public WireCell::IScalarFunction, WireCell::IConfigurable { + public: + virtual ~LinterpFunction(); + + virtual void configure(const WireCell::Configuration& config); + virtual WireCell::Configuration default_configuration() const { + return Configuration{}; // user must supply all + } + + virtual double scalar_function(double x); + private: + + /// Config: values + /// + /// Array of function samples. 
+ /// + /// Config: coords + /// + /// Array of the coordinates at which values are sampled. + /// + /// Config: start + /// + /// The first abscissa value. + /// + /// Config: step + /// + /// Distance between regular samples + /// + /// Note: coords and (start,step) are mutually exclusive. The former + /// implies irregular sample interpolation and the latter regular. + + std::function m_terp; + }; + +} diff --git a/aux/inc/WireCellAux/ParticleInfo.h b/aux/inc/WireCellAux/ParticleInfo.h new file mode 100644 index 000000000..c395ad379 --- /dev/null +++ b/aux/inc/WireCellAux/ParticleInfo.h @@ -0,0 +1,128 @@ +#ifndef WIRECELLAUX_PARTICLEINFO +#define WIRECELLAUX_PARTICLEINFO + +#include "WireCellUtil/Point.h" +#include "WireCellUtil/Units.h" +#include "WireCellUtil/D4Vector.h" +#include +#include + +namespace WireCell::Aux { + + /** + * @brief A comprehensive particle information storage class + * + * This class stores all relevant particle physics information including + * PDG codes, 4-momentum, mass, name, and kinetic energy. It follows + * Wire-Cell toolkit conventions and integrates well with existing + * deposition and clustering infrastructure. 
+ */ + class ParticleInfo { + public: + // Default constructor - creates an invalid/empty particle + ParticleInfo(); + + // Comprehensive constructor + ParticleInfo(int pdg_code, + double mass, + const std::string& name, + double kinetic_energy, + const WireCell::Point& momentum_3vec, + int id = 0, + double charge = 0.0); + + // Constructor from 4-momentum + ParticleInfo(int pdg_code, + double mass, + const std::string& name, + const WireCell::D4Vector& four_momentum, + int id = 0, + double charge = 0.0); + + virtual ~ParticleInfo() = default; + + // Core particle properties + int pdg() const { return m_pdg_code; } + double mass() const { return m_mass; } + const std::string& name() const { return m_name; } + double kinetic_energy() const { return m_kinetic_energy; } + int id() const { return m_id; } + double charge() const { return m_charge; } + + // 4-momentum access + double energy() const { return m_four_momentum.e(); } + WireCell::Point momentum() const { return WireCell::Point(m_four_momentum.px(), m_four_momentum.py(), m_four_momentum.pz()); } + const WireCell::D4Vector& four_momentum() const { return m_four_momentum; } + + // Derived quantities + double momentum_magnitude() const { return m_four_momentum.p(); } + double beta() const { return m_four_momentum.beta(); } // v/c + double gamma() const { return m_four_momentum.gamma(); } // Lorentz factor + double rapidity() const { return m_four_momentum.rapidity(); } // 0.5 * ln((E+pz)/(E-pz)) + + // Utility methods + bool is_valid() const { return m_pdg_code != 0; } + bool is_charged() const { return std::abs(m_charge) > 1e-6; } + bool is_stable() const; // Based on PDG code + + // Setters (for cases where you need to modify after construction) + void set_pdg(int pdg) { m_pdg_code = pdg; } + void set_mass(double mass) { m_mass = mass; update_kinematics(); } + void set_name(const std::string& name) { m_name = name; } + void set_momentum(const WireCell::Point& momentum_3vec); + void set_four_momentum(const 
WireCell::D4Vector& four_momentum); + void set_kinetic_energy(double ke); + void set_id(int id) { m_id = id; } + void set_charge(double charge) { m_charge = charge; } + + // Static utility methods for PDG lookups + static std::string pdg_to_name(int pdg_code); + static double pdg_to_mass(int pdg_code); + static double pdg_to_charge(int pdg_code); + + // Factory method to create from PDG code + static ParticleInfo from_pdg(int pdg_code, + const WireCell::Point& momentum_3vec, + int id = 0); + + double particle_score() const { return m_particle_score; } + void set_particle_score(double score) { m_particle_score = score; } + + private: + // Core data members + int m_pdg_code; + double m_mass; + std::string m_name; + double m_kinetic_energy; + WireCell::D4Vector m_four_momentum; // (E, px, py, pz) + int m_id; + double m_charge; + double m_particle_score{-1.0}; // Optional score for particle ID confidence + + // Internal helper methods + void update_kinematics(); // Recalculate energy/momentum relationships + void validate_inputs(); // Check for physical consistency + + // Static data for PDG lookups + static const std::map& get_pdg_name_map(); + static const std::map& get_pdg_mass_map(); + static const std::map& get_pdg_charge_map(); + }; + + // Convenience typedefs + using ParticleInfoPtr = std::shared_ptr; + using ParticleInfoVector = std::vector; + using ParticleInfoSelection = std::vector; + + // Helper functions for common particle types + namespace ParticleHelpers { + ParticleInfo electron(const WireCell::Point& momentum, int id = 0); + ParticleInfo muon(const WireCell::Point& momentum, int charge_sign = 1, int id = 0); + ParticleInfo pion_charged(const WireCell::Point& momentum, int charge_sign = 1, int id = 0); + ParticleInfo proton(const WireCell::Point& momentum, int id = 0); + ParticleInfo photon(const WireCell::Point& momentum, int id = 0); + } + +} // namespace WireCell::Aux + +#endif // WIRECELLAUX_PARTICLEINFO \ No newline at end of file diff --git 
a/aux/inc/WireCellAux/SamplingHelpers.h b/aux/inc/WireCellAux/SamplingHelpers.h index e93d80ee3..5a67a8bf7 100644 --- a/aux/inc/WireCellAux/SamplingHelpers.h +++ b/aux/inc/WireCellAux/SamplingHelpers.h @@ -2,42 +2,85 @@ #include "WireCellUtil/PointTree.h" #include "WireCellUtil/Point.h" #include "WireCellUtil/Units.h" + #include "WireCellIface/IBlob.h" +#include "WireCellIface/IBlobSampler.h" #include "WireCellIface/IBlobSet.h" +#include + namespace WireCell::Aux { + /** Return a "sampling" of a live blob. + + These roll up the relevant fill_*() methods for "live" blobs. + + If ident<0 will use iblob.ident(). + + An empty set of PCs is returned if there are no sample points. + */ + PointCloud::Tree::named_pointclouds_t + sample_live(const IBlobSampler::pointer& sampler, const IBlob::pointer& blob, + const std::vector& angles, + const double tick=500*units::ns, int ident=-1); + + /** Return a "sampling" of a dead blob. + + These roll up the relevant fill_*() methods for "dead" blobs. + */ + PointCloud::Tree::named_pointclouds_t + sample_dead(const IBlob::pointer& blob, const double tick=500*units::ns); + - // Some crufty stuff used in PointTreeBuilding and UbooneClusterSource. - PointCloud::Dataset - make_scalar_dataset(const IBlob::pointer iblob, const Point& center, - const int npoints = 0, const double tick_span = 0.5*units::us); + /** Add per-plane 2D point cloud arrays to the given pc that represent the + * plane coordinates derived from the "x", "y" and "z" arrays in the given + * "pc". The "pattern" argument is used to name the arrays of the 2D PCs. + * The default is likely best kept as-is unless there is some custom needs. + * The first argument to the string interpolation on pattern is the index + * that runs over the angles (ie, plane index). The second argument is the + * 2D coordinate character ('x' or 'y'). 
+ */ + void fill_2dpcs(PointCloud::Dataset& pc, + const std::vector& angles, + const std::string& pattern="2dp%1%_%2%"); + - PointCloud::Dataset make2dds(const PointCloud::Dataset& ds3d, const double angle); + /// Fill various types of per-blob "scalar" info. + void fill_scalar_blob(PointCloud::Dataset& scalar, const IBlob& iblob, const double tick=500*units::ns); + /// Fill "center" related items with values + void fill_scalar_center(PointCloud::Dataset& scalar, const PointCloud::Dataset& pc3d); + /// As above but fill with zeros. + void fill_scalar_center(PointCloud::Dataset& scalar); + /// Transfer "aux" values. See #426. + void fill_scalar_aux(PointCloud::Dataset& scalar, const PointCloud::Dataset& aux); + /// As above but fill with zeros + void fill_scalar_aux(PointCloud::Dataset& scalar); // Calculate the average position of a point cloud tree. WireCell::Point calc_blob_center(const PointCloud::Dataset& ds); // Calculate a dataset of blob corners - PointCloud::Dataset make_corner_dataset(const IBlob::pointer iblob); + PointCloud::Dataset make_corner_dataset(const IBlob& iblob); - double time2drift(const IAnodeFace::pointer anodeface, const double time_offset, const double drift_speed, + double time2drift(IAnodeFace::pointer anodeface, const double time_offset, const double drift_speed, double time); - void add_ctpc(PointCloud::Tree::Points::node_t& root, const IBlobSet::vector ibsv, - const IAnodeFace::pointer iface, const int face = 0, + void add_ctpc(PointCloud::Tree::Points::node_t& root, + const ISlice::vector& slices, + IAnodeFace::pointer iface, const int face = 0, const double time_offset = -1600 * units::us + 6 * units::mm / (1.101 * units::mm / units::us), const double drift_speed = 1.101 * units::mm / units::us, const double tick = 0.5 * units::us, const double dead_threshold = 1e10); - void add_dead_winds(PointCloud::Tree::Points::node_t& root, const IBlobSet::vector ibsv, - const IAnodeFace::pointer iface, const int face = 0, - const double 
time_offset = -1600 * units::us + 6 * units::mm / (1.101 * units::mm / units::us), - const double drift_speed = 1.101 * units::mm / units::us, - const double tick = 0.5 * units::us, - const double dead_threshold = 1e10 + void add_dead_winds(PointCloud::Tree::Points::node_t& root, + const ISlice::vector& slices, + IAnodeFace::pointer iface, const int face = 0, + const double time_offset = -1600 * units::us + 6 * units::mm / (1.101 * units::mm / units::us), + const double drift_speed = 1.101 * units::mm / units::us, + const double tick = 0.5 * units::us, + const double dead_threshold = 1e10 ); } diff --git a/aux/src/Bee.cxx b/aux/src/Bee.cxx index 167cafe36..c112fee43 100644 --- a/aux/src/Bee.cxx +++ b/aux/src/Bee.cxx @@ -13,6 +13,8 @@ Aux::Bee::Points Aux::Bee::dump(const IBlob::vector& blobs, IBlobSampler::pointe Aux::Bee::Points bee; for (const auto& iblob : blobs) { + + // This does some sampling "by hand". See #430 for details. auto [pc, aux] = sampler->sample_blob(iblob, iblob->ident()); auto x = pc.get("x")->elements(); auto y = pc.get("y")->elements(); diff --git a/aux/src/BoxFiducial.cxx b/aux/src/BoxFiducial.cxx new file mode 100644 index 000000000..20f7c03fa --- /dev/null +++ b/aux/src/BoxFiducial.cxx @@ -0,0 +1,48 @@ +#include "WireCellIface/IFiducial.h" +#include "WireCellIface/IAnodePlane.h" +#include "WireCellIface/IConfigurable.h" + +#include "WireCellUtil/NamedFactory.h" +#include "WireCellUtil/Exceptions.h" +#include "WireCellUtil/BoundingBox.h" + +#include +// #include // only debug + +// Implementation is totally local to this comp. unit so no need for namespacing. 
+class BoxFiducial; + +WIRECELL_FACTORY(BoxFiducial, BoxFiducial, + WireCell::IFiducial, WireCell::IConfigurable) + + +using namespace WireCell; + +class BoxFiducial : public IFiducial, public IConfigurable { + + BoundingBox m_bb; + +public: + + BoxFiducial() {} + virtual ~BoxFiducial() {} + + virtual Configuration default_configuration() const { + Configuration cfg; + // This is a standard "Ray" form: + // {tail:{x:1,y:2,z:3}, head:{x:10,y:20:z:30}} + cfg["bounds"] = Json::objectValue; + return cfg; + } + + virtual void configure(const Configuration& cfg) { + Ray ray = get(cfg, "bounds"); + m_bb = BoundingBox(ray); + } + + + // IFiducial + virtual bool contained(const Point& point) const { + return m_bb.inside(point); + } +}; diff --git a/aux/src/CompositeFiducial.cxx b/aux/src/CompositeFiducial.cxx new file mode 100644 index 000000000..8f3dc38ab --- /dev/null +++ b/aux/src/CompositeFiducial.cxx @@ -0,0 +1,147 @@ + +#include "WireCellIface/IFiducial.h" +#include "WireCellIface/IConfigurable.h" +#include "WireCellUtil/NamedFactory.h" +#include "WireCellUtil/Point.h" + +#include "WireCellUtil/Exceptions.h" + +#include +#include + +class CompositeFiducial; + +WIRECELL_FACTORY(CompositeFiducial, CompositeFiducial, + WireCell::IFiducial, WireCell::IConfigurable) + +using namespace WireCell; + + +class CompositeFiducial : public WireCell::IFiducial, public WireCell::IConfigurable { +public: + CompositeFiducial(); + virtual ~CompositeFiducial(); + + // IConfigurable + virtual void configure(const WireCell::Configuration& cfg); + virtual WireCell::Configuration default_configuration() const; + + // IFiducial + virtual bool contained(const WireCell::Point& point) const; + +private: + // Vector of child IFiducial components + std::vector m_fiducials; + + // Logic for combining results: "and", "or", "nand", "nor" + std::string m_logic{"and"}; + + // Helper method to apply logic + bool apply_logic(const std::vector& results) const; +}; + + + + 
+CompositeFiducial::CompositeFiducial() {} + +CompositeFiducial::~CompositeFiducial() {} + +Configuration CompositeFiducial::default_configuration() const { + Configuration cfg; + + // Logic for combining child fiducial results + cfg["logic"] = m_logic; + + // Array of child IFiducial component names to combine + cfg["fiducials"] = Json::arrayValue; + + return cfg; +} + +void CompositeFiducial::configure(const Configuration& cfg) { + m_logic = get(cfg, "logic", m_logic); + + // Validate logic + if (m_logic != "and" && m_logic != "or" && m_logic != "nand" && m_logic != "nor") { + raise("CompositeFiducial: invalid logic '%s', must be 'and', 'or', 'nand', or 'nor'", m_logic); + } + + // Clear existing fiducials + m_fiducials.clear(); + + // Load child fiducial components + const auto& fiducial_names = cfg["fiducials"]; + if (fiducial_names.empty()) { + raise("CompositeFiducial: 'fiducials' array cannot be empty"); + } + + // std::cout << "CompositeFiducial: Loading " << fiducial_names.size() << " child fiducials with logic '" << m_logic << "'" << std::endl; + + for (const auto& name : fiducial_names) { + std::string fiducial_tn = name.asString(); + // std::cout << "CompositeFiducial: Looking for fiducial '" << fiducial_tn << "'" << std::endl; + auto fiducial = Factory::find_tn(fiducial_tn); + if (!fiducial) { + raise("CompositeFiducial: failed to find IFiducial component '%s'", fiducial_tn); + } + // std::cout << "CompositeFiducial: Successfully loaded fiducial '" << fiducial_tn << "'" << std::endl; + m_fiducials.push_back(fiducial); + } +} + +bool CompositeFiducial::contained(const Point& point) const { + if (m_fiducials.empty()) { + return true; // No restrictions if no child fiducials + } + + // Evaluate all child fiducials + std::vector results; + results.reserve(m_fiducials.size()); + + for (size_t i = 0; i < m_fiducials.size(); ++i) { + const auto& fiducial = m_fiducials[i]; + bool result = fiducial->contained(point); + results.push_back(result); + // std::cout 
<< "CompositeFiducial: point " << point << " contained by " + // << (result ? "true" : "false") << " in fiducial[" << i << "]" << std::endl; + } + + // Apply combination logic + return apply_logic(results); +} + +bool CompositeFiducial::apply_logic(const std::vector& results) const { + if (results.empty()) return true; + + if (m_logic == "and") { + // All must be true + for (bool result : results) { + if (!result) return false; + } + return true; + } + else if (m_logic == "or") { + // At least one must be true + for (bool result : results) { + if (result) return true; + } + return false; + } + else if (m_logic == "nand") { + // NOT(all true) = at least one false + for (bool result : results) { + if (!result) return true; + } + return false; + } + else if (m_logic == "nor") { + // NOT(any true) = all false + for (bool result : results) { + if (result) return false; + } + return true; + } + + return true; // Default fallback +} \ No newline at end of file diff --git a/aux/src/DetectorVolumes.cxx b/aux/src/DetectorVolumes.cxx new file mode 100644 index 000000000..fb4a4f800 --- /dev/null +++ b/aux/src/DetectorVolumes.cxx @@ -0,0 +1,441 @@ +#include "WireCellIface/IDetectorVolumes.h" +#include "WireCellIface/IFiducial.h" +#include "WireCellIface/IAnodePlane.h" +#include "WireCellIface/IConfigurable.h" + +#include "WireCellUtil/NamedFactory.h" +#include "WireCellUtil/Exceptions.h" + +#include +// #include // only debug + +// Implementation is totally local to this comp. unit so no need for namespacing. 
+class DetectorVolumes; +class T0Correction; + +WIRECELL_FACTORY(DetectorVolumes, DetectorVolumes, + WireCell::IDetectorVolumes, WireCell::IConfigurable) + + +using namespace WireCell; + +class DetectorVolumes : public IDetectorVolumes, + public IFiducial, + public IConfigurable { + + public: + + DetectorVolumes() { + } + virtual ~DetectorVolumes() {} + + virtual Configuration default_configuration() const { + Configuration cfg; + + // A list of IAnodePlane "type:name" identifiers to match up to wpids. + cfg["anodes"] = Json::arrayValue; + + // Arbitrary, user-provided, application-specific metadata that is + // provided site-unseen to the clients of DetectorVolumes. + // + // The metadata as a whole must be an object with keys formed either to + // match WirePlaneId::name() or the literal string "default". + // + // The attribute values are not determined by DetectorVolumes but by the + // consuming application (the components for which the user has + // configured to use the DetectorVolumes). + // + // If an attribute key is not found that matches wpid.name() of the + // query then the attribute value "default" is returned. If both do not + // exist then a "null" JSON value is returned. + cfg["metadata"] = Json::objectValue; + + // Add grid configuration parameters with defaults + cfg["grid_size_x"] = 0.0; // If 0, will be determined automatically + cfg["grid_size_y"] = 0.0; // If 0, will be determined automatically + cfg["grid_size_z"] = 0.0; // If 0, will be determined automatically + cfg["grid_fraction"] = 0.5; // Default fraction of average bounding box size + + return cfg; + } + + // Return a new wpid with layer masked to 0 so it represents only + // anode+face. This is the value used to index m_faces. 
+ WirePlaneId wfid(WirePlaneId wpid) const { + return WirePlaneId(WirePlaneLayer_t::kAllLayers, wpid.face(), wpid.apa()); + } + + virtual void configure(const Configuration& cfg) { + m_faces.clear(); + + // Reset spatial structures + m_overall_bb = BoundingBox(); + m_grid.clear(); + + // Store grid configuration parameters + m_config_grid_size_x = cfg.get("grid_size_x", 0.0).asDouble(); + m_config_grid_size_y = cfg.get("grid_size_y", 0.0).asDouble(); + m_config_grid_size_z = cfg.get("grid_size_z", 0.0).asDouble(); + m_grid_fraction = cfg.get("grid_fraction", 0.5).asDouble(); + + for (const auto& janode : cfg["anodes"]) { + const std::string anode_tn = janode.asString(); + auto anode = Factory::find_tn(anode_tn); + + for (auto iface : anode->faces()) { + if (! iface) continue; + + auto planes = iface->planes(); + auto wpid = wfid(planes[0]->planeid()); + if (!wpid.valid()) { + raise("got bogus wpid from anode %s", anode_tn); + } + + // std::cerr << "DetectorVolumes face for: " << wpid << "\n"; + m_faces[wpid.ident()] = iface; + + // Update overall bounding box with this face's sensitive volume + BoundingBox face_bb = iface->sensitive(); + m_overall_bb(face_bb.bounds()); + } + } + initialize_spatial_queries(); + + m_md = cfg["metadata"]; + + Json::FastWriter fastWriter; + SPDLOG_TRACE("metadata: {}", fastWriter.write(m_md)); + } + + + // IFiducial + virtual bool contained(const Point& point) const { + return contained_by(point).valid(); + } + + + // // Rest is IDetectorVolumes + // virtual WirePlaneId contained_by(const Point& point) const { + // // This initial imp is perhaps too slow. There are two options I can + // // think of immediately: + // // + // // 1) Try to divine a way to represent the BBs on a regular grid of + // // boxes. Calculate the 3D grid coordinates of a point directly, eg + // // i=floor((x-o)/w), etc for j/y and k/z. Use ijk to look up iface to + // // do BB.inside() test. 
+ // // + // // 2) Perhaps simpler, construct a k-d tree with BB corners. Query to + // // find closest corner to point. Associate corner back to iface and to + // // BB.inside() test. + + // for (const auto& [wpident, iface] : m_faces) { + // auto bb = iface->sensitive(); + // if (bb.inside(point)) { + // return WirePlaneId(wpident); + // } + // } + // return WirePlaneId(WirePlaneLayer_t::kUnknownLayer, -1, -1); + // } + + IAnodeFace::pointer get_face(WirePlaneId wpid) const { + wpid = wfid(wpid); + if (!wpid.valid()) { + // std::cerr << "get_face false wpid: " << wpid << std::endl; + return nullptr; + } + auto it = m_faces.find(wpid.ident()); + if (it == m_faces.end()) { + // std::cerr << "get_face no face for wpid: " << wpid << std::endl; + return nullptr; + } + return it->second; + } + + IWirePlane::pointer get_plane(WirePlaneId wpid) const { + if (! wpid.valid()) { + // std::cerr << "get_plane invalid wpid: " << wpid << std::endl; + return nullptr; + } + auto iface = get_face(wpid); + if (!iface) { + return nullptr; + } + return iface->planes()[wpid.index()]; + } + + virtual int face_dirx(WirePlaneId wpid) const { + auto iface = get_face(wpid); + if (!iface) return 0; + return iface->dirx(); + } + + virtual Vector wire_direction(WirePlaneId wpid) const { + auto iplane = get_plane(wpid); + if (! iplane) { return Vector(0,0,0); } + return iplane->pimpos()->axis(1); + } + + virtual Vector pitch_vector(WirePlaneId wpid) const { + auto iplane = get_plane(wpid); + if (! iplane) { return Vector(0,0,0); } + const auto* pimpos = iplane->pimpos(); + auto pdir = pimpos->axis(2); + auto pmag = pimpos->region_binning().binsize(); + return pmag*pdir; + } + + virtual BoundingBox inner_bounds(WirePlaneId wpid) const { + auto iface = get_face(wpid); + if (iface) { + return iface->sensitive(); + } + return BoundingBox(); + } + + /// Forward any user-provided, application specific metadata for a + /// particular wpid. + /// TODO: use wpid.ident() = 0 for overall metadata? 
+ virtual Configuration metadata(WirePlaneId wpid) const { + const auto key = wpid.ident() == 0? "overall" : wpid.name(); + if (m_md.isNull()) { + return Json::nullValue; + } + if (! m_md.isMember(key)) { + return m_md["default"]; + } + return m_md[key]; + } + + // Initialize spatial data structures for efficient queries + virtual bool initialize_spatial_queries() { + + // Calculate grid dimensions based on overall bounding box + Point min_point = m_overall_bb.bounds().first; + Point max_point = m_overall_bb.bounds().second; + + // Store origin for grid calculations + m_grid_origin = min_point; + + // Calculate average bounding box size if auto grid sizing is requested + if (m_config_grid_size_x <= 0.0 || m_config_grid_size_y <= 0.0 || m_config_grid_size_z <= 0.0) { + Vector avg_size(0, 0, 0); + int count = 0; + for (const auto& [wpident, iface] : m_faces) { + BoundingBox face_bb = iface->sensitive(); + if (!face_bb.empty()) { + Vector size = face_bb.dimensions(); + avg_size = avg_size + size; + count++; + } + } + + if (count > 0) { + avg_size = avg_size * (1.0 / count); + // Set grid cell size to a fraction of average bounding box size + if (m_config_grid_size_x <= 0.0) { + m_grid_size_x = avg_size.x() * m_grid_fraction; + } else { + m_grid_size_x = m_config_grid_size_x; + } + + if (m_config_grid_size_y <= 0.0) { + m_grid_size_y = avg_size.y() * m_grid_fraction; + } else { + m_grid_size_y = m_config_grid_size_y; + } + + if (m_config_grid_size_z <= 0.0) { + m_grid_size_z = avg_size.z() * m_grid_fraction; + } else { + m_grid_size_z = m_config_grid_size_z; + } + } else { + // Fallback if no valid bounding boxes + m_grid_size_x = 1.0; + m_grid_size_y = 1.0; + m_grid_size_z = 1.0; + } + } else { + // Use configured sizes + m_grid_size_x = m_config_grid_size_x; + m_grid_size_y = m_config_grid_size_y; + m_grid_size_z = m_config_grid_size_z; + } + + // Ensure grid cell sizes are positive + m_grid_size_x = std::max(m_grid_size_x, 0.001*units::m); + m_grid_size_y = 
std::max(m_grid_size_y, 0.001*units::m); + m_grid_size_z = std::max(m_grid_size_z, 0.001*units::m); + + // Calculate number of cells in each dimension + int nx = std::ceil((max_point.x() - min_point.x()) / m_grid_size_x); + int ny = std::ceil((max_point.y() - min_point.y()) / m_grid_size_y); + int nz = std::ceil((max_point.z() - min_point.z()) / m_grid_size_z); + + // Limit grid cell count to reasonable bounds + const int min_grid_cells = 5; // Minimum useful number of cells per dimension + const int max_grid_cells = 100; // Maximum reasonable number of cells per dimension + + nx = std::max(min_grid_cells, std::min(nx, max_grid_cells)); + ny = std::max(min_grid_cells, std::min(ny, max_grid_cells)); + nz = std::max(min_grid_cells, std::min(nz, max_grid_cells)); + + // Recalculate grid cell size based on new cell counts + m_grid_size_x = (max_point.x() - min_point.x()) / nx; + m_grid_size_y = (max_point.y() - min_point.y()) / ny; + m_grid_size_z = (max_point.z() - min_point.z()) / nz; + + // Ensure at least one cell in each dimension + nx = std::max(nx, 1); + ny = std::max(ny, 1); + nz = std::max(nz, 1); + + // Resize the grid to hold all cells + m_grid.resize(nx); + for (auto& grid_x : m_grid) { + grid_x.resize(ny); + for (auto& grid_y : grid_x) { + grid_y.resize(nz); + } + } + + // Populate the grid with face IDs + for (const auto& [wpident, iface] : m_faces) { + BoundingBox face_bb = iface->sensitive(); + if (face_bb.empty()) continue; + + // Calculate grid cell indices for this face's bounding box + Point bb_min = face_bb.bounds().first; + Point bb_max = face_bb.bounds().second; + + // Add a small epsilon to ensure boundary boxes are included in all relevant cells + double epsilon = 1e-6*units::m; // Small safety margin + + int min_i = std::max(0, static_cast((bb_min.x() - min_point.x() - epsilon) / m_grid_size_x)); + int min_j = std::max(0, static_cast((bb_min.y() - min_point.y() - epsilon) / m_grid_size_y)); + int min_k = std::max(0, static_cast((bb_min.z() - 
min_point.z() - epsilon) / m_grid_size_z)); + + int max_i = std::min(nx - 1, static_cast((bb_max.x() - min_point.x() + epsilon) / m_grid_size_x)); + int max_j = std::min(ny - 1, static_cast((bb_max.y() - min_point.y() + epsilon) / m_grid_size_y)); + int max_k = std::min(nz - 1, static_cast((bb_max.z() - min_point.z() + epsilon) / m_grid_size_z)); + + // Add the face ID to all cells that overlap with its bounding box + for (int i = min_i; i <= max_i; ++i) { + for (int j = min_j; j <= max_j; ++j) { + for (int k = min_k; k <= max_k; ++k) { + m_grid[i][j][k].push_back(wpident); + } + } + } + } + + return true; + } + + // Optimized implementation using spatial grid + virtual WirePlaneId contained_by(const Point& point) const { + // Quick check if point is in overall volume first + if (!m_overall_bb.inside(point)) { + return WirePlaneId(WirePlaneLayer_t::kUnknownLayer, -1, -1); + } + + // Calculate grid cell indices for the point + int i = static_cast((point.x() - m_grid_origin.x()) / m_grid_size_x); + int j = static_cast((point.y() - m_grid_origin.y()) / m_grid_size_y); + int k = static_cast((point.z() - m_grid_origin.z()) / m_grid_size_z); + + // Check if indices are within grid bounds + if (i >= 0 && i < static_cast(m_grid.size()) && + j >= 0 && j < static_cast(m_grid[0].size()) && + k >= 0 && k < static_cast(m_grid[0][0].size())) { + + // Check faces in this grid cell + for (int wpident : m_grid[i][j][k]) { + auto it = m_faces.find(wpident); + if (it != m_faces.end()) { + auto bb = it->second->sensitive(); + if (bb.inside(point)) { + return WirePlaneId(wpident); + } + } + } + + // Special handling for points near cell boundaries + // Check neighboring cells if we're close to an edge + double eps = 1e-6*units::m; // Small distance from boundary to check neighbors + std::vector> neighbors; + + // Check if we're close to x boundaries + double x_frac = fmod((point.x() - m_grid_origin.x()) / m_grid_size_x, 1.0); + if (x_frac < eps || x_frac > (1.0 - eps)) { + // Add 
x-neighbors + if (i > 0) neighbors.push_back({i-1, j, k}); + if (i < static_cast(m_grid.size()) - 1) neighbors.push_back({i+1, j, k}); + } + + // Check if we're close to y boundaries + double y_frac = fmod((point.y() - m_grid_origin.y()) / m_grid_size_y, 1.0); + if (y_frac < eps || y_frac > (1.0 - eps)) { + // Add y-neighbors + if (j > 0) neighbors.push_back({i, j-1, k}); + if (j < static_cast(m_grid[0].size()) - 1) neighbors.push_back({i, j+1, k}); + } + + // Check if we're close to z boundaries + double z_frac = fmod((point.z() - m_grid_origin.z()) / m_grid_size_z, 1.0); + if (z_frac < eps || z_frac > (1.0 - eps)) { + // Add z-neighbors + if (k > 0) neighbors.push_back({i, j, k-1}); + if (k < static_cast(m_grid[0][0].size()) - 1) neighbors.push_back({i, j, k+1}); + } + + // Check neighboring cells + for (auto [ni, nj, nk] : neighbors) { + for (int wpident : m_grid[ni][nj][nk]) { + auto it = m_faces.find(wpident); + if (it != m_faces.end()) { + auto bb = it->second->sensitive(); + if (bb.inside(point)) { + return WirePlaneId(wpident); + } + } + } + } + } + + return WirePlaneId(WirePlaneLayer_t::kUnknownLayer, -1, -1); + } + + + virtual const std::map& wpident_faces() const { + return m_faces; + } + +private: + // Map wpid with layer=0 to its face. 
+ std::map m_faces; + + Configuration m_md; + // Overall bounding box containing all sensitive volumes + BoundingBox m_overall_bb; + + // Grid configuration parameters + double m_config_grid_size_x; + double m_config_grid_size_y; + double m_config_grid_size_z; + double m_grid_fraction; + + // Actual grid cell sizes (may be auto-calculated) + double m_grid_size_x; + double m_grid_size_y; + double m_grid_size_z; + + // Grid origin point (minimum corner of overall bounding box) + Point m_grid_origin; + + // 3D grid of face IDs for spatial lookups + // Format: [x][y][z] -> list of wpidents that overlap this cell + std::vector>>> m_grid; +}; + + diff --git a/aux/src/EnvFiducial.cxx b/aux/src/EnvFiducial.cxx new file mode 100644 index 000000000..cd71bd188 --- /dev/null +++ b/aux/src/EnvFiducial.cxx @@ -0,0 +1,63 @@ +#include "WireCellIface/IFiducial.h" +#include "WireCellIface/IAnodePlane.h" +#include "WireCellIface/IConfigurable.h" + +#include "WireCellUtil/NamedFactory.h" +#include "WireCellUtil/Exceptions.h" +#include "WireCellUtil/BoundingBox.h" + +#include +//#include // only debug + +// Implementation is totally local to this comp. unit so no need for namespacing. +class EnvFiducial; + +WIRECELL_FACTORY(EnvFiducial, EnvFiducial, + WireCell::IFiducial, WireCell::IConfigurable) + + +using namespace WireCell; + +/// The EnvFiducial is a simple bounding box that contains all anode sensitive +/// volumes. A point which is inside the gaps between these volumes is inside +/// this fiducial. If you want to exclude the gaps, use the IFiducial interface +/// of the DetectorVolumes component. +class EnvFiducial : public IFiducial, public IConfigurable { + + BoundingBox m_bb; + +public: + + EnvFiducial() {} + virtual ~EnvFiducial() {} + + virtual Configuration default_configuration() const { + Configuration cfg; + // A list of IAnodePlane "type:name" identifiers to match up to wpids. 
+ cfg["anodes"] = Json::arrayValue; + return cfg; + } + + virtual void configure(const Configuration& cfg) { + const auto& janodes = cfg["anodes"]; + + if (janodes.empty()) { + raise("EnvFiducial 'anodes' list is empty, check your configuration"); + } + + m_bb = BoundingBox(); + for (const auto& janode : cfg["anodes"]) { + const std::string anode_tn = janode.asString(); + auto ianode = Factory::find_tn(anode_tn); + for (auto iface : ianode->faces()) { + m_bb(iface->sensitive().bounds()); + } + } + // std::cerr << "EnvFiducial bounds: " << m_bb.bounds() << "\n"; + } + + // IFiducial + virtual bool contained(const Point& point) const { + return m_bb.inside(point); + } +}; diff --git a/aux/src/FrameSync.cxx b/aux/src/FrameSync.cxx index 44e7c09a3..c6560a78c 100644 --- a/aux/src/FrameSync.cxx +++ b/aux/src/FrameSync.cxx @@ -72,6 +72,8 @@ void Aux::FrameSync::flush(input_queues& iqs, output_queues& oqs) // May have more behind the EOS return flush(iqs, oqs); } + + (void)nempty; // suppress unused variable warning } bool Aux::FrameSync::operator()(input_queues& iqs, output_queues& oqs) diff --git a/aux/src/LinterpFunction.cxx b/aux/src/LinterpFunction.cxx new file mode 100644 index 000000000..890a86add --- /dev/null +++ b/aux/src/LinterpFunction.cxx @@ -0,0 +1,43 @@ +#include "WireCellAux/LinterpFunction.h" +#include "WireCellUtil/NamedFactory.h" + +WIRECELL_FACTORY(LinterpFunction, WireCell::Aux::LinterpFunction, + WireCell::IScalarFunction, WireCell::IConfigurable) + +namespace WireCell::Aux { + + /** A scalar function implemented as linear interpolation on regularly spaced points. + * + * Extrapolation returns end point values. 
+ */ + LinterpFunction::~LinterpFunction() + { + } + + void LinterpFunction::configure(const WireCell::Configuration& cfg) + { + auto values = get>(cfg, "values"); + + if (cfg["coords"].isArray()) { + auto coords = get>(cfg, "coords"); + irrterp terp; + int npts = values.size(); + for (int ind=0; ind(cfg, "start"); + auto step = get(cfg, "step"); + m_terp = linterp(values.begin(), values.end(), start, step); + } + } + + double LinterpFunction::scalar_function(double x) + { + return m_terp(x); + } + + +} diff --git a/aux/src/ParticleInfo.cxx b/aux/src/ParticleInfo.cxx new file mode 100644 index 000000000..ee3f52694 --- /dev/null +++ b/aux/src/ParticleInfo.cxx @@ -0,0 +1,277 @@ +#include "WireCellAux/ParticleInfo.h" +#include "WireCellUtil/Exceptions.h" +#include "WireCellUtil/Units.h" + +#include +#include + +using namespace WireCell; +using namespace WireCell::Aux; + +// Default constructor +ParticleInfo::ParticleInfo() + : m_pdg_code(0) + , m_mass(0.0) + , m_name("unknown") + , m_kinetic_energy(0.0) + , m_four_momentum(0.0, 0.0, 0.0, 0.0) + , m_id(0) + , m_charge(0.0) +{ +} + +// Comprehensive constructor +ParticleInfo::ParticleInfo(int pdg_code, + double mass, + const std::string& name, + double kinetic_energy, + const WireCell::Point& momentum_3vec, + int id, + double charge) + : m_pdg_code(pdg_code) + , m_mass(mass) + , m_name(name) + , m_kinetic_energy(kinetic_energy) + , m_id(id) + , m_charge(charge) +{ + // Calculate total energy from kinetic energy and mass + double total_energy = m_kinetic_energy + m_mass; + m_four_momentum.set(total_energy, momentum_3vec.x(), momentum_3vec.y(), momentum_3vec.z()); + validate_inputs(); +} + +// Constructor from 4-momentum +ParticleInfo::ParticleInfo(int pdg_code, + double mass, + const std::string& name, + const WireCell::D4Vector& four_momentum, + int id, + double charge) + : m_pdg_code(pdg_code) + , m_mass(mass) + , m_name(name) + , m_four_momentum(four_momentum) + , m_id(id) + , m_charge(charge) +{ + // Calculate 
kinetic energy from total energy and mass + m_kinetic_energy = m_four_momentum.e() - m_mass; + validate_inputs(); +} + +bool ParticleInfo::is_stable() const { + // Common stable particles by PDG code + static const std::set stable_pdgs = { + 11, -11, // electron, positron + 12, -12, // electron neutrino, anti-electron neutrino + 13, -13, // muon, anti-muon (long-lived) + 14, -14, // muon neutrino, anti-muon neutrino + 16, -16, // tau neutrino, anti-tau neutrino + 22, // photon + 2112, // neutron (relatively stable) + 2212, // proton + -2212 // anti-proton + }; + return stable_pdgs.find(std::abs(m_pdg_code)) != stable_pdgs.end(); +} + +// Setters with kinematic updates +void ParticleInfo::set_momentum(const WireCell::Point& momentum_3vec) { + m_four_momentum.set(m_four_momentum.e(), momentum_3vec.x(), momentum_3vec.y(), momentum_3vec.z()); + update_kinematics(); +} + +void ParticleInfo::set_four_momentum(const WireCell::D4Vector& four_momentum) { + m_four_momentum = four_momentum; + m_kinetic_energy = m_four_momentum.e() - m_mass; +} + +void ParticleInfo::set_kinetic_energy(double ke) { + m_kinetic_energy = ke; + double total_energy = m_kinetic_energy + m_mass; + + // Recalculate momentum magnitude if needed + double p_mag = std::sqrt(total_energy * total_energy - m_mass * m_mass); + + if (momentum_magnitude() > 0) { + // Scale existing momentum direction + WireCell::Point current_momentum = momentum(); + double current_p_mag = momentum_magnitude(); + double scale = p_mag / current_p_mag; + m_four_momentum.set(total_energy, + current_momentum.x() * scale, + current_momentum.y() * scale, + current_momentum.z() * scale); + } else { + // Set momentum along z-axis if no direction exists + m_four_momentum.set(total_energy, 0, 0, p_mag); + } +} + +// Private helper methods +void ParticleInfo::update_kinematics() { + double p_mag = momentum_magnitude(); + double total_energy = std::sqrt(p_mag * p_mag + m_mass * m_mass); + m_four_momentum.e(total_energy); + m_kinetic_energy 
= total_energy - m_mass; +} + +void ParticleInfo::validate_inputs() { + if (m_mass < 0.0) { + raise("ParticleInfo: mass cannot be negative"); + } + if (m_four_momentum.e() < m_mass) { + raise("ParticleInfo: total energy cannot be less than rest mass"); + } + if (m_kinetic_energy < 0.0) { + raise("ParticleInfo: kinetic energy cannot be negative"); + } + + // Check energy-momentum relation using D4Vector's mass calculation + double calculated_mass = m_four_momentum.mass(); + if (std::abs(calculated_mass - m_mass) > 1e-6 * m_mass) { + raise("ParticleInfo: energy-momentum relation violated"); + } +} + +// Static utility methods for PDG lookups +std::string ParticleInfo::pdg_to_name(int pdg_code) { + const auto& name_map = get_pdg_name_map(); + auto it = name_map.find(std::abs(pdg_code)); + if (it != name_map.end()) { + return pdg_code < 0 ? "anti-" + it->second : it->second; + } + return "unknown"; +} + +double ParticleInfo::pdg_to_mass(int pdg_code) { + const auto& mass_map = get_pdg_mass_map(); + auto it = mass_map.find(std::abs(pdg_code)); + return it != mass_map.end() ? it->second : 0.0; +} + +double ParticleInfo::pdg_to_charge(int pdg_code) { + const auto& charge_map = get_pdg_charge_map(); + auto it = charge_map.find(pdg_code); // Don't use abs() here as charge depends on sign + return it != charge_map.end() ? 
it->second : 0.0; +} + +ParticleInfo ParticleInfo::from_pdg(int pdg_code, + const WireCell::Point& momentum_3vec, + int id) { + return ParticleInfo(pdg_code, + pdg_to_mass(pdg_code), + pdg_to_name(pdg_code), + 0.0, // KE will be calculated + momentum_3vec, + id, + pdg_to_charge(pdg_code)); +} + +// Static data for PDG lookups +const std::map& ParticleInfo::get_pdg_name_map() { + static const std::map pdg_names = { + {11, "electron"}, + {12, "electron_neutrino"}, + {13, "muon"}, + {14, "muon_neutrino"}, + {15, "tau"}, + {16, "tau_neutrino"}, + {22, "photon"}, + {111, "pi0"}, + {211, "pi_plus"}, + {311, "K0"}, + {321, "K_plus"}, + {2112, "neutron"}, + {2212, "proton"}, + {3122, "lambda"}, + {3222, "sigma_plus"}, + {3112, "sigma_minus"}, + {3322, "xi_minus"}, + {3334, "omega_minus"} + }; + return pdg_names; +} + +const std::map& ParticleInfo::get_pdg_mass_map() { + static const std::map pdg_masses = { + {11, 0.511 * units::MeV}, // electron + {12, 0.0}, // electron neutrino (massless) + {13, 105.658 * units::MeV}, // muon + {14, 0.0}, // muon neutrino (massless) + {15, 1776.86 * units::MeV}, // tau + {16, 0.0}, // tau neutrino (massless) + {22, 0.0}, // photon + {111, 134.977 * units::MeV}, // pi0 + {211, 139.570 * units::MeV}, // pi_plus + {311, 497.648 * units::MeV}, // K0 + {321, 493.677 * units::MeV}, // K_plus + {2112, 939.565 * units::MeV}, // neutron + {2212, 938.272 * units::MeV}, // proton + {3122, 1115.683 * units::MeV}, // lambda + {3222, 1189.37 * units::MeV}, // sigma_plus + {3112, 1197.449 * units::MeV}, // sigma_minus + {3322, 1321.71 * units::MeV}, // xi_minus + {3334, 1672.45 * units::MeV} // omega_minus + }; + return pdg_masses; +} + +const std::map& ParticleInfo::get_pdg_charge_map() { + static const std::map pdg_charges = { + {11, -1.0}, // electron + {-11, 1.0}, // positron + {12, 0.0}, // electron neutrino + {-12, 0.0}, // anti-electron neutrino + {13, -1.0}, // muon + {-13, 1.0}, // anti-muon + {14, 0.0}, // muon neutrino + {-14, 0.0}, // 
anti-muon neutrino + {15, -1.0}, // tau + {-15, 1.0}, // anti-tau + {16, 0.0}, // tau neutrino + {-16, 0.0}, // anti-tau neutrino + {22, 0.0}, // photon + {111, 0.0}, // pi0 + {211, 1.0}, // pi_plus + {-211, -1.0}, // pi_minus + {311, 0.0}, // K0 + {321, 1.0}, // K_plus + {-321, -1.0}, // K_minus + {2112, 0.0}, // neutron + {2212, 1.0}, // proton + {-2212, -1.0}, // anti-proton + {3122, 0.0}, // lambda + {3222, 1.0}, // sigma_plus + {3112, -1.0}, // sigma_minus + {3322, -1.0}, // xi_minus + {3334, -1.0} // omega_minus + }; + return pdg_charges; +} + +// Helper functions for common particle types +namespace WireCell::Aux::ParticleHelpers { + + ParticleInfo electron(const WireCell::Point& momentum, int id) { + return ParticleInfo::from_pdg(11, momentum, id); + } + + ParticleInfo muon(const WireCell::Point& momentum, int charge_sign, int id) { + return ParticleInfo::from_pdg(charge_sign < 0 ? 13 : -13, momentum, id); + } + + ParticleInfo pion_charged(const WireCell::Point& momentum, int charge_sign, int id) { + return ParticleInfo::from_pdg(charge_sign > 0 ? 211 : -211, momentum, id); + } + + ParticleInfo proton(const WireCell::Point& momentum, int id) { + return ParticleInfo::from_pdg(2212, momentum, id); + } + + ParticleInfo photon(const WireCell::Point& momentum, int id) { + return ParticleInfo::from_pdg(22, momentum, id); + } + +} \ No newline at end of file diff --git a/aux/src/PolyFiducial.cxx b/aux/src/PolyFiducial.cxx new file mode 100644 index 000000000..c6670d62a --- /dev/null +++ b/aux/src/PolyFiducial.cxx @@ -0,0 +1,146 @@ +/** An envelope modeled with polygons stacked along a given axis. + + The envelope is described a list of polygonal slabs. + + Each slab is defined by: + + - two coordinate values along the axis giving the span of the slab. + + - a set of 2D points (the polygon) in the plane orthogonal to the axis. + + The 2D points are ordered according to the right hand rule. If axis is the + X axis then the points are ordered (y,z). 
If axis is the Y axis (z,x). If + axis is the Z axis then (x,y). + + Note, slabs are checked for point containment in the order that they are + provided. A point is contained in the envelope as soon as the first slab is + found to contain the point. + + + This corresponds to "ToyFiducial" from WC prototype. +*/ + +#include "WireCellIface/IFiducial.h" +#include "WireCellIface/IAnodePlane.h" +#include "WireCellIface/IConfigurable.h" + +#include "WireCellUtil/NamedFactory.h" +#include "WireCellUtil/Exceptions.h" +#include "WireCellUtil/BoundingBox.h" + +#include +// #include // only debug + +// Implementation is totally local to this comp. unit so no need for namespacing. +class PolyFiducial; + +WIRECELL_FACTORY(PolyFiducial, PolyFiducial, + WireCell::IFiducial, WireCell::IConfigurable) + + +using namespace WireCell; + +// "ray casting algorithm" aka "pnpoly" +static bool is_inside(double x, double y, const std::vector& cx, const std::vector& cy) { + bool inside = false; + const size_t n = cx.size(); + for (size_t i=0, j=n-1; i y) != (cy[j] > y)) && + // Calculate x-coordinate of intersection with the ray + (x < (cx[j] - cx[i]) * (y - cy[i]) / (cy[j] - cy[i]) + cx[i])) { + inside = !inside; + } + } + return inside; +} + + +struct PolySlab { + BoundingBox bb; // bounding box of 3D points of the slab + std::vector a,b; // 2D points. + + bool inside(const Point& p, int axis=0) const { + if (! bb.inside(p)) return false; + + const int i = axis; // along axis + const int j = (i+1)%3; // along transverse coord a. + const int k = (i+2)%3; // along transverse coord b. + + return is_inside(p[j], p[k], a, b); + } + +}; + +class PolyFiducial : public IFiducial, public IConfigurable { + + int m_axis{0}; + BoundingBox m_bb; + std::vector m_slabs; + +public: + + PolyFiducial() {} + virtual ~PolyFiducial() {} + + virtual Configuration default_configuration() const { + Configuration cfg; + // The axis of the slabs. 
x=0, y=1, z=2 + cfg["axis"] = 0; + + // Each slab is an object like: + // {min:0, max:1, corners:[ [a1,b1], [a2,b2], ...]} + // min/max give coordinate values along the axis. + // corners is array of pairs of coordinates in the transverse plane. + cfg["slabs"] = Json::arrayValue; + return cfg; + } + + virtual void configure(const Configuration& cfg) { + m_axis = get(cfg, "axis", m_axis); + + const int i = m_axis; // along axis + const int j = (i+1)%3; // along transverse coord a. + const int k = (i+2)%3; // along transverse coord b. + + m_slabs.clear(); + for (const auto& jslab : cfg["slabs"]) { + PolySlab slab; + + Point p1,p2; + p1[i] = get(jslab, "min",0); + p2[i] = get(jslab, "max",0); + if (p1[i] == p2[i]) { + raise("poly slab has no width"); + } + + const auto& jcorners = jslab["corners"]; + if (jcorners.size() < 3) { + raise("poly slab must have at least 3 corners"); + } + + for (const auto& jab : jcorners) { + p1[j] = p2[j] = jab[0].asDouble(); + p1[k] = p2[k] = jab[1].asDouble(); + + slab.bb(p1); + slab.bb(p2); + m_bb(slab.bb.bounds()); + slab.a.push_back(p1[j]); + slab.b.push_back(p1[k]); + } + m_slabs.push_back(slab); + } + } + + + // IFiducial + virtual bool contained(const Point& point) const { + if (! 
m_bb.inside(point)) return false; + + for (const auto& slab : m_slabs) { + if (slab.inside(point, m_axis)) return true; + } + return false; + } +}; diff --git a/aux/src/SamplingHelpers.cxx b/aux/src/SamplingHelpers.cxx index 5c081a6dd..361782a5a 100644 --- a/aux/src/SamplingHelpers.cxx +++ b/aux/src/SamplingHelpers.cxx @@ -3,23 +3,92 @@ using namespace WireCell; using WireCell::PointCloud::Dataset; using WireCell::PointCloud::Array; -WireCell::PointCloud::Dataset -Aux::make_scalar_dataset(const IBlob::pointer iblob, const Point& center, - const int npoints, const double tick) + + +PointCloud::Tree::named_pointclouds_t +Aux::sample_live(const IBlobSampler::pointer& sampler, const IBlob::pointer& iblob, + const std::vector& angles, const double tick, int ident) +{ + PointCloud::Tree::named_pointclouds_t pcs; + + if (ident<0) ident = iblob->ident(); + + auto [pc3d, aux] = sampler->sample_blob(iblob, ident); + if (pc3d.size_major() == 0) { + return pcs; + } + fill_2dpcs(pc3d, angles); + + Dataset scalar; + fill_scalar_blob(scalar, *iblob, tick); + fill_scalar_aux(scalar, aux); + fill_scalar_center(scalar, pc3d); + + pcs.emplace("scalar", std::move(scalar)); + pcs.emplace("3d", std::move(pc3d)); + + return pcs; +} + +PointCloud::Tree::named_pointclouds_t Aux::sample_dead(const IBlob::pointer& iblob, const double tick) +{ + PointCloud::Tree::named_pointclouds_t pcs; + Dataset scalar; + Aux::fill_scalar_blob(scalar, *iblob, tick); + Aux::fill_scalar_aux(scalar); + Aux::fill_scalar_center(scalar); + + pcs.emplace("scalar", scalar); + pcs.emplace("corner", make_corner_dataset(*iblob)); + + return pcs; +} + + +void Aux::fill_2dpcs(PointCloud::Dataset& pc, const std::vector& angles, const std::string& pattern) +{ + const size_t npoints = pc.size_major(); + if (! 
npoints) { + return; + } + + const auto x = pc.get("x")->elements(); + const auto y = pc.get("y")->elements(); + const auto z = pc.get("z")->elements(); + + std::vector x2d(npoints); + std::vector y2d(npoints); + + for (size_t ind=0; indvalue()})); - ds.add("face",Array({(int)iblob->face()->which()})); - ds.add("center_x", Array({(double)center.x()})); - ds.add("center_y", Array({(double)center.y()})); - ds.add("center_z", Array({(double)center.z()})); - ds.add("npoints", Array({(int)npoints})); - const auto& islice = iblob->slice(); + scalar.add("charge", Array({(double)iblob.value()})); + WirePlaneId wpid(kAllLayers, iblob.face()->which(), iblob.face()->anode()); + scalar.add("wpid",Array({(int)wpid.ident()})); + + const auto& islice = iblob.slice(); // fixme: possible risk of roundoff error + truncation makes _min == _max? - ds.add("slice_index_min", Array({(int)(islice->start()/tick)})); // unit: tick - ds.add("slice_index_max", Array({(int)((islice->start()+islice->span())/tick)})); - const auto& shape = iblob->shape(); + scalar.add("slice_index_min", Array({(int)(islice->start()/tick)})); // unit: tick + scalar.add("slice_index_max", Array({(int)((islice->start()+islice->span())/tick)})); + const auto& shape = iblob.shape(); const auto& strips = shape.strips(); /// ASSUMPTION: is this always true? 
std::unordered_map layer_names = { @@ -31,41 +100,62 @@ Aux::make_scalar_dataset(const IBlob::pointer iblob, const Point& center, if(layer_names.find(strip.layer) == layer_names.end()) { continue; } - ds.add(layer_names[strip.layer]+"_wire_index_min", Array({(int)strip.bounds.first})); - ds.add(layer_names[strip.layer]+"_wire_index_max", Array({(int)strip.bounds.second})); + scalar.add(layer_names[strip.layer]+"_wire_index_min", Array({(int)strip.bounds.first})); + scalar.add(layer_names[strip.layer]+"_wire_index_max", Array({(int)strip.bounds.second})); } - return ds; } - -WireCell::PointCloud::Dataset Aux::make2dds (const Dataset& ds3d, const double angle) { - Dataset ds; - const auto& x = ds3d.get("x")->elements(); - const auto& y = ds3d.get("y")->elements(); - const auto& z = ds3d.get("z")->elements(); - std::vector x2d(x.size()); - std::vector y2d(y.size()); - for (size_t ind=0; ind("empty 'aux' PC. you probably fell victim to issue #426"); + } + const std::vector auxnames = { + "max_wire_interval", "min_wire_interval", "max_wire_type", "min_wire_type", + }; + for (const auto& auxname : auxnames) { + scalar.add(auxname, *aux.get(auxname)); + } +} +void Aux::fill_scalar_aux(PointCloud::Dataset& scalar) +{ + const std::vector auxnames = { + "max_wire_interval", "min_wire_interval", "max_wire_type", "min_wire_type", + }; + for (const auto& auxname : auxnames) { + scalar.add(auxname, Array({0})); } - ds.add("x", Array(x2d)); - ds.add("y", Array(y2d)); - return ds; } // Calculate the average position of a point cloud tree. 
Point Aux::calc_blob_center(const Dataset& ds) { - const auto& arr_x = ds.get("x")->elements(); - const auto& arr_y = ds.get("y")->elements(); - const auto& arr_z = ds.get("z")->elements(); - const size_t len = arr_x.size(); + const size_t len = ds.size_major(); if(len == 0) { - raise("empty point cloud"); + raise("calc_blob_center: empty point cloud has no center"); } + const auto arr_x = ds.get("x")->elements(); + const auto arr_y = ds.get("y")->elements(); + const auto arr_z = ds.get("z")->elements(); Point ret(0,0,0); for (size_t ind=0; indshape(); + const auto& shape = iblob.shape(); const auto& crossings = shape.corners(); - const auto& anodeface = iblob->face(); + const auto& anodeface = iblob.face(); const auto& coords = anodeface->raygrid(); // ray center @@ -132,10 +222,10 @@ Dataset Aux::make_corner_dataset(const IBlob::pointer iblob) return ds; } -double Aux::time2drift(const IAnodeFace::pointer anodeface, const double time_offset, const double drift_speed, double time) { +double Aux::time2drift(IAnodeFace::pointer anodeface, const double time_offset, const double drift_speed, double time) { // std::cout << "time2drift: " << time << " " << time_offset << " " << drift_speed << std::endl; - const Pimpos* colpimpos = anodeface->planes()[2]->pimpos(); - double xsign = colpimpos->axis(0)[0]; + // const Pimpos* colpimpos = anodeface->planes()[2]->pimpos(); + double xsign = anodeface->dirx(); double xorig = anodeface->planes()[2]->wires().front()->center().x(); const double drift = (time + time_offset)*drift_speed; /// TODO: how to determine xsign? 
@@ -145,9 +235,11 @@ double Aux::time2drift(const IAnodeFace::pointer anodeface, const double time_of template using mapfp_t = std::unordered_map>; -void Aux::add_ctpc(PointCloud::Tree::Points::node_t& root, const WireCell::IBlobSet::vector ibsv, - const IAnodeFace::pointer iface, const int face, const double time_offset, const double drift_speed, - const double tick, const double dead_threshold) +void Aux::add_ctpc( + PointCloud::Tree::Points::node_t& root, + const WireCell::ISlice::vector& slices, + IAnodeFace::pointer iface, const int face, const double time_offset, const double drift_speed, + const double tick, const double dead_threshold) { mapfp_t> ds_x, ds_y, ds_charge, ds_charge_err; mapfp_t> ds_cident, ds_wind, ds_slice_index; @@ -166,51 +258,51 @@ void Aux::add_ctpc(PointCloud::Tree::Points::node_t& root, const WireCell::IBlob } size_t nslices = 0; - for (const auto& ibs : ibsv) { - const auto& slice = ibs->slice(); - { - // auto& slice = std::get(cgnode.ptr); - ++nslices; - const auto slice_index = slice->start()/tick; - const auto& activity = slice->activity(); - for (const auto& [ichan, charge] : activity) { - if(charge.uncertainty() > dead_threshold) { - // if (charge.value() >0) - // std::cout << "Test: dead_threshold " << dead_threshold << " charge.uncertainty() " << charge.uncertainty() << " " << charge.value() << " " << ichan << " " << slice_index << std::endl; - continue; - } - const auto& cident = ichan->ident(); - const auto& wires = ichan->wires(); - for (const auto& wire : wires) { - const auto& wind = wire->index(); - const auto& plane = wire->planeid().index(); - // log->debug("slice {} chan {} charge {} wind {} plane {} face {}", slice_index, cident, charge, wind, plane, wire->planeid().face()); - // const auto& face = wire->planeid().face(); - // const auto& face = m_face; - /// FIXME: is this the way to get face? 
+ for (auto slice : slices) { + // auto& slice = std::get(cgnode.ptr); + ++nslices; + const auto slice_index = slice->start()/tick; + const auto& activity = slice->activity(); + for (const auto& [ichan, charge] : activity) { + if(charge.uncertainty() > dead_threshold) { + // if (charge.value() >0) + // std::cout << "Test: dead_threshold " << dead_threshold << " charge.uncertainty() " << charge.uncertainty() << " " << charge.value() << " " << ichan << " " << slice_index << std::endl; + continue; + } + const auto& cident = ichan->ident(); + const auto& wires = ichan->wires(); + for (const auto& wire : wires) { + const auto& wind = wire->index(); + const auto& plane = wire->planeid().index(); + // log->debug("slice {} chan {} charge {} wind {} plane {} face {}", slice_index, cident, charge, wind, plane, wire->planeid().face()); + // const auto& face = wire->planeid().face(); + // const auto& face = m_face; + /// FIXME: is this the way to get face? // std::cout << "Test: " << slice->start() << " " << slice_index << " " << tp.time_offset << " " << tp.drift_speed << std::endl; - const auto& x = time2drift(iface, time_offset, drift_speed, slice->start()); - const double y = pitch_mags.at(face).at(plane)* (wind +0.5) + proj_centers.at(face).at(plane); // the additon of 0.5 is to match with the convetion of WCP (X. Q.) 
- - // if (abs(wind-815) < 2 or abs(wind-1235) < 2 or abs(wind-1378) < 2) { - // log->debug("slice {} chan {} charge {} wind {} plane {} face {} x {} y {}", slice_index, cident, charge, - // wind, plane, face, x, y); - // } - ds_x[face][plane].push_back(x); - ds_y[face][plane].push_back(y); - ds_charge[face][plane].push_back(charge.value()); - ds_charge_err[face][plane].push_back(charge.uncertainty()); - ds_cident[face][plane].push_back(cident); - ds_wind[face][plane].push_back(wind); - ds_slice_index[face][plane].push_back(slice_index); - } + const auto& x = time2drift(iface, time_offset, drift_speed, slice->start()); + const double y = pitch_mags.at(face).at(plane)* (wind +0.5) + proj_centers.at(face).at(plane); // the additon of 0.5 is to match with the convetion of WCP (X. Q.) + + // if (abs(wind-815) < 2 or abs(wind-1235) < 2 or abs(wind-1378) < 2) { + // log->debug("slice {} chan {} charge {} wind {} plane {} face {} x {} y {}", slice_index, cident, charge, + // wind, plane, face, x, y); + // } + ds_x[face][plane].push_back(x); + ds_y[face][plane].push_back(y); + ds_charge[face][plane].push_back(charge.value()); + ds_charge_err[face][plane].push_back(charge.uncertainty()); + ds_cident[face][plane].push_back(cident); + ds_wind[face][plane].push_back(wind); + ds_slice_index[face][plane].push_back(slice_index); } - // log->debug("ds_x.size() {}", ds_x.size()); } - } + // log->debug("ds_x.size() {}", ds_x.size()); + } // loop over slices + // log->debug("got {} slices", nslices); + std::vector plane_names = {"U", "V", "W"}; + for (const auto& [face, planes] : ds_x) { for (const auto& [plane, x] : planes) { @@ -231,7 +323,7 @@ void Aux::add_ctpc(PointCloud::Tree::Points::node_t& root, const WireCell::IBlob ds.add("cident", Array(ds_cident[face][plane])); ds.add("wind", Array(ds_wind[face][plane])); ds.add("slice_index", Array(ds_slice_index[face][plane])); - const std::string ds_name = String::format("ctpc_f%dp%d", face, plane); + const std::string ds_name = 
String::format("ctpc_a%df%dp%d", 0, face, plane_names[plane]); // root->insert(Points(named_pointclouds_t{{ds_name, std::move(ds)}})); root.value.local_pcs().emplace(ds_name, ds); // log->debug("added point cloud {} with {} points", ds_name, x.size()); @@ -240,10 +332,14 @@ void Aux::add_ctpc(PointCloud::Tree::Points::node_t& root, const WireCell::IBlob // for (const auto& [name, pc] : root->value.local_pcs()) { // log->debug("contains point cloud {} with {} points", name, pc.get("x")->size_major()); // } + + (void)nslices; // unused, but useful for debugging } -void Aux::add_dead_winds(PointCloud::Tree::Points::node_t& root, const IBlobSet::vector ibsv, - const IAnodeFace::pointer iface, const int face , +void Aux::add_dead_winds( + PointCloud::Tree::Points::node_t& root, + const ISlice::vector& slices, + IAnodeFace::pointer iface, const int face , const double time_offset , const double drift_speed , const double tick, const double dead_threshold){ @@ -255,32 +351,33 @@ void Aux::add_dead_winds(PointCloud::Tree::Points::node_t& root, const IBlobSet: mapfp_t> xbegs, xends; mapfp_t> winds; - for (const auto& ibs : ibsv) { - const auto& slice = ibs->slice(); - { - // const auto& slice_index = slice->start()/tick; - const auto& activity = slice->activity(); - for (const auto& [ichan, charge] : activity) { - if(charge.uncertainty() < dead_threshold) continue; - const auto& wires = ichan->wires(); - for (const auto& wire : wires) { - const auto& wind = wire->index(); - const auto& plane = wire->planeid().index(); - // const auto& x = time2drift(iface, time_offset, drift_speed, slice->start()); - const auto& xbeg = time2drift(iface, time_offset, drift_speed, slice->start()); - const auto& xend = time2drift(iface, time_offset, drift_speed, slice->start() + slice->span()); - - auto& dead_winds = map_dead_winds[std::make_pair(face, plane)]; - if (dead_winds.find(wind) == dead_winds.end()) { - dead_winds[wind] = {std::min(xbeg,xend)-0.1*units::cm, std::max(xbeg,xend) + 
0.1*units::cm}; - } else { - const auto& [xbeg_now, xend_now] = dead_winds[wind]; - dead_winds[wind] = {std::min(std::min(xbeg,xend)-0.1*units::cm, xbeg_now), std::max(std::max(xbeg,xend) + 0.1*units::cm, xend_now)}; - } - faces.insert(face); - planes.insert(plane); + const int apa = iface->anode(); + + for (auto slice : slices) { + // const auto& slice_index = slice->start()/tick; + const auto& activity = slice->activity(); + for (const auto& [ichan, charge] : activity) { + // std::cout << "Test: dead_threshold " << dead_threshold << " charge.uncertainty() " << charge.uncertainty() << " " << charge.value() << " " << ichan->ident() << " " << slice->start() << std::endl; + if(charge.uncertainty() < dead_threshold) continue; + const auto& wires = ichan->wires(); + for (const auto& wire : wires) { + const auto& wind = wire->index(); + const auto& plane = wire->planeid().index(); + // const auto& x = time2drift(iface, time_offset, drift_speed, slice->start()); + const auto& xbeg = time2drift(iface, time_offset, drift_speed, slice->start()); + const auto& xend = time2drift(iface, time_offset, drift_speed, slice->start() + slice->span()); + + auto& dead_winds = map_dead_winds[std::make_pair(face, plane)]; + if (dead_winds.find(wind) == dead_winds.end()) { + dead_winds[wind] = {std::min(xbeg,xend)-0.1*units::cm, std::max(xbeg,xend) + 0.1*units::cm}; + } else { + const auto& [xbeg_now, xend_now] = dead_winds[wind]; + dead_winds[wind] = {std::min(std::min(xbeg,xend)-0.1*units::cm, xbeg_now), std::max(std::max(xbeg,xend) + 0.1*units::cm, xend_now)}; } + faces.insert(face); + planes.insert(plane); + } } } @@ -294,13 +391,15 @@ void Aux::add_dead_winds(PointCloud::Tree::Points::node_t& root, const IBlobSet: } } } + std::vector plane_names = {"U", "V", "W"}; + for (const auto& face : faces) { for (const auto& plane : planes) { Dataset ds; ds.add("xbeg", Array(xbegs[face][plane])); ds.add("xend", Array(xends[face][plane])); ds.add("wind", Array(winds[face][plane])); - const 
std::string ds_name = String::format("dead_winds_f%dp%d", face, plane); + const std::string ds_name = String::format("dead_winds_a%df%dp%d", apa, face, plane_names[plane]); root.value.local_pcs().emplace(ds_name, ds); // log->debug("added point cloud {} with {} points", ds_name, xbeg.size()); } diff --git a/aux/src/TensorDMpointtree.cxx b/aux/src/TensorDMpointtree.cxx index e167ebbdd..141d574fe 100644 --- a/aux/src/TensorDMpointtree.cxx +++ b/aux/src/TensorDMpointtree.cxx @@ -124,6 +124,10 @@ std::unique_ptr WireCell::Aux::TensorDM::as_pctree(const ITensor::vector& tens, const std::string& datapath) { + /// TODO: FIXME: this works, but need to understand why + if (tens.size() <= 2) { + return std::make_unique(); + } TensorIndex ti(tens); return as_pctree(ti, datapath); } @@ -161,7 +165,10 @@ WireCell::Aux::TensorDM::as_pctree(const TensorIndex& ti, // Loop cross product of (PC name,node) for (const auto& [pcname, pcds] : pointclouds) { - + if (!lpcmaps_ds.get(pcname)) { + /// TODO: @BV what does this mean? + continue; + } // Get local PC map vector as a span on DS array. 
auto lpcmap = lpcmaps_ds.get(pcname)->elements(); diff --git a/aux/test/doctest_fiducials.cxx b/aux/test/doctest_fiducials.cxx new file mode 100644 index 000000000..953e679ce --- /dev/null +++ b/aux/test/doctest_fiducials.cxx @@ -0,0 +1,82 @@ +#include "WireCellUtil/doctest.h" +#include "WireCellUtil/Logging.h" +#include "WireCellUtil/PluginManager.h" +#include "WireCellUtil/NamedFactory.h" +#include "WireCellUtil/Point.h" +#include "WireCellUtil/Persist.h" +#include "WireCellIface/IFiducial.h" +#include "WireCellIface/IConfigurable.h" +using namespace WireCell; +using spdlog::debug; + +TEST_CASE("aux boxfiducial") { + PluginManager& pm = PluginManager::instance(); + pm.add("WireCellAux"); + + { + auto icfg = Factory::lookup("BoxFiducial"); + auto cfg = icfg->default_configuration(); + cfg["bounds"]["tail"]["x"] = 0; + cfg["bounds"]["tail"]["y"] = 0; + cfg["bounds"]["tail"]["z"] = 0; + cfg["bounds"]["head"]["x"] = 10; + cfg["bounds"]["head"]["y"] = 10; + cfg["bounds"]["head"]["z"] = 10; + icfg->configure(cfg); + } + + { + auto fv = Factory::lookup_tn("BoxFiducial"); + + CHECK(fv->contained(Point(0,0,0))); + CHECK(fv->contained(Point(10,10,10))); + CHECK(! fv->contained(Point(10.1,10.1,10.1))); + CHECK(! fv->contained(Point(-0.1,-0.1,-0.1))); + } + +} + + +TEST_CASE("aux polyfiducial") { + PluginManager& pm = PluginManager::instance(); + pm.add("WireCellAux"); + + const std::string jsonnet_config = R"( +[ + { // each slab + local npts = n + 3, + local ang = (2*3.1415)/npts, + + min: n, + max: n+1, + corners:[ + [std.cos(ang*i), std.sin(ang*i)] + for i in std.range(0, npts-1)] + } for n in std.range(0,5) +] +)"; + + { + auto icfg = Factory::lookup("PolyFiducial"); + auto cfg = icfg->default_configuration(); + cfg["slabs"] = Persist::loads(jsonnet_config); + icfg->configure(cfg); + } + + { + auto fv = Factory::lookup_tn("PolyFiducial"); + + CHECK(! 
fv->contained(Point(-1,0,0))); // before on axis + CHECK( fv->contained(Point( 0,0,0))); // low edge on axis + CHECK( fv->contained(Point( 6,0,0))); // high edge on axis + CHECK(! fv->contained(Point(6.1,0,0))); // after on axis + + // first slab is a triangle with a corner at (y=1,z=0) + CHECK( fv->contained(Point(0.5, 0.9999, 0)) ); + CHECK(! fv->contained(Point(0.5, 1.0001, 0)) ); + CHECK(! fv->contained(Point(0.5, 1.0, 0.1)) ); + + } + +} + diff --git a/aux/test/doctest_linterpfunction.cxx b/aux/test/doctest_linterpfunction.cxx new file mode 100644 index 000000000..efa0be621 --- /dev/null +++ b/aux/test/doctest_linterpfunction.cxx @@ -0,0 +1,56 @@ +#include "WireCellUtil/doctest.h" +#include "WireCellUtil/Logging.h" +#include "WireCellUtil/Configuration.h" +#include "WireCellAux/LinterpFunction.h" + +using namespace WireCell; +using namespace WireCell::Aux; + +using spdlog::debug; + +TEST_CASE("aux linterp function") { + + Configuration reg_cfg; + reg_cfg["values"][0] = 0.0; // 0 = x + reg_cfg["values"][1] = 2.0; // 1 + reg_cfg["values"][2] = 3.0; // 2 + reg_cfg["values"][3] = -1.0; // 3 + Configuration irr_cfg = reg_cfg; + irr_cfg["coords"][0] = 0.0; // = x + irr_cfg["coords"][1] = 10.0; + irr_cfg["coords"][2] = 11.0; + irr_cfg["coords"][3] = 20.; + + reg_cfg["start"] = 0.0; + reg_cfg["step"] = 1.0; + + LinterpFunction reg, irr; + + { + auto tmp = reg.default_configuration(); + reg.configure(update(tmp, reg_cfg)); + } + { + auto tmp = irr.default_configuration(); + irr.configure(update(tmp, irr_cfg)); + } + + // first point exact + REQUIRE(0.0 == reg.scalar_function(0.0)); + REQUIRE(0.0 == irr.scalar_function(0.0)); + + // second point exact + REQUIRE(2.0 == reg.scalar_function(1.0)); + REQUIRE(2.0 == irr.scalar_function(10.0)); + + // extrapolation + REQUIRE(0.0 == reg.scalar_function(-1.0)); + REQUIRE(0.0 == irr.scalar_function(-1.0)); + REQUIRE(-1.0 == reg.scalar_function(+4.0)); + REQUIRE(-1.0 == irr.scalar_function(30.0)); + + // midway interpolation + 
REQUIRE(1.0 == reg.scalar_function(0.5)); + REQUIRE(1.0 == irr.scalar_function(5.0)); + +} diff --git a/cfg/pgrapher/common/clus.jsonnet b/cfg/pgrapher/common/clus.jsonnet new file mode 100644 index 000000000..b466c51fc --- /dev/null +++ b/cfg/pgrapher/common/clus.jsonnet @@ -0,0 +1,364 @@ +// This file provides some helper functions to configure components from WCT +// "clus/" sub-package. In particular, to configure MultiAlgBlobClustering +// (MABC) and its pipeline of "clustering method" components. + +local wc = import "wirecell.jsonnet"; + +{ + /// Create a "factory" object for creating Clustering* "method" components + /// (eg ClusteringLiveDead). + /// + /// The clustering_methods() function takes a number of "general" arguments + /// with default values. Some are common to all Clustering* method + /// components (like "prefix" to which individual object names are appended) + /// while others may be ignored by some Clustering* method components. + /// + /// This function returns an object with a number of elements, each + /// providing a function to construct a specific Clustering* component. + /// Each of these constructor functions accepts the set of "specific" + /// arguments with default values that are relevant to the particular + /// Clustering* component. The "specific" arguments are named to match the + /// names of the configuration parameters that they pass. Eg, + /// "dead_live_overlap_offset". + /// + /// Users may override either the general or specific default values as + /// needed for their particular needs. + /// + /// Users note: The factory object keywords are generally matching the name + /// of their implementation (.cxx) source file name. This is generally (with + /// some exceptions) the class name with "Clustering" part removed and the + /// remaining name converted from CamelCase to snake_case. + /// + /// Developers note: As new Clustering* components are developed, developers + /// should extend the factory object. 
+ /// + /// Example use: + /// + /// local cm = clus.clustering_methods("all", dv, pcts); + /// local cm_objs = [ + /// cm.live_dead(), // "ClusteringLiveDead:all", defaults okay + /// cm.regular("one"), // "ClusteringRegular:allone", must make names unique + /// // "ClusteringRegular:alltwo", because we have a second one: + /// cm.regular("two", length_cut=30*wc.cm, flag_enable_extend=true), + /// // Use generic() if config support not yet added. + /// // This makes a tn of "ClusterNewType:allnew". + /// ]; + /// local mabc = g.pnode({ + /// type: "MultiAlgBlobClustering", + /// data: { + /// clustering_methods: wc.tns(cm_objs), + /// ... + /// }, + /// }, nin=1, nout=1, uses=cm_objs + [...]); // include objects that MABC "uses" directly + + clustering_methods(prefix="", detector_volumes=null, pc_transforms=null, fiducial=null, + pc_name="3d", coords=["x", "y", "z"] ) :: { + // abbreviations covering commonalities across different Clustering* method components. + local dv_tn = wc.tn(detector_volumes), + local dv_cfg = {detector_volumes: dv_tn}, + local fiducial_tn = wc.tn(fiducial), + local fiducial_cfg = { fiducial: fiducial_tn }, + local pcts_tn = wc.tn(pc_transforms), + local pcts_cfg = {pc_transforms: pcts_tn}, + local scope_cfg = {pc_name: pc_name, coords: coords}, + + // Use "parent" inside of a function to call sibling functions. 
+ local parent = self, + + tagger_flag_transfer(name="", enable_debug=false) :: { + type: "ClusteringTaggerFlagTransfer", + name: prefix+name, + data: { + enable_debug: enable_debug, + }, + }, + + clustering_recovering_bundle(name="") :: { + type: "ClusteringRecoveringBundle", + name: prefix + name, + data: { + grouping: "live", // Which grouping to process + array_name: "isolated", // Array name for pcarray lookup + pcarray_name: "perblob", // PCArray name for blob separation + }, + }, + + tagger_check_stm(name="", trackfitting_config_file="", particle_dataset="", recombination_model="") :: { + type: "TaggerCheckSTM", + name: prefix + name, + data: { + grouping: "live", // Which grouping to process + trackfitting_config_file: trackfitting_config_file, + particle_dataset: particle_dataset, + recombination_model: recombination_model, + } + dv_cfg + pcts_cfg + }, + + pointed(name="", groupings=["live"]) :: { + type: "ClusteringPointed", + name: prefix+name, + data: { + groupings: groupings, + }, + }, + + test(name="") :: { + type: "ClusteringTest", + name: prefix+name, + data: dv_cfg + pcts_cfg, + uses: [detector_volumes, pc_transforms], + }, + + ctpointcloud(name="") :: { + type: "ClusteringCTPointcloud", + name: prefix+name, + data: dv_cfg + pcts_cfg, + uses: [detector_volumes, pc_transforms], + }, + + live_dead(name="", dead_live_overlap_offset=2) :: { + type: "ClusteringLiveDead", + name: prefix+name, + data: { + dead_live_overlap_offset: dead_live_overlap_offset, + } + dv_cfg + scope_cfg, + uses: [detector_volumes], + }, + + extend(name="", flag=0, length_cut=150*wc.cm, num_try=0, length_2_cut=3*wc.cm, num_dead_try=3) :: { + type: "ClusteringExtend", + name: prefix+name, + data: { + flag: flag, + length_cut: length_cut, + num_try: num_try, + length_2_cut: length_2_cut, + num_dead_try: num_dead_try, + } + dv_cfg + scope_cfg, + uses: [detector_volumes], + }, + + + regular(name="", length_cut=45*wc.cm, flag_enable_extend=true) :: { + type: "ClusteringRegular", + 
name: prefix+name, + data: { + length_cut: length_cut, + flag_enable_extend: flag_enable_extend, + } + dv_cfg + scope_cfg, + uses: [detector_volumes], + }, + + parallel_prolong(name="", length_cut=35*wc.cm) :: { + type: "ClusteringParallelProlong", + name: prefix+name, + data: { + length_cut: length_cut, + } + dv_cfg + scope_cfg, + uses: [detector_volumes], + }, + + close(name="", length_cut=1*wc.cm) :: { + type: "ClusteringClose", + name: prefix+name, + data: { + length_cut: length_cut, + } + scope_cfg, + }, + + extend_loop(name="", num_try=0) :: { + type: "ClusteringExtendLoop", + name: prefix+name, + data: { + num_try: num_try, + } + dv_cfg + scope_cfg, + uses: [detector_volumes], + }, + + separate(name="", use_ctpc=true) :: { + type: "ClusteringSeparate", + name: prefix+name, + data: { + use_ctpc: use_ctpc, + } + dv_cfg + pcts_cfg + scope_cfg, + uses: [detector_volumes, pc_transforms], + }, + + connect1(name="") :: { + type: "ClusteringConnect1", + name: prefix+name, + data: dv_cfg + scope_cfg, + uses: [detector_volumes], + }, + + deghost(name="", use_ctpc=true, length_cut=0) :: { + type: "ClusteringDeghost", + name: prefix+name, + data: { + use_ctpc: use_ctpc, + length_cut: length_cut, + } + dv_cfg + pcts_cfg + scope_cfg, + uses: [detector_volumes, pc_transforms], + }, + + isolated(name="") :: { + type: "ClusteringIsolated", + name: prefix+name, + data: dv_cfg + scope_cfg, + }, + + examine_bundles(name="") :: { + type: "ClusteringExamineBundles", + name: prefix+name, + data: dv_cfg + pcts_cfg + scope_cfg, + uses: [detector_volumes, pc_transforms], + }, + + examine_x_boundary(name="") :: { + type: "ClusteringExamineXBoundary", + name: prefix+name, + data: dv_cfg + scope_cfg, + uses: [detector_volumes], + }, + + protect_overclustering(name="") :: { + type: "ClusteringProtectOverclustering", + name: prefix+name, + data: dv_cfg + pcts_cfg + scope_cfg, + uses: [detector_volumes, pc_transforms], + }, + + neutrino(name="", num_try=1) :: { + type: 
"ClusteringNeutrino", + name: prefix+name, + data: { + num_try: num_try, + } + dv_cfg + scope_cfg, + uses: [detector_volumes], + }, + + switch_scope(name="", correction_name="T0Correction") :: { + type: "ClusteringSwitchScope", + name: prefix+name, + data: { + correction_name: correction_name, + } + pcts_cfg + scope_cfg, + uses: [pc_transforms], + }, + + // This configures RetileCluster, a per-cluster helper for + // ClusteringRetile as well as others. Use the sampler() function to + // provide properly formed elements to the array-of-object argument + // "samplers". + retiler(name="", anodes=[], samplers=[], cut_time_low=-1e9, cut_time_high=1e9) :: { + local sampler_objs = [s.sobj for s in samplers], + local sampler_cfgs = [{name:wc.tn(s.sobj), apa:s.apa, face:s.face} for s in samplers], + type: "RetileCluster", + name: prefix+name, + data: { + cut_time_low: cut_time_low, + cut_time_high: cut_time_high, + anodes: wc.tns(anodes), + samplers: sampler_cfgs, + } + dv_cfg + pcts_cfg, + uses: [detector_volumes, pc_transforms]+anodes+sampler_objs, + }, + + // Use the sampler() function to provide properly formed elements to the + // array-of-object argument "samplers". 
+ retile(name="", retiler={}) :: { + // local sampler_objs = [s.sobj for s in samplers], + // local sampler_cfgs = [{name:wc.tn(s.sobj), apa:s.apa, face:s.face} for s in samplers], + // local rc = parent.retiler(name, anodes, samplers, cut_time_low, cut_time_high), + type: "ClusteringRetile", + name: prefix+name, + data: { + retiler: wc.tn(retiler), + } + scope_cfg, + uses: [retiler], + }, + + improve_cluster_1(name="", anodes=[], samplers=[]) :: { + local sampler_objs = [s.sobj for s in samplers], + local sampler_cfgs = [{name:wc.tn(s.sobj), apa:s.apa, face:s.face} for s in samplers], + type: "ImproveCluster_1", + name: prefix+name, + data: { + anodes: wc.tns(anodes), + samplers: sampler_cfgs, + } + dv_cfg + pcts_cfg, + uses: [detector_volumes, pc_transforms]+anodes+sampler_objs, + }, + + // This configures ImproveCluster_2, which inherits from ImproveCluster_1 + // and adds advanced Steiner tree improvements. + improve_cluster_2(name="", anodes=[], samplers=[], verbose=false) :: { + local sampler_objs = [s.sobj for s in samplers], + local sampler_cfgs = [{name:wc.tn(s.sobj), apa:s.apa, face:s.face} for s in samplers], + type: "ImproveCluster_2", + name: prefix+name, + data: { + anodes: wc.tns(anodes), + samplers: sampler_cfgs, + verbose: verbose, + } + dv_cfg + pcts_cfg, + uses: [detector_volumes, pc_transforms]+anodes+sampler_objs, + }, + + + // Use an ImproveCluster_1 retiler for clustering retile operations + improve_retile_1(name="", improver={}) :: { + type: "ClusterImprove_1", + name: prefix+name, + data: { + retiler: wc.tn(improver), + } + scope_cfg, + uses: [improver], + }, + + // Use an ImproveCluster_2 retiler for clustering retile operations + improve_retile_2(name="", improver={}) :: { + type: "ClusterImprove_2", + name: prefix+name, + data: { + retiler: wc.tn(improver), + } + scope_cfg, + uses: [improver], + }, + + // Run steiner-related on clusters in grouping, saving graph to them of the given name. 
+ steiner(name="", retiler={}, grouping="live", graph="steiner") :: { + type: "CreateSteinerGraph", + name: prefix+name, + data: { + grouping: grouping, + graph: graph, + retiler: wc.tn(retiler), + } + dv_cfg + pcts_cfg, + uses: [detector_volumes, pc_transforms, retiler] + }, + + // Add a "FiducialUtils" to a grouping. + fiducialutils(name="", live_grouping="live", dead_grouping="dead", target_grouping="live") :: { + type: "MakeFiducialUtils", + name: prefix+name, + data: { + live: live_grouping, + dead: dead_grouping, + target: target_grouping, + } + dv_cfg + fiducial_cfg + pcts_cfg + }, + + }, // clustering_methods(), + + /// Use this function to provide the elements of retile's "samplers" + /// array-of-objects parameter. It requires a configuration object + /// for an IBlobSampler component as first argument. + sampler(sampler_object, apa=0, face=0) :: { sobj:sampler_object, apa: apa, face: face}, + + test: { + cm : $.clustering_methods(detector_volumes={type:"DetectorVolumes", name:""}), + ld : self.cm.live_dead(), + } + +} diff --git a/cfg/pgrapher/common/funcs.jsonnet b/cfg/pgrapher/common/funcs.jsonnet index 5ca75110f..47663d230 100644 --- a/cfg/pgrapher/common/funcs.jsonnet +++ b/cfg/pgrapher/common/funcs.jsonnet @@ -6,6 +6,7 @@ local g = import "pgraph.jsonnet"; { fanpipe :: g.fan.pipe, fansink :: g.fan.sink, + fanout :: g.fan.fanout, // a multi-layer fanout-pipelines-fanin structure // nnodes: number of nodes per layer @@ -86,7 +87,7 @@ local g = import "pgraph.jsonnet"; ], local fin_node = g.intern( innodes = fin_layers[fin_nlayers-1], - centernodes = if fin_nlayers == 2 then [] else std.flattenArrays([fin_layers[i] for i in std.range(1,fout_nlayers-2)]), + centernodes = if fin_nlayers == 2 then [] else std.flattenArrays([fin_layers[i] for i in std.range(1,fin_nlayers-2)]), outnodes = fin_layers[0], edges = std.flattenArrays( [ @@ -159,9 +160,53 @@ local g = import "pgraph.jsonnet"; // connect comb_fan_out-piples ret : g.intern( innodes = [fout_node], - 
centernodes = pipelines, - outnodes = [], + centernodes = [], + outnodes = pipelines, edges = [g.edge(fout_node,pipelines[n],n,0) for n in std.range(0,npipe-1)] ), }.ret, + + multifanin :: function( fin, fin_nnodes=[1,8,16], fin_multi=[8,2,7], + name='multifanin', outtags=[], tag_rules=null ) { + local fin_nlayers = std.length(fin_multi), + assert fin_nlayers >= 2 : "fin_nlayers should be >= 2", + + // similarly build the multi-layer fan in combo node + // note the backward layer counting + local fin_layer(ilayer,nnodes,nmulti) = { + ret : [ + g.pnode({ + type: fin, + name: name+"_fin_%d"%ilayer + "_%d"%inode, + data: { + multiplicity: nmulti, + tags: outtags, + tag_rules: [tag_rules for irule in std.range(0,nmulti-1)], + }}, nin=nmulti, nout=1 + ) for inode in std.range(0,nnodes-1)], + }.ret, + local fin_layers = [ + fin_layer(ilayer, + fin_nnodes[ilayer], + fin_multi[ilayer]) + for ilayer in std.range(0,fin_nlayers-1) + ], + local fin_node = g.intern( + innodes = fin_layers[fin_nlayers-1], + centernodes = if fin_nlayers == 2 then [] else std.flattenArrays([fin_layers[i] for i in std.range(1,fin_nlayers-2)]), + outnodes = fin_layers[0], + edges = std.flattenArrays( + [ + [ + g.edge( + fin_layers[ilayer][inode], + fin_layers[ilayer-1][std.floor(inode/fin_multi[ilayer-1])], + 0, + inode%fin_multi[ilayer-1]) + for inode in std.range(0,fin_nnodes[ilayer]-1)] + for ilayer in std.range(1,fin_nlayers-1)]) + ), + + ret : fin_node, + }.ret, } diff --git a/cfg/pgrapher/experiment/dune-vd/clus.jsonnet b/cfg/pgrapher/experiment/dune-vd/clus.jsonnet new file mode 100644 index 000000000..ae2b8b209 --- /dev/null +++ b/cfg/pgrapher/experiment/dune-vd/clus.jsonnet @@ -0,0 +1,617 @@ +local wc = import "wirecell.jsonnet"; +local g = import "pgraph.jsonnet"; +local f = import 'pgrapher/common/funcs.jsonnet'; +// local params = import "pgrapher/experiment/dune-vd/params.jsonnet"; +// local tools_maker = import 'pgrapher/common/tools.jsonnet'; +// local tools = tools_maker(params({})); 
+// local anodes = tools.anodes; + + +local time_offset = 0 * wc.us; +local drift_speed = 1.6 * wc.mm / wc.us; +local bee_dir = "data"; +local bee_zip = "mabc.zip"; + +local initial_index = "0"; +local initial_runNo = "1"; +local initial_subRunNo = "1"; +local initial_eventNo = "1"; +local index = std.parseInt(initial_index); +local LrunNo = std.parseInt(initial_runNo); +local LsubRunNo = std.parseInt(initial_subRunNo); +local LeventNo = std.parseInt(initial_eventNo); + + +local common_coords = ["x", "y", "z"]; + +local geom_helper = { + type: "SimpleClusGeomHelper", + name: "uboone", + data: { + a0f0: { + face: 0, + pitch_u: 3 * wc.mm, + pitch_v: 3 * wc.mm, + pitch_w: 3 * wc.mm, + angle_u: 1.0472, // 60 degrees + angle_v: -1.0472, // -60 degrees + angle_w: 0, // 0 degrees + drift_speed: drift_speed*1, + tick: 0.5 * wc.us, // 0.5 mm per tick + tick_drift: self.drift_speed * self.tick, + time_offset: time_offset, + nticks_live_slice: 4, + FV_xmin: 1 * wc.cm, + FV_xmax: 255 * wc.cm, + FV_ymin: -99.5 * wc.cm, + FV_ymax: 101.5 * wc.cm, + FV_zmin: 15 * wc.cm, + FV_zmax: 1022 * wc.cm, + FV_xmin_margin: 2 * wc.cm, + FV_xmax_margin: 2 * wc.cm, + FV_ymin_margin: 2.5 * wc.cm, + FV_ymax_margin: 2.5 * wc.cm, + FV_zmin_margin: 3 * wc.cm, + FV_zmax_margin: 3 * wc.cm + }, + a1f0: self.a0f0 {}, + a2f0: self.a0f0 {}, + a3f0: self.a0f0 {}, + a4f0: self.a0f0 {}, + a5f0: self.a0f0 {}, + a6f0: self.a0f0 {}, + a7f0: self.a0f0 {}, + a8f0: self.a0f0 {}, + a9f0: self.a0f0 {}, + a10f0: self.a0f0 {}, + a11f0: self.a0f0 {}, + a12f0: self.a0f0 {}, + a13f0: self.a0f0 {}, + a14f0: self.a0f0 {}, + a15f0: self.a0f0 {}, + a16f0: self.a0f0 {}, + a17f0: self.a0f0 {}, + a18f0: self.a0f0 {}, + a19f0: self.a0f0 {}, + a20f0: self.a0f0 {}, + a21f0: self.a0f0 {}, + a22f0: self.a0f0 {}, + a23f0: self.a0f0 {}, + a0f1: self.a0f0 {face: 1}, + a1f1: self.a0f0 {face: 1}, + a2f1: self.a0f0 {face: 1}, + a3f1: self.a0f0 {face: 1}, + a4f1: self.a0f0 {face: 1}, + a5f1: self.a0f0 {face: 1}, + a6f1: self.a0f0 {face: 
1}, + a7f1: self.a0f0 {face: 1}, + a8f1: self.a0f0 {face: 1}, + a9f1: self.a0f0 {face: 1}, + a10f1: self.a0f0 {face: 1}, + a11f1: self.a0f0 {face: 1}, + a12f1: self.a0f0 {face: 1}, + a13f1: self.a0f0 {face: 1}, + a14f1: self.a0f0 {face: 1}, + a15f1: self.a0f0 {face: 1}, + a16f1: self.a0f0 {face: 1}, + a17f1: self.a0f0 {face: 1}, + a18f1: self.a0f0 {face: 1}, + a19f1: self.a0f0 {face: 1}, + a20f1: self.a0f0 {face: 1}, + a21f1: self.a0f0 {face: 1}, + a22f1: self.a0f0 {face: 1}, + a23f1: self.a0f0 {face: 1}, + } +}; + +local clus_per_face ( + anode, + face, + dump = true, + ) = +{ + + // Note, the "sampler" must be unique to the "sampling". + local bs_live = { + type: "BlobSampler", + name: "%s-%d"%[anode.name, face], + data: { + drift_speed: drift_speed, + time_offset: time_offset, + strategy: [ + // "center", + // "corner", + // "edge", + // "bounds", + "stepped", + // {name:"grid", step:1, planes:[0,1]}, + // {name:"grid", step:1, planes:[1,2]}, + // {name:"grid", step:1, planes:[2,0]}, + // {name:"grid", step:2, planes:[0,1]}, + // {name:"grid", step:2, planes:[1,2]}, + // {name:"grid", step:2, planes:[2,0]}, + ], + // extra: [".*"] // want all the extra + extra: [".*wire_index", "wpid"] // + // extra: [] // + }}, + local bs_dead = { + type: "BlobSampler", + name: "%s-%d"%[anode.name, face], + data: { + strategy: [ + "center", + ], + extra: [".*"] // want all the extra + }}, + + + local detector_volumes = + { + "type": "DetectorVolumes", + "name": "dv-%s-%d"%[anode.name, face], + "data": { + "anodes": [wc.tn(anode)], + metadata: + {overall: { + FV_xmin: 1 * wc.cm, + FV_xmax: 255 * wc.cm, + FV_ymin: -99.5 * wc.cm, + FV_ymax: 101.5 * wc.cm, + FV_zmin: 15 * wc.cm, + FV_zmax: 1022 * wc.cm, + FV_xmin_margin: 2 * wc.cm, + FV_xmax_margin: 2 * wc.cm, + FV_ymin_margin: 2.5 * wc.cm, + FV_ymax_margin: 2.5 * wc.cm, + FV_zmin_margin: 3 * wc.cm, + FV_zmax_margin: 3 * wc.cm, + vertical_dir: [0,1,0], + beam_dir: [0,0,1] + }} + + { + [ "a" + std.toString(a.data.ident) + "f0pA" ]: 
{ + drift_speed: drift_speed, + tick: 0.5 * wc.us, // 0.5 mm per tick + tick_drift: self.drift_speed * self.tick, + time_offset: time_offset, + nticks_live_slice: 4, + FV_xmin: 1 * wc.cm, + FV_xmax: 255 * wc.cm, + FV_xmin_margin: 2 * wc.cm, + FV_xmax_margin: 2 * wc.cm, + } for a in [anode] + } + + { + [ "a" + std.toString(a.data.ident) + "f1pA" ]: { + drift_speed: drift_speed, + tick: 0.5 * wc.us, // 0.5 mm per tick + tick_drift: self.drift_speed * self.tick, + time_offset: time_offset, + nticks_live_slice: 4, + FV_xmin: 1 * wc.cm, + FV_xmax: 255 * wc.cm, + FV_xmin_margin: 2 * wc.cm, + FV_xmax_margin: 2 * wc.cm, + } for a in [anode] + } + } + }, + + local cluster_scope_filter_live = g.pnode({ + type: "ClusterScopeFilter", + name: "csf-live-%s-%d"%[anode.name, face], + data: { + face_index: face, + } + }, nin=1, nout=1, uses=[]), + + local cluster_scope_filter_dead = g.pnode({ + type: "ClusterScopeFilter", + name: "csf-dead-%s-%d"%[anode.name, face], + data: { + face_index: face, + } + }, nin=1, nout=1, uses=[]), + + local ptb = g.pnode({ + type: "PointTreeBuilding", + name: "%s-%d"%[anode.name, face], + data: { + samplers: { + "3d": wc.tn(bs_live), + "dead": wc.tn(bs_dead), + }, + multiplicity: 2, + tags: ["live", "dead"], + anode: wc.tn(anode), + face: face, + geom_helper: wc.tn(geom_helper), + detector_volumes: wc.tn(detector_volumes), + } + }, nin=2, nout=1, uses=[bs_live, bs_dead, detector_volumes]), + + local cluster2pct = g.intern( + innodes = [cluster_scope_filter_live, cluster_scope_filter_dead], + centernodes = [], + outnodes = [ptb], + edges = [ + g.edge(cluster_scope_filter_live, ptb, 0, 0), + g.edge(cluster_scope_filter_dead, ptb, 0, 1) + ] + ), + // local cluster2pct = ptb, + + local mabc = g.pnode({ + type: "MultiAlgBlobClustering", + name: "%s-%d"%[anode.name, face], + data: { + inpath: "pointtrees/%d", + outpath: "pointtrees/%d", + // grouping2file_prefix: "grouping%s-%d"%[anode.name, face], + perf: true, + bee_dir: bee_dir, // "data/0/0", // not 
used + bee_zip: "mabc-%s-face%d.zip"%[anode.name, face], + bee_detector: "sbnd", + initial_index: index, // New RSE configuration + use_config_rse: true, // Enable use of configured RSE + runNo: LrunNo, + subRunNo: LsubRunNo, + eventNo: LeventNo, + save_deadarea: true, + anodes: [wc.tn(anode)], + face: face, + geom_helper: wc.tn(geom_helper), + detector_volumes: wc.tn(detector_volumes), + func_cfgs: [ + // {name: "clustering_test", detector_volumes: wc.tn(detector_volumes)}, + // {name: "clustering_ctpointcloud", detector_volumes: wc.tn(detector_volumes)}, + // {name: "clustering_switch_scope", detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: ["x", "y", "z"], correction_name: "T0Correction"}, + {name: "clustering_live_dead", dead_live_overlap_offset: 2, detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + {name: "clustering_extend", flag: 4, length_cut: 60 * wc.cm, num_try: 0, length_2_cut: 15 * wc.cm, num_dead_try: 1, detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + {name: "clustering_regular", length_cut: 60*wc.cm, flag_enable_extend: false, detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + {name: "clustering_regular", length_cut: 30*wc.cm, flag_enable_extend: true, detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + {name: "clustering_parallel_prolong", length_cut: 35*wc.cm, detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_close", length_cut: 1.2*wc.cm, pc_name: "3d", coords: common_coords}, + // {name: "clustering_extend_loop", num_try: 3, detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_separate", use_ctpc: true, detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_connect1", detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: 
common_coords}, + // {name: "clustering_deghost", detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_examine_x_boundary", detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_protect_overclustering", detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_neutrino", detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_isolated", detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + ], + } + }, nin=1, nout=1, uses=[geom_helper]), + + local sink = g.pnode({ + type: "TensorFileSink", + name: "clus_per_face-%s-%d"%[anode.name, face], + data: { + outname: "trash-%s-face%d.tar.gz"%[anode.name, face], + prefix: "clustering_", // json, numpy, dummy + dump_mode: true, + } + }, nin=1, nout=0), + + local end = if dump + then g.pipeline([mabc, sink]) + else g.pipeline([mabc]), + + ret :: g.pipeline([cluster2pct, end], "clus_per_face-%s-%d"%[anode.name, face]) +}.ret; + +local clus_per_apa ( + anode, + dump = true, + ) = +{ + local cfout_live = g.pnode({ + type:'ClusterFanout', + name: 'clus_per_apa-cfout_live-%s'%anode.name, + data: { + multiplicity: 2 + }}, nin=1, nout=2), + + local cfout_dead = g.pnode({ + type:'ClusterFanout', + name: 'clus_per_apa-cfout_dead-%s'%anode.name, + data: { + multiplicity: 2 + }}, nin=1, nout=2), + + local per_face_pipes = [ + clus_per_face(anode, face=0, dump=false), + clus_per_face(anode, face=1, dump=false), + ], + + local pcmerging = g.pnode({ + type: "PointTreeMerging", + name: "%s"%[anode.name], + data: { + multiplicity: 2, + inpath: "pointtrees/%d", + outpath: "pointtrees/%d", + } + }, nin=2, nout=1), + + local detector_volumes = + { + "type": "DetectorVolumes", + "name": "dv-%s"%[anode.name], + "data": { + "anodes": [wc.tn(anode)], + metadata: + {overall: { + FV_xmin: 1 * wc.cm, + FV_xmax: 255 * wc.cm, 
+ FV_ymin: -99.5 * wc.cm, + FV_ymax: 101.5 * wc.cm, + FV_zmin: 15 * wc.cm, + FV_zmax: 1022 * wc.cm, + FV_xmin_margin: 2 * wc.cm, + FV_xmax_margin: 2 * wc.cm, + FV_ymin_margin: 2.5 * wc.cm, + FV_ymax_margin: 2.5 * wc.cm, + FV_zmin_margin: 3 * wc.cm, + FV_zmax_margin: 3 * wc.cm, + vertical_dir: [0,1,0], + beam_dir: [0,0,1] + }} + + { + [ "a" + std.toString(a.data.ident) + "f0pA" ]: { + drift_speed: drift_speed, + tick: 0.5 * wc.us, // 0.5 mm per tick + tick_drift: self.drift_speed * self.tick, + time_offset: time_offset, + nticks_live_slice: 4, + FV_xmin: 1 * wc.cm, + FV_xmax: 255 * wc.cm, + FV_xmin_margin: 2 * wc.cm, + FV_xmax_margin: 2 * wc.cm, + } for a in [anode] + } + + { + [ "a" + std.toString(a.data.ident) + "f1pA" ]: { + drift_speed: drift_speed, + tick: 0.5 * wc.us, // 0.5 mm per tick + tick_drift: self.drift_speed * self.tick, + time_offset: time_offset, + nticks_live_slice: 4, + FV_xmin: 1 * wc.cm, + FV_xmax: 255 * wc.cm, + FV_xmin_margin: 2 * wc.cm, + FV_xmax_margin: 2 * wc.cm, + } for a in [anode] + } + } + }, + + local mabc = g.pnode({ + type: "MultiAlgBlobClustering", + name: "clus_per_apa-%s"%[anode.name], + data: { + inpath: "pointtrees/%d", + outpath: "pointtrees/%d", + // grouping2file_prefix: "grouping%s-%d"%[anode.name, face], + perf: true, + bee_dir: bee_dir, // "data/0/0", // not used + bee_zip: "mabc-%s.zip"%[anode.name], + bee_detector: "sbnd", + initial_index: index, // New RSE configuration + use_config_rse: true, // Enable use of configured RSE + runNo: LrunNo, + subRunNo: LsubRunNo, + eventNo: LeventNo, + save_deadarea: true, + anodes: [wc.tn(anode)], + // face: face, + geom_helper: wc.tn(geom_helper), + detector_volumes: wc.tn(detector_volumes), + func_cfgs: [ + // {name: "clustering_test", detector_volumes: wc.tn(detector_volumes)}, + // {name: "clustering_ctpointcloud", detector_volumes: wc.tn(detector_volumes)}, + // {name: "clustering_switch_scope", detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: ["x", "y", "z"], 
correction_name: "T0Correction"}, + // {name: "clustering_live_dead", dead_live_overlap_offset: 2, detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_extend", flag: 4, length_cut: 60 * wc.cm, num_try: 0, length_2_cut: 15 * wc.cm, num_dead_try: 1, detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_regular", length_cut: 60*wc.cm, flag_enable_extend: false, detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_regular", length_cut: 30*wc.cm, flag_enable_extend: true, detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_parallel_prolong", length_cut: 35*wc.cm, detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_close", length_cut: 1.2*wc.cm, pc_name: "3d", coords: common_coords}, + // {name: "clustering_extend_loop", num_try: 3, detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_separate", use_ctpc: true, detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_connect1", detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_deghost", detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_examine_x_boundary", detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_protect_overclustering", detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_neutrino", detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_isolated", detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + ], + } + }, nin=1, nout=1, uses=[geom_helper, 
detector_volumes]), + + local sink = g.pnode({ + type: "TensorFileSink", + name: "clus_per_apa-%s"%[anode.name], + data: { + outname: "trash-%s.tar.gz"%[anode.name], + prefix: "clustering_", // json, numpy, dummy + dump_mode: true, + } + }, nin=1, nout=0), + + local end = if dump + then g.pipeline([mabc, sink]) + else g.pipeline([mabc]), + + ret :: g.intern( + innodes = [cfout_live, cfout_dead], + centernodes = per_face_pipes + [pcmerging], + outnodes = [end], + edges = [ + g.edge(cfout_live, per_face_pipes[0], 0, 0), + g.edge(cfout_dead, per_face_pipes[0], 0, 1), + g.edge(cfout_live, per_face_pipes[1], 1, 0), + g.edge(cfout_dead, per_face_pipes[1], 1, 1), + g.edge(per_face_pipes[0], pcmerging, 0, 0), + g.edge(per_face_pipes[1], pcmerging, 0, 1), + g.edge(pcmerging, end, 0, 0), + ] + ), +}.ret; + +local clus_all_apa ( + anodes, + dump = true, + ) = { + local nanodes = std.length(anodes), + local pcmerging = g.pnode({ + type: "PointTreeMerging", + name: "clus_all_apa", + data: { + multiplicity: nanodes, + inpath: "pointtrees/%d", + outpath: "pointtrees/%d", + } + }, nin=nanodes, nout=1), + local detector_volumes = + { + "type": "DetectorVolumes", + "name": "clus_all_apa", + "data": { + "anodes": [wc.tn(anode) for anode in anodes], + metadata: + {overall: { + FV_xmin: 1 * wc.cm, + FV_xmax: 255 * wc.cm, + FV_ymin: -99.5 * wc.cm, + FV_ymax: 101.5 * wc.cm, + FV_zmin: 15 * wc.cm, + FV_zmax: 1022 * wc.cm, + FV_xmin_margin: 2 * wc.cm, + FV_xmax_margin: 2 * wc.cm, + FV_ymin_margin: 2.5 * wc.cm, + FV_ymax_margin: 2.5 * wc.cm, + FV_zmin_margin: 3 * wc.cm, + FV_zmax_margin: 3 * wc.cm, + vertical_dir: [0,1,0], + beam_dir: [0,0,1] + }} + + { + [ "a" + std.toString(a.data.ident) + "f0pA" ]: { + drift_speed: drift_speed, + tick: 0.5 * wc.us, // 0.5 mm per tick + tick_drift: self.drift_speed * self.tick, + time_offset: time_offset, + nticks_live_slice: 4, + FV_xmin: 1 * wc.cm, + FV_xmax: 255 * wc.cm, + FV_xmin_margin: 2 * wc.cm, + FV_xmax_margin: 2 * wc.cm, + } for a in anodes + } 
+ + { + [ "a" + std.toString(a.data.ident) + "f1pA" ]: { + drift_speed: drift_speed, + tick: 0.5 * wc.us, // 0.5 mm per tick + tick_drift: self.drift_speed * self.tick, + time_offset: time_offset, + nticks_live_slice: 4, + FV_xmin: 1 * wc.cm, + FV_xmax: 255 * wc.cm, + FV_xmin_margin: 2 * wc.cm, + FV_xmax_margin: 2 * wc.cm, + } for a in anodes + } + } + }, + local mabc = g.pnode({ + type: "MultiAlgBlobClustering", + name: "clus_all_apa", + data: { + inpath: "pointtrees/%d", + outpath: "pointtrees/%d", + // grouping2file_prefix: "grouping%s-%d"%[anode.name, face], + perf: true, + bee_dir: bee_dir, // "data/0/0", // not used + bee_zip: "mabc-all-apa.zip", + bee_detector: "sbnd", + initial_index: index, // New RSE configuration + use_config_rse: true, // Enable use of configured RSE + runNo: LrunNo, + subRunNo: LsubRunNo, + eventNo: LeventNo, + save_deadarea: true, + anodes: [wc.tn(a) for a in anodes], + // face: face, + geom_helper: wc.tn(geom_helper), + detector_volumes: wc.tn(detector_volumes), + bee_points_sets: [ // New configuration for multiple bee points sets + { + name: "img", // Name of the bee points set + detector: "uboone", // Detector name + algorithm: "img", // Algorithm identifier + pcname: "3d", // Which scope to use + coords: ["x", "y", "z"], // Coordinates to use + individual: false // Whether to output as a whole or individual APA/Face + }, + { + name: "clustering", // Name of the bee points set + detector: "uboone", // Detector name + algorithm: "clustering", // Algorithm identifier + pcname: "3d", // Which scope to use + coords: ["x_t0cor", "y", "z"], // Coordinates to use + individual: true // Output individual APA/Face + } + ], + func_cfgs: [ + // {name: "clustering_test", detector_volumes: wc.tn(detector_volumes)}, + // {name: "clustering_ctpointcloud", detector_volumes: wc.tn(detector_volumes)}, + // {name: "clustering_switch_scope", detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: ["x", "y", "z"], correction_name: 
"T0Correction"}, + // {name: "clustering_live_dead", dead_live_overlap_offset: 2, detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_extend", flag: 4, length_cut: 60 * wc.cm, num_try: 0, length_2_cut: 15 * wc.cm, num_dead_try: 1, detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_regular", length_cut: 60*wc.cm, flag_enable_extend: false, detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_regular", length_cut: 30*wc.cm, flag_enable_extend: true, detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_parallel_prolong", length_cut: 35*wc.cm, detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + {name: "clustering_close", length_cut: 1.2*wc.cm, pc_name: "3d", coords: common_coords}, + // {name: "clustering_extend_loop", num_try: 3, detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_separate", use_ctpc: true, detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_connect1", detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_deghost", detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_examine_x_boundary", detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_protect_overclustering", detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_neutrino", detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + // {name: "clustering_isolated", detector_volumes: wc.tn(detector_volumes), pc_name: "3d", coords: common_coords}, + ], + }, + }, nin=1, nout=1, uses=[geom_helper, detector_volumes]), + + 
local sink = g.pnode({ + type: "TensorFileSink", + name: "clus_all_apa", + data: { + outname: "trash-all-apa.tar.gz", + prefix: "clustering_", // json, numpy, dummy + dump_mode: true, + } + }, nin=1, nout=0), + local end = if dump + then g.pipeline([mabc, sink]) + else g.pipeline([mabc]), + ret :: g.intern( + innodes = [pcmerging], + centernodes = [], + outnodes = [end], + edges = [ + g.edge(pcmerging, end, 0, 0), + ] + ), +}.ret; + + +function () { + per_face(anode, face=0, dump=true) :: clus_per_face(anode, face=face, dump=dump), + per_apa(anode, dump=true) :: clus_per_apa(anode, dump=dump), + all_apa(anodes, dump=true) :: clus_all_apa(anodes, dump=dump), +} \ No newline at end of file diff --git a/cfg/pgrapher/experiment/dune-vd/img.jsonnet b/cfg/pgrapher/experiment/dune-vd/img.jsonnet index 9003e5dd9..714afecd7 100644 --- a/cfg/pgrapher/experiment/dune-vd/img.jsonnet +++ b/cfg/pgrapher/experiment/dune-vd/img.jsonnet @@ -103,6 +103,7 @@ local img = { data: { tick_span: span, wiener_tag: "wiener%d" % anode.data.ident, + summary_tag: "wiener%d" % anode.data.ident, charge_tag: "gauss%d" % anode.data.ident, error_tag: "gauss_error%d" % anode.data.ident, anode: wc.tn(anode), @@ -179,7 +180,7 @@ local img = { local tilings = [$.tiling(anode, name+"_%d"%n) for n in iota], local multipass = [g.pipeline([slicings[n],tilings[n]]) for n in iota], - ret: f.fanpipe("FrameFanout", multipass, "BlobSetMerge", "multi_masked_slicing_tiling"), + ret: f.fanpipe("FrameFanout", multipass, "BlobSetMerge", "multi_masked_slicing_tiling_%s"%name), }.ret, local clustering_policy = "uboone", // uboone, simple @@ -295,7 +296,7 @@ local img = { }; function() { - local imgpipe (anode, multi_slicing) = + local imgpipe (anode, multi_slicing, add_dump = true) = if multi_slicing == "single" then g.pipeline([ // img.slicing(anode, anode.name, 109, active_planes=[0,1,2], masked_planes=[],dummy_planes=[]), // 109*22*4 @@ -304,34 +305,43 @@ function() { img.tiling(anode, anode.name), 
img.solving(anode, anode.name), // img.clustering(anode, anode.name), - img.dump(anode, anode.name, params.lar.drift_speed),]) + ] + if add_dump then [ + img.dump(anode, anode.name, params.lar.drift_speed),] else []) else if multi_slicing == "active" then g.pipeline([ img.multi_active_slicing_tiling(anode, anode.name+"-ms-active", 4), img.solving(anode, anode.name+"-ms-active"), // img.clustering(anode, anode.name+"-ms-active"), - img.dump(anode, anode.name+"-ms-active", params.lar.drift_speed)]) + ] + if add_dump then [ + img.dump(anode, anode.name+"-ms-active", params.lar.drift_speed),] else []) else if multi_slicing == "masked" then g.pipeline([ img.multi_masked_2view_slicing_tiling(anode, anode.name+"-ms-masked", 500), img.clustering(anode, anode.name+"-ms-masked"), - img.dump(anode, anode.name+"-ms-masked", params.lar.drift_speed)]) + ] + if add_dump then [ + img.dump(anode, anode.name+"-ms-masked", params.lar.drift_speed),] else []) else { + local st = if multi_slicing == "multi-2view" + then img.multi_active_slicing_tiling(anode, anode.name+"-ms-active", 4) + else g.pipeline([ + img.slicing(anode, anode.name, 4, active_planes=[0,1,2], masked_planes=[],dummy_planes=[]), // 109*22*4 + img.tiling(anode, anode.name),]), local active_fork = g.pipeline([ - img.multi_active_slicing_tiling(anode, anode.name+"-ms-active", 4), + st, img.solving(anode, anode.name+"-ms-active"), - img.dump(anode, anode.name+"-ms-active", params.lar.drift_speed), - ]), + ] + if add_dump then [ + img.dump(anode, anode.name+"-ms-active", params.lar.drift_speed),] else []), local masked_fork = g.pipeline([ img.multi_masked_2view_slicing_tiling(anode, anode.name+"-ms-masked", 500), // 109, 1744 (total 9592) img.clustering(anode, anode.name+"-ms-masked"), - img.dump(anode, anode.name+"-ms-masked", params.lar.drift_speed), - ]), - ret: g.fan.fanout("FrameFanout",[active_fork,masked_fork], "fan_active_masked"), + ] + if add_dump then [ + img.dump(anode, anode.name+"-ms-masked", 
params.lar.drift_speed),] else []), + ret: g.fan.fanout("FrameFanout",[active_fork,masked_fork], "fan_active_masked-%s"%anode.name), }.ret, - per_anode(anode) :: g.pipeline([ + + per_anode(anode, multi_slicing = "single", add_dump = true) :: g.pipeline([ img.pre_proc(anode, anode.name), - imgpipe(anode, "single"), + imgpipe(anode, multi_slicing, add_dump), ], "per_anode"), } \ No newline at end of file diff --git a/cfg/pgrapher/experiment/dune-vd/sim.jsonnet b/cfg/pgrapher/experiment/dune-vd/sim.jsonnet index 837ef0ce5..eb42af458 100644 --- a/cfg/pgrapher/experiment/dune-vd/sim.jsonnet +++ b/cfg/pgrapher/experiment/dune-vd/sim.jsonnet @@ -49,6 +49,7 @@ function(params, tools) { data: { anodes_tn: [wc.tn(anode) for anode in tools.anodes], }, + uses: [anode for anode in tools.anodes], }, local add_noise = function(model, name="") g.pnode({ diff --git a/cfg/pgrapher/experiment/dune-vd/wct-depo-sim-img-fans.jsonnet b/cfg/pgrapher/experiment/dune-vd/wct-depo-sim-img-fans.jsonnet index c6fa92436..23b05b223 100644 --- a/cfg/pgrapher/experiment/dune-vd/wct-depo-sim-img-fans.jsonnet +++ b/cfg/pgrapher/experiment/dune-vd/wct-depo-sim-img-fans.jsonnet @@ -21,7 +21,7 @@ local fcl_params = { wires: 'dunevd10kt_3view_30deg_v5_refactored_1x8x6ref.json.bz2', ncrm: 24, use_dnnroi: false, - process_crm: 'full', + process_crm: 'test1', //'full', 'test1' }; local params_maker = if fcl_params.ncrm ==320 then import 'pgrapher/experiment/dune-vd/params-10kt.jsonnet' @@ -50,7 +50,7 @@ local tools = if fcl_params.process_crm == "partial" then tools_all {anodes: [tools_all.anodes[n] for n in std.range(32, 79)]} else if fcl_params.process_crm == "test1" -then tools_all {anodes: [tools_all.anodes[n] for n in [0,1,4,5]]} +then tools_all {anodes: [tools_all.anodes[n] for n in [5,8,9,12,13,16,20]]} else if fcl_params.process_crm == "test2" then tools_all {anodes: [tools_all.anodes[n] for n in std.range(0, 7)]} else tools_all; @@ -99,7 +99,23 @@ local reframers_sp = [ local img = import 
'pgrapher/experiment/dune-vd/img.jsonnet'; local img_maker = img(); -local img_pipes = [img_maker.per_anode(a) for a in tools.anodes]; +local img_pipes = [img_maker.per_anode(a, "multi-3view", add_dump = false) for a in tools.anodes]; + +local clus = import 'pgrapher/experiment/dune-vd/clus.jsonnet'; +local clus_maker = clus(); +// local clus_pipes = [clus_maker.per_volume(tools.anodes[0], face=0, dump=true), clus_maker.per_volume(tools.anodes[1], face=1, dump=true)]; +local clus_pipes = [clus_maker.per_volume(tools.anodes[n], face=0, dump=true) for n in std.range(0, std.length(tools.anodes) - 1)]; + +local img_clus_pipe = [g.intern( + innodes = [img_pipes[n]], + centernodes = [], + outnodes = [clus_pipes[n]], + edges = [ + g.edge(img_pipes[n], clus_pipes[n], p, p) + for p in std.range(0, 1) + ] +) +for n in std.range(0, std.length(tools.anodes) - 1)]; local magoutput = 'mag-sim-sp.root'; local magnify = import 'pgrapher/experiment/dune-vd/magnify-sinks.jsonnet'; @@ -148,7 +164,7 @@ local parallel_pipes = [ // sinks.decon_pipe[n], // sinks.debug_pipe[n], // use_roi_debug_mode=true in sp.jsonnet // g.pnode({type: "DumpFrames", name: "dumpframes-%d"%tools.anodes[n].data.ident}, nin = 1, nout=0) - img_pipes[n], + img_clus_pipe[n], ], 'parallel_pipe_%d' % n) for n in std.range(0, std.length(tools.anodes) - 1)]; @@ -191,7 +207,7 @@ local make_switch_pipe = function(d2f, anode ) { }.ret1; local switch_pipes = [ - g.pipeline([make_switch_pipe(parallel_pipes[n], tools.anodes[n]), img_pipes[n]]) + g.pipeline([make_switch_pipe(parallel_pipes[n], tools.anodes[n]), img_clus_pipe[n]]) for n in std.range(0, std.length(tools.anodes) - 1) ]; @@ -199,7 +215,7 @@ local switch_pipes = [ local parallel_graph = if fcl_params.process_crm == "test1" // then f.multifanpipe('DepoSetFanout', parallel_pipes, 'FrameFanin', [1,4], [4,1], [1,4], [4,1], 'sn_mag', outtags, tag_rules) -then f.multifanout('DepoSetFanout', parallel_pipes, [1,4], [4,1], 'sn_mag', tag_rules) +then 
f.multifanout('DepoSetFanout', parallel_pipes, [1,7], [7,1], 'sn_mag', tag_rules) else if fcl_params.process_crm == "test2" then f.multifanpipe('DepoSetFanout', parallel_pipes, 'FrameFanin', [1,8], [8,1], [1,8], [8,1], 'sn_mag', outtags, tag_rules) // else f.multifanout('DepoSetFanout', switch_pipes, [1,4], [4,6], 'sn_mag', tag_rules); @@ -215,6 +231,7 @@ local sink = sim.frame_sink; // Final pipeline ////////////////////////////////////////////////////////////////////////////// local graph = g.pipeline([depo_source, setdrifter, parallel_graph], "main"); // no Fanin +// local graph = g.pipeline([depo_source, setdrifter, parallel_pipes[0]], "main"); // no Fanin // local graph = g.pipeline([depo_source, setdrifter, parallel_graph, sink], "main"); // ending with Fanin local app = { @@ -227,7 +244,7 @@ local app = { local cmdline = { type: "wire-cell", data: { - plugins: ["WireCellGen", "WireCellPgraph", "WireCellSio", "WireCellSigProc", "WireCellImg", "WireCellRoot", "WireCellTbb"/*, "WireCellCuda"*/], + plugins: ["WireCellGen", "WireCellPgraph", "WireCellSio", "WireCellSigProc", "WireCellImg", "WireCellRoot", "WireCellTbb", "WireCellClus"], apps: ["TbbFlow"] } }; diff --git a/cfg/pgrapher/experiment/pdhd/chndb-base.jsonnet b/cfg/pgrapher/experiment/pdhd/chndb-base.jsonnet new file mode 100644 index 000000000..bfb506878 --- /dev/null +++ b/cfg/pgrapher/experiment/pdhd/chndb-base.jsonnet @@ -0,0 +1,126 @@ +// Base channel noise DB object configuration for microboone +// This does not include any run dependent RMS cuts. +// See chndb.jsonnet + +local handmade = import 'chndb-resp.jsonnet'; +local wc = import 'wirecell.jsonnet'; + +function(params, anode, field, n, rms_cuts=[]) + { + anode: wc.tn(anode), + field_response: wc.tn(field), + + tick: params.daq.tick, + + // This sets the number of frequency-domain bins used in the noise + // filtering. It is not necessarily true that the time-domain + // waveforms have the same number of ticks. This must be non-zero. 
+ nsamples: params.nf.nsamples, + + // groups: [std.range(n * 2560 + u * 40, n * 2560 + (u + 1) * 40 - 1) for u in std.range(0, 19)] + // + [std.range(n * 2560 + 800 + v * 40, n * 2560 + 800 + (v + 1) * 40 - 1) for v in std.range(0, 19)] + // + [std.range(n * 2560 + 1600 + w * 48, n * 2560 + 1600 + (w + 1) * 48 - 1) for w in std.range(0, 19)], + + // Half-FEMB grouping + groups: [std.range(n * 2560 + u * 20, n * 2560 + (u + 1) * 20 - 1) for u in std.range(0, 39)] + + [std.range(n * 2560 + 800 + v * 20, n * 2560 + 800 + (v + 1) * 20 - 1) for v in std.range(0, 39)] + + [std.range(n * 2560 + 1600 + w * 24, n * 2560 + 1600 + (w + 1) * 24 - 1) for w in std.range(0, 39)], + + + // Externally determined "bad" channels. + bad: [], + + // Overide defaults for specific channels. If an info is + // mentioned for a particular channel in multiple objects in this + // list then last mention wins. + channel_info: [ + + // First entry provides default channel info across ALL + // channels. Subsequent entries override a subset of channels + // with a subset of these entries. There's no reason to + // repeat values found here in subsequent entries unless you + // wish to change them. + { + channels: std.range(n * 2560, (n + 1) * 2560 - 1), + nominal_baseline: 2048.0, // adc count + gain_correction: 1.0, // unitless + response_offset: 0.0, // ticks? + pad_window_front: 10, // ticks? + pad_window_back: 10, // ticks? + decon_limit: 0.02, + decon_limit1: 0.09, + adc_limit: 60, // 15, + min_adc_limit: 200, // 50, + roi_min_max_ratio: 0.8, // default 0.8 + min_rms_cut: 1.0, // units??? + max_rms_cut: 30.0, // units??? + + // parameter used to make "rcrc" spectrum + rcrc: 1.1 * wc.millisecond, // 1.1 for collection, 3.3 for induction + rc_layers: 1, // default 2 + + // parameters used to make "config" spectrum + reconfig: {}, + + // list to make "noise" spectrum mask + freqmasks: [], + + // field response waveform to make "response" spectrum. 
+ response: {}, + + }, + + { + //channels: { wpid: wc.WirePlaneId(wc.Ulayer) }, + channels: std.range(n * 2560, n * 2560 + 800- 1), + freqmasks: [ + { value: 1.0, lobin: 0, hibin: $.nsamples - 1 }, + { value: 0.0, lobin: 169, hibin: 173 }, + { value: 0.0, lobin: 513, hibin: 516 }, + ], + /// this will use an average calculated from the anode + // response: { wpid: wc.WirePlaneId(wc.Ulayer) }, + /// this uses hard-coded waveform. + response: { waveform: handmade.u_resp, waveformid: wc.Ulayer }, + response_offset: 120, // offset of the negative peak + pad_window_front: 20, + decon_limit: 0.02, + decon_limit1: 0.07, + roi_min_max_ratio: 3.0, + }, + + { + //channels: { wpid: wc.WirePlaneId(wc.Vlayer) }, + channels: std.range(n * 2560 + 800, n * 2560 + 1600- 1), + freqmasks: [ + { value: 1.0, lobin: 0, hibin: $.nsamples - 1 }, + { value: 0.0, lobin: 169, hibin: 173 }, + { value: 0.0, lobin: 513, hibin: 516 }, + ], + /// this will use an average calculated from the anode + // response: { wpid: wc.WirePlaneId(wc.Vlayer) }, + /// this uses hard-coded waveform. 
+ response: { waveform: handmade.v_resp, waveformid: wc.Vlayer }, + response_offset: 124, + decon_limit: 0.01, + decon_limit1: 0.08, + roi_min_max_ratio: 1.5, + }, + + // local freqbinner = wc.freqbinner(params.daq.tick, params.nf.nsamples); + // local harmonic_freqs = [f*wc.kilohertz for f in + // // [51.5, 102.8, 154.2, 205.5, 256.8, 308.2, 359.2, 410.5, 461.8, 513.2, 564.5, 615.8] + // [51.5, 77.2, 102.8, 128.5, 154.2, 180.0, 205.5, 231.5, 256.8, 282.8, 308.2, 334.0, 359.2, 385.5, 410.5, 461.8, 513.2, 564.5, 615.8, 625.0] + // ]; + + { + //channels: { wpid: wc.WirePlaneId(wc.Wlayer) }, + channels: std.range(n * 2560 + 1600, n * 2560 + 2560- 1), + nominal_baseline: 400.0, + decon_limit: 0.05, + decon_limit1: 0.08, + // freqmasks: freqbinner.freqmasks(harmonic_freqs, 5.0*wc.kilohertz), + }, + + ] + rms_cuts, + } diff --git a/cfg/pgrapher/experiment/pdhd/chndb-resp.jsonnet b/cfg/pgrapher/experiment/pdhd/chndb-resp.jsonnet new file mode 100644 index 000000000..7659e456f --- /dev/null +++ b/cfg/pgrapher/experiment/pdhd/chndb-resp.jsonnet @@ -0,0 +1,102 @@ +// +// Warning: the offset value changed for the new field response, +// please change accordingly in chndb-base.jsonnet +// +// Total field response +// exported from FieldResponse, e.g., +// in OmnibusSigproc.cxx: +// auto const& pr = fravg.planes[2]; // V plane +// std::vector waveform(6000, 0.0); +// for (auto const& path : pr.paths) { +// auto const& current = path.current; +// const size_t nsamp = std::min(6000, (int)current.size()); +// for (size_t ind=0; ind IFrame + pre_proc :: function(anode, aname = "") { + + local waveform_map = { + type: 'WaveformMap', + name: 'wfm', + data: { + filename: "microboone-charge-error.json.bz2", + }, uses: [],}, + + local charge_err = g.pnode({ + type: 'ChargeErrorFrameEstimator', + name: "cefe-"+aname, + data: { + intag: "gauss%d" % anode.data.ident, + outtag: "gauss_error%d" % anode.data.ident, + anode: wc.tn(anode), + rebin: 4, // this number should be consistent with 
the waveform_map choice + fudge_factors: [2.31, 2.31, 1.1], // fudge factors for each plane [0,1,2] + time_limits: [12, 800], // the unit of this is in ticks + errors: wc.tn(waveform_map), + }, + }, nin=1, nout=1, uses=[waveform_map, anode]), + + local cmm_mod = g.pnode({ + type: 'CMMModifier', + name: "cmm-mod-"+aname, + data: { + cm_tag: "bad", + trace_tag: "gauss%d" % anode.data.ident, + anode: wc.tn(anode), + // start: 0, // start veto ... + // end: 9592, // end of veto + // ncount_cont_ch: 2, + // cont_ch_llimit: [296, 2336+4800 ], // veto if continues bad channels + // cont_ch_hlimit: [671, 2463+4800 ], + // ncount_veto_ch: 1, + // veto_ch_llimit: [3684], // direct veto these channels + // veto_ch_hlimit: [3699], + // dead_ch_ncount: 10, + // dead_ch_charge: 1000, + // ncount_dead_ch: 2, + // dead_ch_llimit: [2160, 2080], // veto according to the charge size for dead channels + // dead_ch_hlimit: [2176, 2096], + ncount_org: 1, // organize the dead channel ranges according to these boundaries + org_llimit: [0], // must be ordered ... + org_hlimit: [8500], // must be ordered ... + }, + }, nin=1, nout=1, uses=[anode]), + + local frame_quality_tagging = g.pnode({ + type: 'FrameQualityTagging', + name: "frame-qual-tag-"+aname, + data: { + trace_tag: "gauss%d" % anode.data.ident, + anode: wc.tn(anode), + nrebin: 4, // rebin count ... 
+ length_cut: 3, + time_cut: 3, + ch_threshold: 100, + n_cover_cut1: 12, + n_fire_cut1: 14, + n_cover_cut2: 6, + n_fire_cut2: 6, + fire_threshold: 0.22, + n_cover_cut3: [1200, 1200, 1800 ], + percent_threshold: [0.25, 0.25, 0.2 ], + threshold1: [300, 300, 360 ], + threshold2: [150, 150, 180 ], + min_time: 3180, + max_time: 7870, + flag_corr: 1, + }, + }, nin=1, nout=1, uses=[anode]), + + local frame_masking = g.pnode({ + type: 'FrameMasking', + name: "frame-masking-"+aname, + data: { + cm_tag: "bad", + trace_tags: ['gauss%d' % anode.data.ident,'wiener%d' % anode.data.ident,], + anode: wc.tn(anode), + }, + }, nin=1, nout=1, uses=[anode]), + + ret: g.pipeline([cmm_mod, frame_masking, charge_err], "uboone-preproc"), + }.ret, + + // A functio that sets up slicing for an APA. + slicing :: function(anode, aname, span=4, active_planes=[0,1,2], masked_planes=[], dummy_planes=[]) { + ret: g.pnode({ + type: "MaskSlices", + name: "slicing-"+aname, + data: { + tick_span: span, + wiener_tag: "wiener%d" % anode.data.ident, + charge_tag: "gauss%d" % anode.data.ident, + error_tag: "gauss_error%d" % anode.data.ident, + anode: wc.tn(anode), + min_tbin: 0, + max_tbin: 8500, + active_planes: active_planes, + masked_planes: masked_planes, + dummy_planes: dummy_planes, + // nthreshold: [1e-6, 1e-6, 1e-6], + nthreshold: [3.6, 3.6, 3.6], + }, + }, nin=1, nout=1, uses=[anode]), + }.ret, + + // A function sets up tiling for an APA incuding a per-face split. 
+ tiling :: function(anode, aname) { + + local slice_fanout = g.pnode({ + type: "SliceFanout", + name: "slicefanout-" + aname, + data: { multiplicity: 2 }, + }, nin=1, nout=2), + + local tilings = [g.pnode({ + type: "GridTiling", + name: "tiling-%s-face%d"%[aname, face], + data: { + anode: wc.tn(anode), + face: face, + nudge: 1e-2, + } + }, nin=1, nout=1, uses=[anode]) for face in [0,1]], + + local blobsync = g.pnode({ + type: "BlobSetSync", + name: "blobsetsync-" + aname, + data: { multiplicity: 2 } + }, nin=2, nout=1), + + // one face + // ret : tilings[0], + // two faces + ret: g.intern( + innodes=[slice_fanout], + outnodes=[blobsync], + centernodes=tilings, + edges= + [g.edge(slice_fanout, tilings[n], n, 0) for n in [0,1]] + + [g.edge(tilings[n], blobsync, 0, n) for n in [0,1]], + name='tiling-' + aname), + }.ret, + + // + multi_active_slicing_tiling :: function(anode, name, span=4) { + local active_planes = [[0,1,2],[0,1],[1,2],[0,2],], + local masked_planes = [[],[2],[0],[1]], + local iota = std.range(0,std.length(active_planes)-1), + local slicings = [$.slicing(anode, name+"_%d"%n, span, active_planes[n], masked_planes[n]) + for n in iota], + local tilings = [$.tiling(anode, name+"_%d"%n) + for n in iota], + local multipass = [g.pipeline([slicings[n],tilings[n]]) for n in iota], + ret: f.fanpipe("FrameFanout", multipass, "BlobSetMerge", "multi_active_slicing_tiling"), + }.ret, + + // + multi_masked_2view_slicing_tiling :: function(anode, name, span=500) { + local dummy_planes = [[2],[0],[1]], + local masked_planes = [[0,1],[1,2],[0,2]], + local iota = std.range(0,std.length(dummy_planes)-1), + local slicings = [$.slicing(anode, name+"_%d"%n, span, + active_planes=[],masked_planes=masked_planes[n], dummy_planes=dummy_planes[n]) + for n in iota], + local tilings = [$.tiling(anode, name+"_%d"%n) + for n in iota], + local multipass = [g.pipeline([slicings[n],tilings[n]]) for n in iota], + ret: f.fanpipe("FrameFanout", multipass, "BlobSetMerge", 
"multi_masked_slicing_tiling"), + }.ret, + + local clustering_policy = "uboone", // uboone, simple + + // Just clustering + clustering :: function(anode, aname, spans=1.0) { + ret : g.pnode({ + type: "BlobClustering", + name: "blobclustering-" + aname, + data: { spans : spans, policy: clustering_policy } + }, nin=1, nout=1), + }.ret, + + // in: IBlobSet out: ICluster + solving :: function(anode, aname, solving_type = "simple") { + + local bc = g.pnode({ + type: "BlobClustering", + name: "blobclustering-" + aname, + data: { policy: "uboone" } + }, nin=1, nout=1), + + local gc = g.pnode({ + type: "GlobalGeomClustering", + name: "global-clustering-" + aname, + data: { policy: "uboone" } + }, nin=1, nout=1), + + solving :: function(suffix = "1st") { + local bg = g.pnode({ + type: "BlobGrouping", + name: "blobgrouping-" + aname + suffix, + data: { + } + }, nin=1, nout=1), + local cs1 = g.pnode({ + type: "ChargeSolving", + name: "cs1-" + aname + suffix, + data: { + weighting_strategies: ["uniform"], //"uniform", "simple", "uboone" + solve_config: "uboone", + whiten: true, + } + }, nin=1, nout=1), + local cs2 = g.pnode({ + type: "ChargeSolving", + name: "cs2-" + aname + suffix, + data: { + weighting_strategies: ["uboone"], //"uniform", "simple", "uboone" + solve_config: "uboone", + whiten: true, + } + }, nin=1, nout=1), + local local_clustering = g.pnode({ + type: "LocalGeomClustering", + name: "local-clustering-" + aname + suffix, + data: { + dryrun: false, + } + }, nin=1, nout=1), + // ret: g.pipeline([bg, cs1],"cs-pipe"+aname+suffix), + ret: g.pipeline([bg, cs1, local_clustering, cs2],"cs-pipe"+aname+suffix), + }.ret, + + global_deghosting :: function(suffix = "1st") { + ret: g.pnode({ + type: "ProjectionDeghosting", + name: "ProjectionDeghosting-" + aname + suffix, + data: { + dryrun: false, + } + }, nin=1, nout=1), + }.ret, + + local_deghosting :: function(config_round = 1, suffix = "1st", good_blob_charge_th=300) { + ret: g.pnode({ + type: "InSliceDeghosting", + 
name: "inslice_deghosting-" + aname + suffix, + data: { + dryrun: false, + config_round: config_round, + good_blob_charge_th: good_blob_charge_th, + } + }, nin=1, nout=1), + }.ret, + + local gd1 = self.global_deghosting("1st"), + local cs1 = self.solving("1st"), + local ld1 = self.local_deghosting(1,"1st"), + + local gd2 = self.global_deghosting("2nd"), + local cs2 = self.solving("2nd"), + local ld2 = self.local_deghosting(2,"2nd"), + + local cs3 = self.solving("3rd"), + local ld3 = self.local_deghosting(3,"3rd"), + + ret: + if solving_type == "full" + then g.pipeline([bc, gd1, cs1, ld1, gd2, cs2, ld2, cs3, ld3, gc],"uboone-solving") + else g.pipeline([bc, cs1, ld1, gc],"simple-solving"), + }.ret, + + dump :: function(anode, aname, drift_speed) { + local cs = g.pnode({ + type: "ClusterFileSink", + name: "clustersink-"+aname, + data: { + outname: "clusters-apa-"+aname+".tar.gz", + format: "json", // json, numpy, dummy + } + }, nin=1, nout=0), + ret: cs + }.ret, +}; + +function() { + local imgpipe (anode, multi_slicing) = + if multi_slicing == "single" + then g.pipeline([ + // img.slicing(anode, anode.name, 109, active_planes=[0,1,2], masked_planes=[],dummy_planes=[]), // 109*22*4 + // img.slicing(anode, anode.name, 1916, active_planes=[], masked_planes=[0,1],dummy_planes=[2]), // 109*22*4 + img.slicing(anode, anode.name, 4, active_planes=[0,1,2], masked_planes=[],dummy_planes=[]), // 109*22*4 + img.tiling(anode, anode.name), + img.solving(anode, anode.name, "full"), + // img.clustering(anode, anode.name), + img.dump(anode, anode.name, params.lar.drift_speed),]) + else if multi_slicing == "pdhd1" + then g.pipeline([ + // img.slicing(anode, anode.name, 109, active_planes=[0,1,2], masked_planes=[],dummy_planes=[]), // 109*22*4 + // img.slicing(anode, anode.name, 1916, active_planes=[], masked_planes=[0,1],dummy_planes=[2]), // 109*22*4 + img.slicing(anode, anode.name, 4, active_planes=[0,1], masked_planes=[],dummy_planes=[2]), // 109*22*4 + img.tiling(anode, 
anode.name), + img.solving(anode, anode.name, "full"), + // img.clustering(anode, anode.name), + img.dump(anode, anode.name, params.lar.drift_speed),]) + else if multi_slicing == "active" + then g.pipeline([ + img.multi_active_slicing_tiling(anode, anode.name+"-ms-active", 4), + img.solving(anode, anode.name+"-ms-active"), + // img.clustering(anode, anode.name+"-ms-active"), + img.dump(anode, anode.name+"-ms-active", params.lar.drift_speed)]) + else if multi_slicing == "masked" + then g.pipeline([ + img.multi_masked_2view_slicing_tiling(anode, anode.name+"-ms-masked", 500), + img.clustering(anode, anode.name+"-ms-masked"), + img.dump(anode, anode.name+"-ms-masked", params.lar.drift_speed)]) + else { + local active_fork = g.pipeline([ + img.multi_active_slicing_tiling(anode, anode.name+"-ms-active", 4), + img.solving(anode, anode.name+"-ms-active", "full"), + img.dump(anode, anode.name+"-ms-active", params.lar.drift_speed), + ]), + local masked_fork = g.pipeline([ + img.multi_masked_2view_slicing_tiling(anode, anode.name+"-ms-masked", 500), // 109, 1744 (total 9592) + img.clustering(anode, anode.name+"-ms-masked"), + img.dump(anode, anode.name+"-ms-masked", params.lar.drift_speed), + ]), + ret: g.fan.fanout("FrameFanout",[active_fork,masked_fork], "fan_active_masked"), + }.ret, + + per_anode(anode, pipe_type = "multi") :: g.pipeline([ + img.pre_proc(anode, anode.name), + imgpipe(anode, pipe_type), + ], "per_anode"), +} \ No newline at end of file diff --git a/cfg/pgrapher/experiment/pdhd/magnify-sinks.jsonnet b/cfg/pgrapher/experiment/pdhd/magnify-sinks.jsonnet new file mode 100644 index 000000000..0e39083ea --- /dev/null +++ b/cfg/pgrapher/experiment/pdhd/magnify-sinks.jsonnet @@ -0,0 +1,137 @@ +// This provides multiple MagnifySink for e.g. protoDUNE + +local g = import 'pgraph.jsonnet'; +local wc = import 'wirecell.jsonnet'; + +// multiple MagnifySink +// tagn (n = 0, 1, ... 
5) for anode[n] +// FrameFanin tags configured in sim.jsonnet +function(tools, outputfile) { + + local nanodes = std.length(tools.anodes), + + local magorig = [ + g.pnode({ + type: 'MagnifySink', + name: 'magorig%d' % n, + data: { + output_filename: outputfile, + root_file_mode: 'UPDATE', + frames: ['orig%d' % n], + trace_has_tag: false, // traces from source have NO tag + anode: wc.tn(tools.anodes[n]), + }, + }, nin=1, nout=1) + for n in std.range(0, nanodes - 1) + ], + + local magraw = [ + g.pnode({ + type: 'MagnifySink', + name: 'magraw%d' % n, + data: { + output_filename: outputfile, + root_file_mode: 'UPDATE', + frames: ['raw%d' % n], + trace_has_tag: true, + cmmtree: [["noisy", "T_noisy%d"%n], + ["sticky", "T_stky%d"%n], + ["ledge", "T_ldg%d"%n], + ["harmonic", "T_hm%d"%n] ], // maskmap in nf.jsonnet + anode: wc.tn(tools.anodes[n]), + }, + }, nin=1, nout=1) + for n in std.range(0, nanodes - 1) + ], + + local magdecon = [ + g.pnode({ + type: 'MagnifySink', + name: 'magdecon%d' % n, + data: { + output_filename: outputfile, + root_file_mode: 'UPDATE', + frames: ['gauss%d' % n, 'wiener%d' % n], + trace_has_tag: true, + anode: wc.tn(tools.anodes[n]), + }, + }, nin=1, nout=1) + for n in std.range(0, nanodes - 1) + ], + + local magdebug = [ + g.pnode({ + type: 'MagnifySink', + name: 'magdebug%d' % n, + data: { + output_filename: outputfile, + root_file_mode: 'UPDATE', + frames: ['tight_lf%d' %n, 'loose_lf%d' %n, 'cleanup_roi%d' %n, + 'break_roi_1st%d' %n, 'break_roi_2nd%d' %n, + 'shrink_roi%d' %n, 'extend_roi%d' %n, 'mp2_roi%d' %n, 'mp3_roi%d' %n], + trace_has_tag: true, + anode: wc.tn(tools.anodes[n]), + }, + }, nin=1, nout=1) + for n in std.range(0, nanodes - 1) + ], + + local magtruth = [ + g.pnode({ + type: 'MagnifySink', + name: 'magtruth%d' % n, + data: { + output_filename: outputfile, + root_file_mode: 'UPDATE', + frames: ['deposplat%d' % n], + trace_has_tag: true, + anode: wc.tn(tools.anodes[n]), + }, + }, nin=1, nout=1) + for n in std.range(0, nanodes - 1) 
+ ], + + + local magthr = [ + g.pnode({ + type: 'MagnifySink', + name: 'magthr%d' % n, + data: { + output_filename: outputfile, + root_file_mode: 'UPDATE', + summaries: ['threshold%d' % n], // note that if tag set, each apa should have a tag set for FrameFanin + summary_operator: { ['threshold%d' % n]: 'set' }, // []: obj comprehension + anode: wc.tn(tools.anodes[n]), + }, + }, nin=1, nout=1) + for n in std.range(0, nanodes - 1) + ], + + local magdnnroi = [ + g.pnode({ + type: 'MagnifySink', + name: 'magdnnroi%d' % n, + data: { + output_filename: outputfile, + root_file_mode: 'UPDATE', + frames: ['dnnsp%d' %n], + trace_has_tag: true, + anode: wc.tn(tools.anodes[n]), + }, + }, nin=1, nout=1) + for n in std.range(0, nanodes - 1) + ], + + + return: { + truth_pipe: [g.pipeline([magtruth[n]], name='magtruthpipe%d' % n) for n in std.range(0, nanodes - 1)], + orig_pipe: [g.pipeline([magorig[n]], name='magorigpipe%d' % n) for n in std.range(0, nanodes - 1)], + raw_pipe: [g.pipeline([magraw[n]], name='magrawpipe%d' % n) for n in std.range(0, nanodes - 1)], + decon_pipe: [g.pipeline([magdecon[n]], name='magdeconpipe%d' % n) for n in std.range(0, nanodes - 1)], + debug_pipe: [g.pipeline([magdebug[n]], name='magdebugpipe%d' % n) for n in std.range(0, nanodes - 1)], + threshold_pipe: [g.pipeline([magthr[n]], name='magthrpipe%d' % n) for n in std.range(0, nanodes - 1)], + dnnroi_pipe: [g.pipeline([magdnnroi[n]], name='magdnnroipipe%d' % n) for n in std.range(0, nanodes - 1)], + }, + + +}.return diff --git a/cfg/pgrapher/experiment/pdhd/nf.jsonnet b/cfg/pgrapher/experiment/pdhd/nf.jsonnet new file mode 100644 index 000000000..3c9e3e0fa --- /dev/null +++ b/cfg/pgrapher/experiment/pdhd/nf.jsonnet @@ -0,0 +1,59 @@ +// This provides some noise filtering related pnodes, + +local g = import 'pgraph.jsonnet'; +local wc = import 'wirecell.jsonnet'; + +local default_dft = { type: 'FftwDFT' }; + +function(params, anode, chndbobj, n, name='', dft=default_dft) { + local single = { + type: 
'PDHDOneChannelNoise', + name: name, + uses: [dft, chndbobj, anode], + data: { + noisedb: wc.tn(chndbobj), + anode: wc.tn(anode), + dft: wc.tn(dft), + }, + }, + local grouped = { + type: 'PDHDCoherentNoiseSub', + name: name, + uses: [dft, chndbobj, anode], + data: { + noisedb: wc.tn(chndbobj), + anode: wc.tn(anode), + dft: wc.tn(dft), + rms_threshold: 0.0, + }, + }, + + local obnf = g.pnode({ + type: 'OmnibusNoiseFilter', + name: name, + data: { + + // Nonzero forces the number of ticks in the waveform + nticks: 0, + + // channel bin ranges are ignored + // only when the channelmask is merged to `bad` + // maskmap: {sticky: "bad", ledge: "bad", noisy: "bad"}, + channel_filters: [ + wc.tn(single), + ], + grouped_filters: [ + wc.tn(grouped), + ], + channel_status_filters: [ + ], + noisedb: wc.tn(chndbobj), + // intraces: 'orig%d' % n, // frame tag get all traces + intraces: 'orig', // frame tag get all traces + outtraces: 'raw%d' % n, + }, + }, uses=[chndbobj, anode, single, grouped], nin=1, nout=1), + + + pipe: g.pipeline([obnf], name=name), +}.pipe diff --git a/cfg/pgrapher/experiment/pdhd/params.jsonnet b/cfg/pgrapher/experiment/pdhd/params.jsonnet new file mode 100644 index 000000000..434fa6edf --- /dev/null +++ b/cfg/pgrapher/experiment/pdhd/params.jsonnet @@ -0,0 +1,173 @@ +// ProtoDUNE-SP specific parameters. This file inherits from the +// generic set of parameters and overrides things specific to PDSP. + +local wc = import "wirecell.jsonnet"; +local base = import "pgrapher/common/params.jsonnet"; + +base { + // This section will be overwritten in simparams.jsonnet + det : { + + // The "faces" is consumed by, at least, the Drifter and + // AnodePlane. The "wires" number is used to set + // AnodePlane.ident used to lookup the anode in WireSchema. + // It corresponds to the anode number. 
+ + // Numbers determined from protodunehd_v6 gdml + local apa_cpa = 3.5734*wc.m, + local cpa_thick = 3.175*wc.mm, // 1/8", from Bo Yu (BNL) and confirmed with LArSoft + local apa_w2w = 85.87*wc.mm, + local plane_gap = 4.76*wc.mm, + local apa_g2g = apa_w2w + 6*plane_gap, + + // The "anode" cut off plane, here measured from APA + // centerline, determines how close to the wires do we + // consider any depo. Anything closer will simply be + // discarded, else it will either be drifted or "backed up" to + // the response plane. This is somewhat arbitrary choice. + // Placing it w/in the response plane means any depos that are + // "backed up" won't have proper field response. But, the + // tighter this is made, the less volume is simulated. + // local apa_plane = 0.5*apa_g2g, // pick it to be at the grid wires + local apa_plane = 0.5*apa_g2g - plane_gap, // pick it to be at the first induction wires + + // The "response" plane is where the field response functions + // start. Garfield calcualtions start somewhere relative to + // something, here's where that is made concrete. This MUST + // match what field response functions also used. + response_plane: 10*wc.cm, // relative to collection wires + local res_plane = 0.5*apa_w2w + self.response_plane, + + // The cathode plane is like the anode cut off plane. Any + // depo not between the two is dropped prior to drifting. + local cpa_plane = apa_cpa - 0.5*cpa_thick, + + + // The volumes are then defined in terms of these above + // numbers. You can use "wirecell-util wires-info" or + // "wirecell-util wires-volumes" or others to understand the + // mapping of anode number to the 6 locations in X and Z. For + // Larsoft wires the numbering is column major starting at + // small X and Z so the centerline is -/+/-/+/-/+. Also + // important is that the faces are listed "front" first. + // Front is the one with the more positive X coordinates and + // if we want to ignore a face it is made null. 
+ volumes: [ + { + local sign = 2*(n%2)-1, + local centerline = sign*apa_cpa, + wires: n, // anode number + name: "apa%d"%n, + faces: + // top, front face is against cryo wall + if sign > 0 + then [ + null, + { + anode: centerline - apa_plane, + response: centerline - res_plane, + cathode: centerline - cpa_plane, + } + ] + // bottom, back face is against cryo wall + else [ + { + anode: centerline + apa_plane, + response: centerline + res_plane, + cathode: centerline + cpa_plane, + }, + null + ], + } for n in std.range(0,3)], + + // This describes some rough, overall bounding box. It's not + // directly needed but can be useful on the Jsonnet side, for + // example when defining some simple kinematics. It is + // represented by a ray going from extreme corners of a + // rectangular solid. Again "wirecell-util wires-info" helps + // to choose something. + bounds : { + tail: wc.point(-4.0, 0.0, 0.0, wc.m), + head: wc.point(+4.0, 6.1, 7.0, wc.m), + } + }, + + daq: super.daq { + nticks: 6000, + }, + + adc: super.adc { + resolution: 14, + // reuse ProtoDUNE SP values + baselines: [1003.4*wc.millivolt,1003.4*wc.millivolt,507.7*wc.millivolt], + fullscale: [0.2*wc.volt, 1.6*wc.volt], + }, + + elecs: [ + super.elec { + // The FE amplifier gain in units of Voltage/Charge. + // gain : 14.0*wc.mV/wc.fC, + // gain : std.extVar("elecGain")*wc.mV/wc.fC, + + // The shaping (aka peaking) time of the amplifier shaper. + shaping : 2.2*wc.us, + } + for n in std.range(0,3) + ], + + elec: $.elecs[0], // nominal + + sim: super.sim { + + // For running in LArSoft, the simulation must be in fixed time mode. + fixed: true, + + // The "absolute" time (ie, in G4 time) that the lower edge of + // of final readout tick #0 should correspond to. This is a + // "fixed" notion. + local tick0_time = -250*wc.us, + + // Open the ductor's gate a bit early. 
+ local response_time_offset = $.det.response_plane / $.lar.drift_speed, + local response_nticks = wc.roundToInt(response_time_offset / $.daq.tick), + + ductor : { + nticks: $.daq.nticks + response_nticks, + readout_time: self.nticks * $.daq.tick, + start_time: tick0_time - response_time_offset, + }, + + // To counter the enlarged duration of the ductor, a Reframer + // chops off the little early, extra time. Note, tags depend on how + reframer: { + tbin: response_nticks, + nticks: $.daq.nticks, + } + + }, + + files: { + wires: "protodunehd-wires-larsoft-v1.json.bz2", + + fields: [ + "np04hd-garfield-6paths-mcmc-bestfit.json.bz2", + "dune-garfield-1d565.json.bz2", + "dune-garfield-1d565.json.bz2", + "dune-garfield-1d565.json.bz2", + ], + + fltresp: "protodunehd-field-response-filters.json.bz2", + + // Noise models for different FE amplifier gains + // Note: set gain value accordingly in the field of elecs + // noise: "protodunehd-noise-spectra-14mVfC-v1.json.bz2", + // noise: "protodunehd-noise-spectra-7d8mVfC-v1.json.bz2", + noise: if $.elec.gain > 8*wc.mV/wc.fC then "protodunehd-noise-spectra-14mVfC-v1.json.bz2" + else "protodunehd-noise-spectra-7d8mVfC-v1.json.bz2", + + + chresp: null, + }, + +} + diff --git a/cfg/pgrapher/experiment/pdhd/sim.jsonnet b/cfg/pgrapher/experiment/pdhd/sim.jsonnet new file mode 100644 index 000000000..dfa0f04a8 --- /dev/null +++ b/cfg/pgrapher/experiment/pdhd/sim.jsonnet @@ -0,0 +1,88 @@ +local wc = import "wirecell.jsonnet"; +local g = import "pgraph.jsonnet"; +local f = import "pgrapher/common/funcs.jsonnet"; +local sim_maker = import "pgrapher/common/sim/nodes.jsonnet"; + + +// return some nodes, includes base sim nodes. +function(params, tools) { + local sim = sim_maker(params, tools), + + local nanodes = std.length(tools.anodes), + + // I rue the day that we must have an (anode) X (field) cross product! 
+ // local ductors = sim.make_detector_ductors("nominal", tools.anodes, tools.pirs[0]), + + + local zippers = [sim.make_depozipper("depozipper-"+tools.anodes[n].name, tools.anodes[n], tools.pirs[n]) + for n in std.range(0, nanodes-1)], + local transforms = [sim.make_depotransform("depotransform-"+tools.anodes[n].name, tools.anodes[n], tools.pirs[n]) + for n in std.range(0, nanodes-1)], + local depos2traces = transforms, + //local depos2traces = zippers, + + local digitizers = [ + sim.digitizer(tools.anodes[n], name="digitizer-" + tools.anodes[n].name, tag="orig") // tag for frames + for n in std.range(0,nanodes-1)], + + local reframers = [ + g.pnode({ + type: 'Reframer', + name: 'reframer-'+tools.anodes[n].name, + data: { + anode: wc.tn(tools.anodes[n]), + tags: [], // ?? what do? + fill: 0.0, + tbin: params.sim.reframer.tbin, + toffset: 0, + nticks: params.sim.reframer.nticks, + }, + }, nin=1, nout=1) for n in std.range(0, nanodes-1)], + + + // fixme: see https://github.com/WireCell/wire-cell-gen/issues/29 + local make_noise_model = function(anode, csdb=null) { + type: "EmpiricalNoiseModel", + name: "empericalnoise-" + anode.name, + data: { + anode: wc.tn(anode), + dft: wc.tn(tools.dft), + chanstat: if std.type(csdb) == "null" then "" else wc.tn(csdb), + spectra_file: params.files.noise, + nsamples: params.daq.nticks, + period: params.daq.tick, + wire_length_scale: 1.0*wc.cm, // optimization binning + }, + uses: [anode, tools.dft] + if std.type(csdb) == "null" then [] else [csdb], + }, + local noise_models = [make_noise_model(anode) for anode in tools.anodes], + + + local add_noise = function(model) g.pnode({ + type: "AddNoise", + name: "addnoise-" + model.name, + data: { + rng: wc.tn(tools.random), + dft: wc.tn(tools.dft), + model: wc.tn(model), + nsamples: params.daq.nticks, + replacement_percentage: 0.02, // random optimization + }}, nin=1, nout=1, uses=[tools.random, tools.dft, model]), + + local noises = [add_noise(model) for model in noise_models], + + local 
outtags = ["orig%d"%n for n in std.range(0, nanodes-1)], + + ret : { + + signal_pipelines: [g.pipeline([depos2traces[n], reframers[n], digitizers[n]], + name="simsigpipe-" + tools.anodes[n].name) for n in std.range(0, nanodes-1)], + + splusn_pipelines: [g.pipeline([depos2traces[n], reframers[n], noises[n], digitizers[n]], + name="simsignoipipe-" + tools.anodes[n].name) for n in std.range(0, nanodes-1)], + + signal: f.fanpipe('DepoSetFanout', self.signal_pipelines, 'FrameFanin', "simsignalgraph", outtags), + splusn: f.fanpipe('DepoSetFanout', self.splusn_pipelines, 'FrameFanin', "simsplusngraph", outtags), + + } + sim, // tack on base for user sugar. +}.ret diff --git a/cfg/pgrapher/experiment/pdhd/simparams.jsonnet b/cfg/pgrapher/experiment/pdhd/simparams.jsonnet new file mode 100644 index 000000000..41b604557 --- /dev/null +++ b/cfg/pgrapher/experiment/pdhd/simparams.jsonnet @@ -0,0 +1,260 @@ +// Here we override params.jsonnet to provide simulation-specific params. + +local base = import 'pgrapher/experiment/pdhd/params.jsonnet'; +local wc = import 'wirecell.jsonnet'; + +base { + lar: super.lar { + // be sure you really want to have this. default value: 8 ms + // lifetime: 35.0*wc.ms, + }, + + // redefine the detector volumes with the cryostat side included + det : { + + // The "faces" is consumed by, at least, the Drifter and + // AnodePlane. The "wires" number is used to set + // AnodePlane.ident used to lookup the anode in WireSchema. + // It corresponds to the anode number. + + // Numbers determined from protodunehd_v6 gdml + local apa_cpa = 3.5734*wc.m, + local cpa_thick = 3.175*wc.mm, // 1/8", from Bo Yu (BNL) and confirmed with LArSoft + local apa_w2w = 85.87*wc.mm, + local plane_gap = 4.76*wc.mm, + local apa_g2g = apa_w2w + 6*plane_gap, + + // The "anode" cut off plane, here measured from APA + // centerline, determines how close to the wires do we + // consider any depo. 
Anything closer will simply be + // discarded, else it will either be drifted or "backed up" to + // the response plane. This is somewhat arbitrary choice. + // Placing it w/in the response plane means any depos that are + // "backed up" won't have proper field response. But, the + // tighter this is made, the less volume is simulated. + // local apa_plane = 0.5*apa_g2g, // pick it to be at the grid wires + local apa_plane = 0.5*apa_g2g - plane_gap, // pick it to be at the first induction wires + + // The "response" plane is where the field response functions + // start. Garfield calcualtions start somewhere relative to + // something, here's where that is made concrete. This MUST + // match what field response functions also used. + response_plane: 10*wc.cm, // relative to collection wires + local res_plane = 0.5*apa_w2w + self.response_plane, + + // The cathode plane is like the anode cut off plane. Any + // depo not between the two is dropped prior to drifting. + local cpa_plane = apa_cpa - 0.5*cpa_thick, + + + // The volumes are then defined in terms of these above + // numbers. You can use "wirecell-util wires-info" or + // "wirecell-util wires-volumes" or others to understand the + // mapping of anode number to the 6 locations in X and Z. For + // Larsoft wires the numbering is column major starting at + // small X and Z so the centerline is -/+/-/+/-/+. Also + // important is that the faces are listed "front" first. + // Front is the one with the more positive X coordinates and + // if we want to ignore a face it is made null. 
+ volumes: [ + { + local sign = 2*(n%2)-1, + local centerline = sign*apa_cpa, + wires: n, // anode number + name: "apa%d"%n, + faces: + // top, front face is against cryo wall + if sign > 0 + then [ + { + anode: centerline + apa_plane, + response: centerline + res_plane, + cathode: centerline + cpa_plane, + }, + { + anode: centerline - apa_plane, + response: centerline - res_plane, + cathode: centerline - cpa_plane, + } + ] + // bottom, back face is against cryo wall + else [ + { + anode: centerline + apa_plane, + response: centerline + res_plane, + cathode: centerline + cpa_plane, + }, + { + anode: centerline - apa_plane, + response: centerline - res_plane, + cathode: centerline - cpa_plane, + } + + ], + } for n in std.range(0,3)], + + // This describes some rough, overall bounding box. It's not + // directly needed but can be useful on the Jsonnet side, for + // example when defining some simple kinematics. It is + // represented by a ray going from extreme corners of a + // rectangular solid. Again "wirecell-util wires-info" helps + // to choose something. + bounds : { + tail: wc.point(-4.0, 0.0, 0.0, wc.m), + head: wc.point(+4.0, 6.1, 7.0, wc.m), + } + }, + + daq: super.daq { + + // Number of readout ticks. See also sim.response.nticks. + // In MB LArSoft simulation, they expect a different number of + // ticks than acutal data. + nticks: 6000, + }, + + // These parameters only make sense for running WCT simulation on + // microboone in larsoft. The "trigger" section is not + // "standard". This section is just a set up for use below in + // "sim". There is no trigger, per se, in the simulation but + // rather a contract between the generators of energy depositions + // (ie, LarG4) and the drift and induction simulation (WCT). For + // details of this contract see: + // https://microboone-docdb.fnal.gov/cgi-bin/private/ShowDocument?docid=12290 + //trigger : { + + // // A hardware trigger occurs at some "absolute" time but near + // // 0.0 for every "event". 
It is measured in "simulation" time + // // which is the same clock used for expressing energy + // // deposition times. The following values are from table 3 of + // // DocDB 12290. + // hardware: { + // times: { + + // none: 0.0, + + // // BNB hardware trigger time. Note interactions + // // associated with BNB neutrinos should all be produced + // // starting in the beam gate which begins at 3125ns and is + // // 1600ns in duration. + // bnb : -31.25*wc.ns, + + + // // Like above but for NUMI. It's gate is 9600ns long starting + // // at 4687.5ns. + // numi : -54.8675*wc.ns, + + // ext : -414.0625*wc.ns, + // + // mucs: -405.25*wc.ns, + // }, + + // // Select on of the trigger types + // type: "bnb", + + // time: self.times[self.type], + // }, + + // // Measured relative to the hardware trigger time above is a + // // time offset to the time that the first tick of the readout + // // should sample. This is apparently fixed for all hardware + // // trigger types (?). + // time_offset: -1.6*wc.ms, + + // time: self.hardware.time + self.time_offset, + //}, + + sim: super.sim { + + // For running in LArSoft, the simulation must be in fixed time mode. + fixed: true, + continuous: false, + fluctuate: true, + + //ductor : super.ductor { + // start_time: $.daq.start_time - $.elec.fields.drift_dt + $.trigger.time, + //}, + + + // Additional e.g. 10 us time difference is due to the larger drift velocity + // in Garfield field response where the collection plane peak + // at around 81 us instead of response_plane (10 cm to Y plane) /drift_speed. + // Assuming a constant drift velocity, this correction is needed. + // Interplane timeoffset still holds and will be intrinsically taken into account + // in the 2D decon. + // ATTENTION: when response variation (sys_status) turned on, an offset is needed. 
+ // smearing function is centralized at t=0 instead of starting from t=0 + //reframer: super.reframer{ + // tbin: if $.sys_status == true + // then (81*wc.us-($.sys_resp.start))/($.daq.tick) + // else (81*wc.us)/($.daq.tick), + // nticks: $.daq.nticks, + // toffset: if $.sys_status == true + // then $.elec.fields.drift_dt - 81*wc.us + $.sys_resp.start + // else $.elec.fields.drift_dt - 81*wc.us, + //}, + + }, + // This is a non-standard, MB-specific variable. Each object + // attribute holds an array of regions corresponding to a + // particular set of field response functions. A region is + // defined as an array of trios: plane, min and max wire index. + // Each trio defines a swath in the transverse plane bounded by + // the min/max wires. A region is finally the intersection or + // overlap of all its trios in the transverse plane. + //shorted_regions : { + // uv: [ + // [ { plane:0, min:296, max:296 } ], + // [ { plane:0, min:298, max:315 } ], + // [ { plane:0, min:317, max:317 } ], + // [ { plane:0, min:319, max:327 } ], + // [ { plane:0, min:336, max:337 } ], + // [ { plane:0, min:343, max:345 } ], + // [ { plane:0, min:348, max:351 } ], + // [ { plane:0, min:376, max:400 } ], + // [ { plane:0, min:410, max:445 } ], + // [ { plane:0, min:447, max:484 } ], + // [ { plane:0, min:501, max:503 } ], + // [ { plane:0, min:505, max:520 } ], + // [ { plane:0, min:522, max:524 } ], + // [ { plane:0, min:536, max:559 } ], + // [ { plane:0, min:561, max:592 } ], + // [ { plane:0, min:595, max:598 } ], + // [ { plane:0, min:600, max:632 } ], + // [ { plane:0, min:634, max:652 } ], + // [ { plane:0, min:654, max:654 } ], + // [ { plane:0, min:656, max:671 } ], + // ], + // vy: [ + // [ { plane:2, min:2336, max:2399 } ], + // [ { plane:2, min:2401, max:2414 } ], + // [ { plane:2, min:2416, max:2463 } ], + // ], + //}, + + //files: super.files{ + // chresp: null, + //}, + + // This sets a relative gain at the input to the ADC. 
Note, if + // you are looking to fix SimDepoSource, you are in the wrong + // place. See the "scale" parameter of wcls.input.depos() defined + // in pgrapher/common/ui/wcls/nodes.jsonnet. + // elec: super.elec { + // postgain: 1.0, + // shaping: 2.2 * wc.us, + // }, + + sys_status: false, + sys_resp: { + // overall_short_padding should take into account this offset "start". + start: -10 * wc.us, + magnitude: 1.0, + time_smear: 1.0 * wc.us, + }, + + rc_resp: { + width: 1.1*wc.ms, + rc_layers: 0, + } +} diff --git a/cfg/pgrapher/experiment/pdhd/sp-filters.jsonnet b/cfg/pgrapher/experiment/pdhd/sp-filters.jsonnet new file mode 100644 index 000000000..bd3f2f2b4 --- /dev/null +++ b/cfg/pgrapher/experiment/pdhd/sp-filters.jsonnet @@ -0,0 +1,125 @@ +// WARNING: the SP C++ code has a lot of hard coded names for various +// filter components. Until this is cleaned up, one MUST configure +// the filter competents with matching type names and not change their +// instance names. + + +local wc = import 'wirecell.jsonnet'; + +local lf(name, data={}) = { + type: 'LfFilter', + name: name, + data: { + max_freq: 1 * wc.megahertz, + tau: 0.0 * wc.megahertz, + } + data, +}; +local hf(name, data={}) = { + type: 'HfFilter', + name: name, + data: { + max_freq: 1 * wc.megahertz, + sigma: 0.0 * wc.megahertz, + power: 2, + flag: true, + } + data, +}; +// All "wire" filters are Hf with different base values. +local wf(name, data={}) = { + type: 'HfFilter', + name: name, + data: { + max_freq: 1, // warning: units + power: 2, + flag: false, + sigma: 0.0, // caller should provide + } + data, +}; + +// Zeus take my eyes! Magic numbers are everywhere! 
+/** + * Default SP parameters (till May 2019) + */ +// [ +// lf('ROI_tight_lf', { tau: 0.02 * wc.megahertz }), // 0.02 -> 0.027 +// lf('ROI_tighter_lf', { tau: 0.1 * wc.megahertz }), // 0.1 -> 0.075 +// lf('ROI_loose_lf', { tau: 0.0025 * wc.megahertz }), // 0.0025 -> 0.004 +// +// hf('Gaus_tight'), +// hf('Gaus_wide', { sigma: 1.11408e-01 * wc.megahertz }), +// hf('Wiener_tight_U', { +// sigma: 5.75416e+01 / 800.0 * 2 * wc.megahertz, +// power: 4.10358e+00, +// }), +// hf("Wiener_tight_V", { sigma: 5.99306e+01/800.0*2 * wc.megahertz, +// power: 4.20820e+00 }), +// hf('Wiener_tight_W', { +// sigma: 5.88802e+01 / 800.0 * 2 * wc.megahertz, +// power: 4.17455e+00, +// }), +// hf('Wiener_wide_U', { +// sigma: 1.78695e+01 / 200.0 * 2 * wc.megahertz, +// power: 5.33129e+00, +// }), +// hf("Wiener_wide_V", { sigma: 1.84666e+01/200.0*2 * wc.megahertz, +// power: 5.60489e+00 }), +// hf('Wiener_wide_W', { +// sigma: 1.83044e+01 / 200.0 * 2 * wc.megahertz, +// power: 5.44945e+00, +// }), +// +// wf('Wire_ind', { sigma: 1.0 / wc.sqrtpi * 1.4 }), +// wf('Wire_col', { sigma: 1.0 / wc.sqrtpi * 3.0 }), +// ] + +/** + * Optimized SP parameters (May 2019) + * Associated tuning in sp.jsonnet + */ +[ + lf('ROI_loose_lf', { tau: 0.002 * wc.megahertz }), // 0.0025 + lf('ROI_tight_lf', { tau: 0.016 * wc.megahertz }), // 0.02 + lf('ROI_tighter_lf', { tau: 0.08 * wc.megahertz }), // 0.1 + + hf('Gaus_tight'), + hf('Gaus_wide', { sigma: 0.12 * wc.megahertz }), + + + hf('Wiener_tight_U', { + sigma: 0.221933 * wc.megahertz, + power: 6.55413,}), + hf("Wiener_tight_V", { + sigma: 0.222723 * wc.megahertz, + power: 8.75998 }), + hf('Wiener_tight_W', { + sigma: 0.225567 * wc.megahertz, + power: 3.47846,}), + + hf('Wiener_tight_U_APA1', { + sigma: 0.203451 * wc.megahertz, + power: 5.78093,}), + hf("Wiener_tight_V_APA1", { + sigma: 0.160191 * wc.megahertz, + power: 3.54835 }), + hf('Wiener_tight_W_APA1', { + // sigma: 0.125448 * wc.megahertz, + sigma: 0.06 * wc.megahertz, + power: 5.27080,}), + + + 
hf('Wiener_wide_U', { + sigma: 0.186765 * wc.megahertz, + power: 5.05429, + }), + hf("Wiener_wide_V", { + sigma: 0.1936 * wc.megahertz, + power: 5.77422, + }), + hf('Wiener_wide_W', { + sigma: 0.175722 * wc.megahertz, + power: 4.37928, + }), + + wf('Wire_ind', { sigma: 1.0 / wc.sqrtpi * 0.75 }), + wf('Wire_col', { sigma: 1.0 / wc.sqrtpi * 10.0 }), +] diff --git a/cfg/pgrapher/experiment/pdhd/sp.jsonnet b/cfg/pgrapher/experiment/pdhd/sp.jsonnet new file mode 100644 index 000000000..2c6d98da3 --- /dev/null +++ b/cfg/pgrapher/experiment/pdhd/sp.jsonnet @@ -0,0 +1,106 @@ +// This provides signal processing related pnodes, + +local g = import 'pgraph.jsonnet'; +local wc = import 'wirecell.jsonnet'; + +// BIG FAT FIXME: we are taking from uboone. If PDSP needs tuning do +// four things: 0) read this comment, 1) cp this file into pdsp/, 2) +// fix the import and 3) delete this comment. +local spfilt = import 'pgrapher/experiment/pdhd/sp-filters.jsonnet'; + +function(params, tools, override = {}) { + + local pc = tools.perchanresp_nameuses, + + local resolution = params.adc.resolution, + local fullscale = params.adc.fullscale[1] - params.adc.fullscale[0], + local ADC_mV_ratio = ((1 << resolution) - 1 ) / fullscale, + + // pDSP needs a per-anode sigproc + make_sigproc(anode, name=null):: g.pnode({ + type: 'OmnibusSigProc', + name: + if std.type(name) == 'null' + then anode.name + 'sigproc%d' % anode.data.ident + else name, + + data: { + /** + * Default SP parameters (till May 2019) + */ + // anode: wc.tn(anode), + // field_response: wc.tn(tools.field), + // per_chan_resp: pc.name, + // fft_flag: 0, // 1 is faster but higher memory, 0 is slightly slower but lower memory + // postgain: 1, // default 1.2 + // ADC_mV: 4096 / (1400.0 * wc.mV), // default 4096/2000 + // r_fake_signal_low_th: 400, // default 500 + // r_fake_signal_high_th: 800, // default 1000 + // r_fake_signal_low_th_ind_factor: 1.5, // default 1 + // r_fake_signal_high_th_ind_factor: 1.5, // default 1 + // 
troi_col_th_factor: 5.0, // default 5 + // troi_ind_th_factor: 3.5, // default 3 + // r_th_factor: 3.5, // default 3 + + /** + * Optimized SP parameters (May 2019) + * Associated tuning in sp-filters.jsonnet + */ + anode: wc.tn(anode), + dft: wc.tn(tools.dft), + field_response: wc.tn(tools.fields[anode.data.ident]), + elecresponse: wc.tn(tools.elec_resp), + ftoffset: 0.0, // default 0.0 + ctoffset: 1.0*wc.microsecond, // default -8.0 + per_chan_resp: pc.name, + fft_flag: 0, // 1 is faster but higher memory, 0 is slightly slower but lower memory + postgain: 1.0, // default 1.2 + ADC_mV: ADC_mV_ratio, // 4096 / (1400.0 * wc.mV), + troi_col_th_factor: 2.5, // default 5 + troi_ind_th_factor: 3.0, // default 3 + lroi_rebin: 6, // default 6 + lroi_th_factor: 3.5, // default 3.5 + lroi_th_factor1: 0.7, // default 0.7 + lroi_jump_one_bin: 1, // default 0 + + r_th_factor: 3.0, // default 3 + r_fake_signal_low_th: 375, // default 500 + r_fake_signal_high_th: 750, // default 1000 + r_fake_signal_low_th_ind_factor: 1.0, // default 1 + r_fake_signal_high_th_ind_factor: 1.0, // default 1 + r_th_peak: 3.0, // default 3.0 + r_sep_peak: 6.0, // default 6.0 + r_low_peak_sep_threshold_pre: 1200, // default 1200 + + + // frame tags + wiener_tag: 'wiener%d' % anode.data.ident, + wiener_threshold_tag: 'threshold%d' % anode.data.ident, + decon_charge_tag: 'decon_charge%d' % anode.data.ident, + gauss_tag: 'gauss%d' % anode.data.ident, + + use_roi_debug_mode: false, + tight_lf_tag: 'tight_lf%d' % anode.data.ident, + loose_lf_tag: 'loose_lf%d' % anode.data.ident, + cleanup_roi_tag: 'cleanup_roi%d' % anode.data.ident, + break_roi_loop1_tag: 'break_roi_1st%d' % anode.data.ident, + break_roi_loop2_tag: 'break_roi_2nd%d' % anode.data.ident, + shrink_roi_tag: 'shrink_roi%d' % anode.data.ident, + extend_roi_tag: 'extend_roi%d' % anode.data.ident, + + use_multi_plane_protection: false, + mp3_roi_tag: 'mp3_roi%d' % anode.data.ident, + mp2_roi_tag: 'mp2_roi%d' % anode.data.ident, + + isWrapped: 
false, + // process_planes: if anode.data.ident==0 then [0, 1] else [0, 1, 2], + plane2layer: if anode.data.ident==0 then [0,2,1] else [0,1,2], + + Wiener_tight_filters: if anode.data.ident==0 + then ["Wiener_tight_U_APA1", "Wiener_tight_W_APA1", "Wiener_tight_V_APA1"] // ind, ind, col + else ["Wiener_tight_U", "Wiener_tight_V", "Wiener_tight_W"], + + } + override, + }, nin=1, nout=1, uses=[anode, tools.dft, tools.field, tools.fields[1], tools.fields[2], tools.fields[3], tools.elec_resp] + pc.uses + spfilt), + +} diff --git a/cfg/pgrapher/experiment/pdhd/wcls-nf-dnnsp-img.jsonnet b/cfg/pgrapher/experiment/pdhd/wcls-nf-dnnsp-img.jsonnet new file mode 100644 index 000000000..5f3555af3 --- /dev/null +++ b/cfg/pgrapher/experiment/pdhd/wcls-nf-dnnsp-img.jsonnet @@ -0,0 +1,286 @@ + +local reality = std.extVar('reality'); +local sigoutform = std.extVar('signal_output_form'); // eg "sparse" or "dense" + + +local wc = import 'wirecell.jsonnet'; +local f = import "pgrapher/common/funcs.jsonnet"; +local g = import 'pgraph.jsonnet'; + +local raw_input_label = std.extVar('raw_input_label'); // eg "daq" + + +local data_params = import 'params.jsonnet'; +local simu_params = import 'simparams.jsonnet'; +local params = if reality == 'data' then data_params else simu_params; + + +local tools_maker = import 'pgrapher/common/tools.jsonnet'; +local tools = tools_maker(params); + +local wcls_maker = import 'pgrapher/ui/wcls/nodes.jsonnet'; +local wcls = wcls_maker(params, tools); + +//local chndb_maker = import "pgrapher/experiment/pdsp/chndb.jsonnet"; + +local sp_maker = import 'pgrapher/experiment/pdhd/sp.jsonnet'; + + +// Collect the WC/LS input converters for use below. Make sure the +// "name" argument matches what is used in the FHiCL that loads this +// file. In particular if there is no ":" in the inputer then name +// must be the empty string. 
+local wcls_input = { + adc_digits: g.pnode({ + type: 'wclsRawFrameSource', + name: '', + data: { + art_tag: raw_input_label, + frame_tags: ['orig'], // this is a WCT designator + // nticks: params.daq.nticks, + }, + }, nin=0, nout=1), + +}; + +// Collect all the wc/ls output converters for use below. Note the +// "name" MUST match what is used in the "outputers" parameter in the +// FHiCL that loads this file. +local mega_anode = { + type: 'MegaAnodePlane', + name: 'meganodes', + data: { + anodes_tn: [wc.tn(anode) for anode in tools.anodes], + }, +}; + +local wcls_output = { + // The noise filtered "ADC" values. These are truncated for + // art::Event but left as floats for the WCT SP. Note, the tag + // "raw" is somewhat historical as the output is not equivalent to + // "raw data". + nf_digits: g.pnode({ + type: 'wclsFrameSaver', + name: 'nfsaver', + data: { + // anode: wc.tn(tools.anode), + anode: wc.tn(mega_anode), + digitize: true, // true means save as RawDigit, else recob::Wire + frame_tags: ['raw'], + // nticks: params.daq.nticks, + chanmaskmaps: ['bad'], + }, + }, nin=1, nout=1, uses=[mega_anode]), + + + // The output of signal processing. Note, there are two signal + // sets each created with its own filter. The "gauss" one is best + // for charge reconstruction, the "wiener" is best for S/N + // separation. Both are used in downstream WC code. 
+ sp_signals: g.pnode({ + type: 'wclsFrameSaver', + name: 'spsaver', + data: { + // anode: wc.tn(tools.anode), + anode: wc.tn(mega_anode), + digitize: false, // true means save as RawDigit, else recob::Wire + frame_tags: ['gauss', 'wiener', 'dnnsp'], + frame_scale: [0.001, 0.001, 0.001], + // nticks: params.daq.nticks, + chanmaskmaps: [], + nticks: -1, + }, + }, nin=1, nout=1, uses=[mega_anode]), +}; + +// local perfect = import 'chndb-perfect.jsonnet'; +local base = import 'chndb-base.jsonnet'; +local chndb = [{ + type: 'OmniChannelNoiseDB', + name: 'ocndbperfect%d' % n, + // data: perfect(params, tools.anodes[n], tools.field, n) { dft:wc.tn(tools.dft) }, + data: base(params, tools.anodes[n], tools.field, n) { dft:wc.tn(tools.dft) }, + uses: [tools.anodes[n], tools.field, tools.dft], +} for n in std.range(0, std.length(tools.anodes) - 1)]; + +local nf_maker = import 'pgrapher/experiment/pdhd/nf.jsonnet'; +local nf_pipes = [nf_maker(params, tools.anodes[n], chndb[n], n, name='nf%d' % n) for n in std.range(0, std.length(tools.anodes) - 1)]; + +local sp_override = { // assume all tages sets in base sp.jsonnet + sparse: sigoutform == 'sparse', + // wiener_tag: "", + // gauss_tag: "", + use_roi_refinement: true, + use_roi_debug_mode: true, + troi_col_th_factor: 5, + //tight_lf_tag: "", + // loose_lf_tag: "", + //cleanup_roi_tag: "", + break_roi_loop1_tag: "", + break_roi_loop2_tag: "", + shrink_roi_tag: "", + extend_roi_tag: "", + //m_decon_charge_tag: "", + use_multi_plane_protection: true, + mp_tick_resolution: 10, +}; + + +//local sp = sp_maker(params, tools, { sparse: sigoutform == 'sparse' }); +local sp = sp_maker(params, tools, sp_override); + +local sp_pipes = [sp.make_sigproc(a) for a in tools.anodes]; + +local img = import 'pgrapher/experiment/pdhd/img.jsonnet'; +local img_maker = img(); +local img_pipes = [img_maker.per_anode(a) for a in tools.anodes]; + +//local util = import 'pgrapher/experiment/pdhd/funcs.jsonnet'; +local chsel_pipes = [ + g.pnode({ + 
type: 'ChannelSelector', + name: 'chsel%d' % n, + data: { + channels: std.range(2560 * n, 2560 * (n + 1) - 1), + // tags: ['orig%d' % n], // traces tag + }, + }, nin=1, nout=1) + for n in std.range(0, std.length(tools.anodes) - 1) +]; + +local dnnroi = import 'pgrapher/experiment/pdhd/dnnroi.jsonnet'; +local ts = { + type: "TorchService", + name: "dnnroi", + data: { + // model: "ts-model/unet-l23-cosmic500-e50.ts", + // model: "ts-model/CP49.ts", + model: "ts-model/unet-cosmic390-newwc-depofluxsplat-pdhd.ts", + //model: "ts-model/unet-cosmic300-depofluxsplat-pdhd.ts", + //model : "ts-model/cosmic390andshower200.ts", + device: "cpu", // "gpucpu", + concurrency: 1, + }, +}; + +local magoutput = 'protodunehd-data-check.root'; +local magnify = import 'pgrapher/experiment/pdhd/magnify-sinks.jsonnet'; +local magio = magnify(tools, magoutput); + +local use_magnify = std.extVar("use_magnify"); + +local dnn_trace_mergers = [ g.pnode({ + type: 'Retagger', + name: 'dnnmerger%d' %n, + data: { + tag_rules: [{ + // frame: {'.*': 'dnnsp',}, + // merge: {'dnnsp\\d': 'dnnsp%d' %n,}, + merge: {'dnnsp\\d[uvw]' : 'dnnsp%d' %n,}, + }], + }, +}, nin=1, nout=1) +for n in std.range(0, std.length(tools.anodes) - 1) ]; + +local nfsp_pipes = [ + g.pipeline( + if use_magnify =='true' then + [ + chsel_pipes[n], + magio.orig_pipe[n], + nf_pipes[n], + magio.raw_pipe[n], + sp_pipes[n], + + dnnroi(tools.anodes[n], ts, output_scale=1.0), + dnn_trace_mergers[n], + + magio.decon_pipe[n], + // magio.threshold_pipe[n], + // magio.debug_pipe[n], // use_roi_debug_mode=true in sp.jsonnet + img_pipes[n], + ] + else [ + chsel_pipes[n], + nf_pipes[n], + sp_pipes[n], + dnnroi(tools.anodes[n], ts, output_scale=1.0), + dnn_trace_mergers[n], + img_pipes[n], + ], + 'nfsp_pipe_%d' % n) + for n in std.range(0, std.length(tools.anodes) - 1) +]; + +// local fanpipe = util.fanpipe('FrameFanout', nfsp_pipes, 'FrameFanin', 'sn_mag_nf'); +local fanout_tag_rules = [ + { + frame: { + '.*': 'orig%d' % 
tools.anodes[n].data.ident, + }, + trace: { + // fake doing Nmult SP pipelines + //orig: ['wiener', 'gauss'], + //'.*': 'orig', + }, + } + for n in std.range(0, std.length(tools.anodes) - 1) + ]; + +local anode_ident = [tools.anodes[n].data.ident for n in std.range(0, std.length(tools.anodes) - 1)]; +local fanin_tag_rules = [ + { + frame: { + //['number%d' % n]: ['output%d' % n, 'output'], + '.*': 'framefanin', + }, + trace: { + ['gauss%d'%ind]:'gauss%d'%ind, + ['wiener%d'%ind]:'wiener%d'%ind, + ['threshold%d'%ind]:'threshold%d'%ind, + // ['tight_lf%d'%ind]:'tight_lf%d'%ind, + ['loose_lf%d'%ind]:'loose_lf%d'%ind, + ['dnnsp%d'%ind]:'dnnsp%d'%ind, + }, + + } + for ind in anode_ident + ]; +// local fanpipe = util.fanpipe('FrameFanout', nfsp_pipes, 'FrameFanin', 'nfsp', [], fanout_tag_rules, fanin_tag_rules); + +local nanodes = std.length(tools.anodes); +local fanpipe = f.multifanout('FrameFanout', nfsp_pipes, [1,nanodes], [nanodes,1], 'sn_mag', fanin_tag_rules); + +local retagger = g.pnode({ + type: 'Retagger', + data: { + // Note: retagger keeps tag_rules an array to be like frame fanin/fanout. + tag_rules: [{ + // Retagger also handles "frame" and "trace" like fanin/fanout + // merge separately all traces like gaussN to gauss. 
+ frame: { + '.*': 'retagger', + }, + merge: { + 'gauss\\d': 'gauss', + 'wiener\\d': 'wiener', + 'threshold\\d': 'threshold', + 'dnnsp\\d': 'dnnsp', + }, + }], + }, +}, nin=1, nout=1); + +local sink = g.pnode({ type: 'DumpFrames' }, nin=1, nout=0); +// local graph = g.pipeline([wcls_input.adc_digits, fanpipe, retagger, wcls_output.sp_signals, sink]); +local graph = g.pipeline([wcls_input.adc_digits, fanpipe]); + +local app = { + type: 'Pgrapher', + data: { + edges: g.edges(graph), + }, +}; + +// Finally, the configuration sequence +g.uses(graph) + [app] diff --git a/cfg/pgrapher/experiment/pdhd/wcls-nf-sp-img.jsonnet b/cfg/pgrapher/experiment/pdhd/wcls-nf-sp-img.jsonnet new file mode 100644 index 000000000..3110b9bbe --- /dev/null +++ b/cfg/pgrapher/experiment/pdhd/wcls-nf-sp-img.jsonnet @@ -0,0 +1,226 @@ + +local reality = std.extVar('reality'); +local sigoutform = std.extVar('signal_output_form'); // eg "sparse" or "dense" + + +local wc = import 'wirecell.jsonnet'; +local f = import "pgrapher/common/funcs.jsonnet"; +local g = import 'pgraph.jsonnet'; + +local raw_input_label = std.extVar('raw_input_label'); // eg "daq" + + +local data_params = import 'params.jsonnet'; +local simu_params = import 'simparams.jsonnet'; +local params = if reality == 'data' then data_params else simu_params; + + +local tools_maker = import 'pgrapher/common/tools.jsonnet'; +local tools = tools_maker(params); + +local wcls_maker = import 'pgrapher/ui/wcls/nodes.jsonnet'; +local wcls = wcls_maker(params, tools); + +//local chndb_maker = import "pgrapher/experiment/pdsp/chndb.jsonnet"; + +local sp_maker = import 'pgrapher/experiment/pdhd/sp.jsonnet'; + + +// Collect the WC/LS input converters for use below. Make sure the +// "name" argument matches what is used in the FHiCL that loads this +// file. In particular if there is no ":" in the inputer then name +// must be the emtpy string. 
+local wcls_input = { + adc_digits: g.pnode({ + type: 'wclsRawFrameSource', + name: '', + data: { + art_tag: raw_input_label, + frame_tags: ['orig'], // this is a WCT designator + // nticks: params.daq.nticks, + }, + }, nin=0, nout=1), + +}; + +// Collect all the wc/ls output converters for use below. Note the +// "name" MUST match what is used in theh "outputers" parameter in the +// FHiCL that loads this file. +local mega_anode = { + type: 'MegaAnodePlane', + name: 'meganodes', + data: { + anodes_tn: [wc.tn(anode) for anode in tools.anodes], + }, +}; + +local wcls_output = { + // The noise filtered "ADC" values. These are truncated for + // art::Event but left as floats for the WCT SP. Note, the tag + // "raw" is somewhat historical as the output is not equivalent to + // "raw data". + nf_digits: g.pnode({ + type: 'wclsFrameSaver', + name: 'nfsaver', + data: { + // anode: wc.tn(tools.anode), + anode: wc.tn(mega_anode), + digitize: true, // true means save as RawDigit, else recob::Wire + frame_tags: ['raw'], + // nticks: params.daq.nticks, + chanmaskmaps: ['bad'], + }, + }, nin=1, nout=1, uses=[mega_anode]), + + + // The output of signal processing. Note, there are two signal + // sets each created with its own filter. The "gauss" one is best + // for charge reconstruction, the "wiener" is best for S/N + // separation. Both are used in downstream WC code. 
+ sp_signals: g.pnode({ + type: 'wclsFrameSaver', + name: 'spsaver', + data: { + // anode: wc.tn(tools.anode), + anode: wc.tn(mega_anode), + digitize: false, // true means save as RawDigit, else recob::Wire + frame_tags: ['gauss', 'wiener'], + frame_scale: [0.001, 0.001], + // nticks: params.daq.nticks, + chanmaskmaps: [], + nticks: -1, + }, + }, nin=1, nout=1, uses=[mega_anode]), +}; + +// local perfect = import 'chndb-perfect.jsonnet'; +local base = import 'chndb-base.jsonnet'; +local chndb = [{ + type: 'OmniChannelNoiseDB', + name: 'ocndbperfect%d' % n, + // data: perfect(params, tools.anodes[n], tools.field, n) { dft:wc.tn(tools.dft) }, + data: base(params, tools.anodes[n], tools.field, n) { dft:wc.tn(tools.dft) }, + uses: [tools.anodes[n], tools.field, tools.dft], +} for n in std.range(0, std.length(tools.anodes) - 1)]; + +local nf_maker = import 'pgrapher/experiment/pdhd/nf.jsonnet'; +local nf_pipes = [nf_maker(params, tools.anodes[n], chndb[n], n, name='nf%d' % n) for n in std.range(0, std.length(tools.anodes) - 1)]; + +local sp = sp_maker(params, tools, { sparse: sigoutform == 'sparse' }); +local sp_pipes = [sp.make_sigproc(a) for a in tools.anodes]; + +local img = import 'pgrapher/experiment/pdhd/img.jsonnet'; +local img_maker = img(); +local img_pipes = [img_maker.per_anode(a) for a in tools.anodes]; + +//local util = import 'pgrapher/experiment/pdhd/funcs.jsonnet'; +local chsel_pipes = [ + g.pnode({ + type: 'ChannelSelector', + name: 'chsel%d' % n, + data: { + channels: std.range(2560 * n, 2560 * (n + 1) - 1), + // tags: ['orig%d' % n], // traces tag + }, + }, nin=1, nout=1) + for n in std.range(0, std.length(tools.anodes) - 1) +]; + +local magoutput = 'protodunehd-data-check.root'; +local magnify = import 'pgrapher/experiment/pdhd/magnify-sinks.jsonnet'; +local magio = magnify(tools, magoutput); + +local use_magnify = std.extVar("use_magnify"); +local nfsp_pipes = [ + g.pipeline( + if use_magnify =='true' then + [ + chsel_pipes[n], + magio.orig_pipe[n], 
+ nf_pipes[n], + magio.raw_pipe[n], + sp_pipes[n], + magio.decon_pipe[n], + // magio.threshold_pipe[n], + // magio.debug_pipe[n], // use_roi_debug_mode=true in sp.jsonnet + img_pipes[n], + ] + else [ + chsel_pipes[n], + // nf_pipes[n], # in order to run over reco1 artROOT, not raw H5. + sp_pipes[n], + img_pipes[n], + ], + 'nfsp_pipe_%d' % n) + for n in std.range(0, std.length(tools.anodes) - 1) +]; + +// local fanpipe = util.fanpipe('FrameFanout', nfsp_pipes, 'FrameFanin', 'sn_mag_nf'); +local fanout_tag_rules = [ + { + frame: { + '.*': 'orig%d' % tools.anodes[n].data.ident, + }, + trace: { + // fake doing Nmult SP pipelines + //orig: ['wiener', 'gauss'], + //'.*': 'orig', + }, + } + for n in std.range(0, std.length(tools.anodes) - 1) + ]; + +local anode_ident = [tools.anodes[n].data.ident for n in std.range(0, std.length(tools.anodes) - 1)]; +local fanin_tag_rules = [ + { + frame: { + //['number%d' % n]: ['output%d' % n, 'output'], + '.*': 'framefanin', + }, + trace: { + ['gauss%d'%ind]:'gauss%d'%ind, + ['wiener%d'%ind]:'wiener%d'%ind, + ['threshold%d'%ind]:'threshold%d'%ind, + // ['tight_lf%d'%ind]:'tight_lf%d'%ind, + ['loose_lf%d'%ind]:'loose_lf%d'%ind, + }, + + } + for ind in anode_ident + ]; +// local fanpipe = util.fanpipe('FrameFanout', nfsp_pipes, 'FrameFanin', 'nfsp', [], fanout_tag_rules, fanin_tag_rules); + +local nanodes = std.length(tools.anodes); +local fanpipe = f.multifanout('FrameFanout', nfsp_pipes, [1,nanodes], [nanodes,1], 'sn_mag', fanin_tag_rules); + +local retagger = g.pnode({ + type: 'Retagger', + data: { + // Note: retagger keeps tag_rules an array to be like frame fanin/fanout. + tag_rules: [{ + // Retagger also handles "frame" and "trace" like fanin/fanout + // merge separately all traces like gaussN to gauss. 
+ frame: { + '.*': 'retagger', + }, + merge: { + 'gauss\\d': 'gauss', + 'wiener\\d': 'wiener', + }, + }], + }, +}, nin=1, nout=1); + +local sink = g.pnode({ type: 'DumpFrames' }, nin=1, nout=0); +// local graph = g.pipeline([wcls_input.adc_digits, fanpipe, retagger, wcls_output.sp_signals, sink]); +local graph = g.pipeline([wcls_input.adc_digits, fanpipe]); + +local app = { + type: 'Pgrapher', + data: { + edges: g.edges(graph), + }, +}; + +// Finally, the configuration sequence +g.uses(graph) + [app] diff --git a/cfg/pgrapher/experiment/pdhd/wcls-nf-sp.jsonnet b/cfg/pgrapher/experiment/pdhd/wcls-nf-sp.jsonnet new file mode 100644 index 000000000..f8fbf4857 --- /dev/null +++ b/cfg/pgrapher/experiment/pdhd/wcls-nf-sp.jsonnet @@ -0,0 +1,198 @@ +// This is a main entry point to configure a WC/LS job that applies +// noise filtering and signal processing to existing RawDigits. The +// FHiCL is expected to provide the following parameters as attributes +// in the "params" structure. +// +// Manual testing, eg: +// +// jsonnet -V reality=data -V epoch=dynamic -V raw_input_label=daq \\ +// -V signal_output_form=sparse \\ +// -J cfg cfg/pgrapher/experiment/uboone/wcls-nf-sp.jsonnet + + +local epoch = std.extVar('epoch'); // eg "dynamic", "after", "before", "perfect" +local reality = std.extVar('reality'); +local sigoutform = std.extVar('signal_output_form'); // eg "sparse" or "dense" + + +local wc = import 'wirecell.jsonnet'; +local g = import 'pgraph.jsonnet'; + +local raw_input_label = std.extVar('raw_input_label'); // eg "daq" + + +local data_params = import 'params.jsonnet'; +local simu_params = import 'simparams.jsonnet'; +local params = if reality == 'data' then data_params else simu_params; + + +local tools_maker = import 'pgrapher/common/tools.jsonnet'; +local tools = tools_maker(params); + +local wcls_maker = import 'pgrapher/ui/wcls/nodes.jsonnet'; +local wcls = wcls_maker(params, tools); + +//local nf_maker = import "pgrapher/experiment/pdhd/nf.jsonnet"; +//local 
chndb_maker = import "pgrapher/experiment/pdhd/chndb.jsonnet"; + +local sp_maker = import 'pgrapher/experiment/pdhd/sp.jsonnet'; + +//local chndbm = chndb_maker(params, tools); +//local chndb = if epoch == "dynamic" then chndbm.wcls_multi(name="") else chndbm.wct(epoch); + + +// Collect the WC/LS input converters for use below. Make sure the +// "name" argument matches what is used in the FHiCL that loads this +// file. In particular if there is no ":" in the inputer then name +// must be the emtpy string. +local wcls_input = { + adc_digits: g.pnode({ + type: 'wclsRawFrameSource', + name: '', + data: { + art_tag: raw_input_label, + frame_tags: ['orig'], // this is a WCT designator + // nticks: params.daq.nticks, + }, + }, nin=0, nout=1), + +}; + +// Collect all the wc/ls output converters for use below. Note the +// "name" MUST match what is used in theh "outputers" parameter in the +// FHiCL that loads this file. +local mega_anode = { + type: 'MegaAnodePlane', + name: 'meganodes', + data: { + anodes_tn: [wc.tn(anode) for anode in tools.anodes], + }, +}; +local wcls_output = { + // The noise filtered "ADC" values. These are truncated for + // art::Event but left as floats for the WCT SP. Note, the tag + // "raw" is somewhat historical as the output is not equivalent to + // "raw data". + nf_digits: g.pnode({ + type: 'wclsFrameSaver', + name: 'nfsaver', + data: { + // anode: wc.tn(tools.anode), + anode: wc.tn(mega_anode), + digitize: true, // true means save as RawDigit, else recob::Wire + frame_tags: ['raw'], + // nticks: params.daq.nticks, + chanmaskmaps: ['bad'], + }, + }, nin=1, nout=1, uses=[mega_anode]), + + + // The output of signal processing. Note, there are two signal + // sets each created with its own filter. The "gauss" one is best + // for charge reconstruction, the "wiener" is best for S/N + // separation. Both are used in downstream WC code. 
+ sp_signals: g.pnode({ + type: 'wclsFrameSaver', + name: 'spsaver', + data: { + // anode: wc.tn(tools.anode), + anode: wc.tn(mega_anode), + digitize: false, // true means save as RawDigit, else recob::Wire + frame_tags: ['gauss', 'wiener'], + frame_scale: [0.001, 0.001], + // nticks: params.daq.nticks, + chanmaskmaps: [], + nticks: -1, + }, + }, nin=1, nout=1, uses=[mega_anode]), +}; + +// local perfect = import 'chndb-perfect.jsonnet'; +local base = import 'chndb-base.jsonnet'; +local chndb = [{ + type: 'OmniChannelNoiseDB', + name: 'ocndbperfect%d' % n, + // data: perfect(params, tools.anodes[n], tools.field, n), + data: base(params, tools.anodes[n], tools.field, n){dft:wc.tn(tools.dft)}, + uses: [tools.anodes[n], tools.field, tools.dft], +} for n in std.range(0, std.length(tools.anodes) - 1)]; + +local nf_maker = import 'pgrapher/experiment/pdhd/nf.jsonnet'; +local nf_pipes = [nf_maker(params, tools.anodes[n], chndb[n], n, name='nf%d' % n) for n in std.range(0, std.length(tools.anodes) - 1)]; + +local sp = sp_maker(params, tools, { sparse: sigoutform == 'sparse' }); +local sp_pipes = [sp.make_sigproc(a) for a in tools.anodes]; + +local chsel_pipes = [ + g.pnode({ + type: 'ChannelSelector', + name: 'chsel%d' % n, + data: { + channels: std.range(2560 * n, 2560 * (n + 1) - 1), + //channels: if n==0 then std.range(2560*n,2560*(n+1)-1) else [], + //tags: ['orig%d' % n], // traces tag + }, + }, nin=1, nout=1) + for n in std.range(0, std.length(tools.anodes) - 1) +]; + +local magoutput = 'protodunehd-data-check.root'; +local magnify = import 'pgrapher/experiment/pdhd/magnify-sinks.jsonnet'; +local magio = magnify(tools, magoutput); + +local nfsp_pipes = [ + g.pipeline([ + chsel_pipes[n], + // magio.orig_pipe[n], + + nf_pipes[n], + // magio.raw_pipe[n], + + sp_pipes[n], + // magio.decon_pipe[n], + // magio.threshold_pipe[n], + // magio.debug_pipe[n], // use_roi_debug_mode=true in sp.jsonnet + ], + 'nfsp_pipe_%d' % n) + for n in std.range(0, std.length(tools.anodes) - 
1) +]; + +//local f = import 'pgrapher/common/funcs.jsonnet'; +local f = import 'pgrapher/experiment/pdhd/funcs.jsonnet'; +//local outtags = ['gauss%d' % n for n in std.range(0, std.length(tools.anodes) - 1)]; +//local fanpipe = f.fanpipe('FrameFanout', nfsp_pipes, 'FrameFanin', 'sn_mag_nf', outtags); +local fanpipe = f.fanpipe('FrameFanout', nfsp_pipes, 'FrameFanin', 'sn_mag_nf'); + +local retagger = g.pnode({ + type: 'Retagger', + data: { + // Note: retagger keeps tag_rules an array to be like frame fanin/fanout. + tag_rules: [{ + // Retagger also handles "frame" and "trace" like fanin/fanout + // merge separately all traces like gaussN to gauss. + frame: { + '.*': 'retagger', + }, + merge: { + 'gauss\\d': 'gauss', + 'wiener\\d': 'wiener', + }, + }], + }, +}, nin=1, nout=1); + +local sink = g.pnode({ type: 'DumpFrames' }, nin=1, nout=0); + + +// local graph = g.pipeline([wcls_input.adc_digits, rootfile_creation_frames, fanpipe, retagger, wcls_output.sp_signals, sink]); +local graph = g.pipeline([wcls_input.adc_digits, fanpipe, retagger, wcls_output.sp_signals, sink]); + +local app = { + type: 'Pgrapher', + data: { + edges: g.edges(graph), + }, +}; + +// Finally, the configuration sequence +g.uses(graph) + [app] diff --git a/cfg/pgrapher/experiment/pdhd/wcls-nf.jsonnet b/cfg/pgrapher/experiment/pdhd/wcls-nf.jsonnet new file mode 100644 index 000000000..03bf825ae --- /dev/null +++ b/cfg/pgrapher/experiment/pdhd/wcls-nf.jsonnet @@ -0,0 +1,191 @@ +// This is a main entry point to configure a WC/LS job that applies +// noise filtering to existing RawDigits. The FHiCL is expected to +// provide the following parameters as attributes in the "params" structure. 
+ + +local reality = std.extVar('reality'); +local sigoutform = std.extVar('signal_output_form'); // eg "sparse" or "dense" + + +local wc = import 'wirecell.jsonnet'; +local g = import 'pgraph.jsonnet'; + +local raw_input_label = std.extVar('raw_input_label'); // eg "daq" + + +local data_params = import 'pgrapher/experiment/pdhd/params.jsonnet'; +local simu_params = import 'pgrapher/experiment/pdhd/simparams.jsonnet'; +local base = if reality == 'data' then data_params else simu_params; +local params = base { + daq: super.daq { + tick: 1.0/std.extVar('clock_speed') * wc.us, + }, +}; + +local tools_maker = import 'pgrapher/common/tools.jsonnet'; +local tools = tools_maker(params); + +local wcls_maker = import 'pgrapher/ui/wcls/nodes.jsonnet'; +local wcls = wcls_maker(params, tools); + +//local nf_maker = import "pgrapher/experiment/pdhd/nf.jsonnet"; +//local chndb_maker = import "pgrapher/experiment/pdhd/chndb.jsonnet"; + +local sp_maker = import 'pgrapher/experiment/pdhd/sp.jsonnet'; + +// Collect the WC/LS input converters for use below. Make sure the +// "name" argument matches what is used in the FHiCL that loads this +// file. In particular if there is no ":" in the inputer then name +// must be the emtpy string. +local wcls_input = { + adc_digits: g.pnode({ + type: 'wclsRawFrameSource', + name: '', + data: { + art_tag: raw_input_label, + frame_tags: ['orig'], // this is a WCT designator + // nticks: params.daq.nticks, + tick: params.daq.tick, + }, + }, nin=0, nout=1), + +}; + +// Collect all the wc/ls output converters for use below. Note the +// "name" MUST match what is used in theh "outputers" parameter in the +// FHiCL that loads this file. +local mega_anode = { + type: 'MegaAnodePlane', + name: 'meganodes', + data: { + anodes_tn: [wc.tn(anode) for anode in tools.anodes], + }, +}; +local wcls_output = { + // The noise filtered "ADC" values. These are truncated for + // art::Event but left as floats for the WCT SP. 
Note, the tag + // "raw" is somewhat historical as the output is not equivalent to + // "raw data". + nf_digits: g.pnode({ + type: 'wclsFrameSaver', + name: 'nfsaver', + data: { + // anode: wc.tn(tools.anode), + anode: wc.tn(mega_anode), + digitize: true, // true means save as RawDigit, else recob::Wire + frame_tags: ['raw'], + // nticks: params.daq.nticks, + chanmaskmaps: ['bad'], + nticks: 0, // If nonzero, force number of ticks in output waveforms. + // If zero, use whatever input data has. + // If -1, use value as per LS's detector properties service. + }, + }, nin=1, nout=1, uses=[mega_anode]), + + + // The output of signal processing. Note, there are two signal + // sets each created with its own filter. The "gauss" one is best + // for charge reconstruction, the "wiener" is best for S/N + // separation. Both are used in downstream WC code. + sp_signals: g.pnode({ + type: 'wclsFrameSaver', + name: 'spsaver', + data: { + // anode: wc.tn(tools.anode), + anode: wc.tn(mega_anode), + digitize: false, // true means save as RawDigit, else recob::Wire + frame_tags: ['gauss', 'wiener'], + frame_scale: [0.001, 0.001], + // nticks: params.daq.nticks, + chanmaskmaps: [], + nticks: -1, + }, + }, nin=1, nout=1, uses=[mega_anode]), +}; + +// local perfect = import 'chndb-perfect.jsonnet'; +local base = import 'chndb-base.jsonnet'; +local chndb = [{ + type: 'OmniChannelNoiseDB', + name: 'ocndbperfect%d' % n, + // data: perfect(params, tools.anodes[n], tools.field, n), + data: base(params, tools.anodes[n], tools.field, n){dft:wc.tn(tools.dft)}, + uses: [tools.anodes[n], tools.field, tools.dft], +} for n in std.range(0, std.length(tools.anodes) - 1)]; + +local nf_maker = import 'pgrapher/experiment/pdhd/nf.jsonnet'; +local nf_pipes = [nf_maker(params, tools.anodes[n], chndb[n], n, name='nf%d' % n) for n in std.range(0, std.length(tools.anodes) - 1)]; + +local sp = sp_maker(params, tools, { sparse: sigoutform == 'sparse' }); +local sp_pipes = [sp.make_sigproc(a) for a in 
tools.anodes]; + +local chsel_pipes = [ + g.pnode({ + type: 'ChannelSelector', + name: 'chsel%d' % n, + data: { + channels: std.range(2560 * n, 2560 * (n + 1) - 1), + //channels: if n==0 then std.range(2560*n,2560*(n+1)-1) else [], + //tags: ['orig%d' % n], // traces tag + }, + }, nin=1, nout=1) + for n in std.range(0, std.length(tools.anodes) - 1) +]; + +local magoutput = 'protodunehd-data-check.root'; +local magnify = import 'pgrapher/experiment/pdhd/magnify-sinks.jsonnet'; +local magio = magnify(tools, magoutput); + +local nfsp_pipes = [ + g.pipeline([ + chsel_pipes[n], + // magio.orig_pipe[n], + + nf_pipes[n], + // magio.raw_pipe[n], + + // sp_pipes[n], + // magio.decon_pipe[n], + // magio.threshold_pipe[n], + // magio.debug_pipe[n], // use_roi_debug_mode=true in sp.jsonnet + ], + 'nfsp_pipe_%d' % n) + for n in std.range(0, std.length(tools.anodes) - 1) +]; + +//local f = import 'pgrapher/common/funcs.jsonnet'; +local f = import 'pgrapher/experiment/pdhd/funcs.jsonnet'; +//local outtags = ['gauss%d' % n for n in std.range(0, std.length(tools.anodes) - 1)]; +//local fanpipe = f.fanpipe('FrameFanout', nfsp_pipes, 'FrameFanin', 'sn_mag_nf', outtags); +local fanpipe = f.fanpipe('FrameFanout', nfsp_pipes, 'FrameFanin', 'sn_mag_nf'); + +local retagger = g.pnode({ + type: 'Retagger', + data: { + // Note: retagger keeps tag_rules an array to be like frame fanin/fanout. + tag_rules: [{ + // Retagger also handles "frame" and "trace" like fanin/fanout + // merge separately all traces like gaussN to gauss. 
+ frame: { + '.*': 'retagger', + }, + merge: { + 'raw\\d': 'raw', + }, + }], + }, +}, nin=1, nout=1); + +local sink = g.pnode({ type: 'DumpFrames' }, nin=1, nout=0); + +local graph = g.pipeline([wcls_input.adc_digits, fanpipe, retagger, wcls_output.nf_digits, sink]); + +local app = { + type: 'Pgrapher', + data: { + edges: g.edges(graph), + }, +}; + +// Finally, the configuration sequence +g.uses(graph) + [app] diff --git a/cfg/pgrapher/experiment/pdhd/wcls-rawdigit-dnnsp.jsonnet b/cfg/pgrapher/experiment/pdhd/wcls-rawdigit-dnnsp.jsonnet new file mode 100644 index 000000000..aa25eecbe --- /dev/null +++ b/cfg/pgrapher/experiment/pdhd/wcls-rawdigit-dnnsp.jsonnet @@ -0,0 +1,364 @@ +// This is a main entry point to configure a WC/LS job that applies +// noise filtering and signal processing to existing RawDigits. The +// FHiCL is expected to provide the following parameters as attributes +// in the "params" structure. +// +// epoch: the hardware noise fix epoch: "before", "after", "dynamic" or "perfect" +// reality: whether we are running on "data" or "sim"ulation.
+// raw_input_label: the art::Event inputTag for the input RawDigit +// +// see the .fcl of the same name for an example +// +// Manual testing, eg: +// +// jsonnet -V reality=data -V epoch=dynamic -V raw_input_label=daq \\ +// -V signal_output_form=sparse \\ +// -J cfg cfg/pgrapher/experiment/uboone/wcls-nf-sp.jsonnet +// +// jsonnet -V reality=sim -V epoch=perfect -V raw_input_label=daq \\ +// -V signal_output_form=sparse \\ +// -J cfg cfg/pgrapher/experiment/uboone/wcls-nf-sp.jsonnet + + +local epoch = std.extVar('epoch'); // eg "dynamic", "after", "before", "perfect" +local reality = std.extVar('reality'); +local sigoutform = std.extVar('signal_output_form'); // eg "sparse" or "dense" +// local nsample_ext = std.extVar('nsample'); // eg 6000, 10000, or "auto" +// local nsample = if nsample_ext == 'auto' then 6000 else std.parseInt(nsample_ext); // set auto to 0 once larwirecell fixed + +local wc = import 'wirecell.jsonnet'; +local g = import 'pgraph.jsonnet'; + +local raw_input_label = std.extVar('raw_input_label'); // eg "daq" + + +local data_params = import 'pgrapher/experiment/pdhd/params.jsonnet'; +local simu_params = import 'pgrapher/experiment/pdhd/simparams.jsonnet'; +local base = if reality == 'data' then data_params else simu_params; +local params = base { + daq: super.daq { + tick: 1.0/std.extVar('clock_speed') * wc.us, + }, +}; + +local tools_maker = import 'pgrapher/common/tools.jsonnet'; +local tools = tools_maker(params); + +local wcls_maker = import 'pgrapher/ui/wcls/nodes.jsonnet'; +local wcls = wcls_maker(params, tools); + +//local nf_maker = import "pgrapher/experiment/pdsp/nf.jsonnet"; +//local chndb_maker = import "pgrapher/experiment/pdsp/chndb.jsonnet"; + +local sp_maker = import 'pgrapher/experiment/pdhd/sp.jsonnet'; + +//local chndbm = chndb_maker(params, tools); +//local chndb = if epoch == "dynamic" then chndbm.wcls_multi(name="") else chndbm.wct(epoch); + + +// Collect the WC/LS input converters for use below. 
Make sure the +// "name" argument matches what is used in the FHiCL that loads this +// file. In particular if there is no ":" in the inputer then name +// must be the emtpy string. +local wcls_input = { + adc_digits: g.pnode({ + type: 'wclsRawFrameSource', + name: '', + data: { + art_tag: raw_input_label, + frame_tags: ['orig'], // this is a WCT designator + //nticks: params.daq.nticks, + // nticks: nsample, + tick: params.daq.tick, + }, + }, nin=0, nout=1), + +}; + +// Collect all the wc/ls output converters for use below. Note the +// "name" MUST match what is used in theh "outputers" parameter in the +// FHiCL that loads this file. +local mega_anode = { + type: 'MegaAnodePlane', + name: 'meganodes', + data: { + anodes_tn: [wc.tn(anode) for anode in tools.anodes], + }, +}; +local wcls_output = { + // The noise filtered "ADC" values. These are truncated for + // art::Event but left as floats for the WCT SP. Note, the tag + // "raw" is somewhat historical as the output is not equivalent to + // "raw data". + nf_digits: g.pnode({ + type: 'wclsFrameSaver', + name: 'nfsaver', + data: { + anode: wc.tn(tools.anode), + digitize: true, // true means save as RawDigit, else recob::Wire + frame_tags: ['raw'], + //nticks: params.daq.nticks, + // nticks: nsample, + chanmaskmaps: ['bad'], + }, + }, nin=1, nout=1, uses=[tools.anode]), + + + // The output of signal processing. Note, there are two signal + // sets each created with its own filter. The "gauss" one is best + // for charge reconstruction, the "wiener" is best for S/N + // separation. Both are used in downstream WC code. 
+ sp_signals: g.pnode({ + type: 'wclsFrameSaver', + name: 'spsaver', + data: { + // anode: wc.tn(tools.anode), + anode: wc.tn(mega_anode), + digitize: false, // true means save as RawDigit, else recob::Wire + frame_tags: ['gauss', 'wiener','dnnsp'], + frame_scale: [0.001, 0.001,0.001], + //nticks: params.daq.nticks, + // nticks: nsample, + chanmaskmaps: [], + summary_tags: ['threshold'], // retagger makes this tag + // just one threshold value + summary_operator: { threshold: 'set' }, + nticks: -1, + + }, + }, nin=1, nout=1, uses=[mega_anode]), +}; + +// local perfect = import 'chndb-perfect.jsonnet'; +local base = import 'pgrapher/experiment/pdhd/chndb-base.jsonnet'; +local chndb = [{ + type: 'OmniChannelNoiseDB', + name: 'ocndbperfect%d' % n, + // data: perfect(params, tools.anodes[n], tools.field, n), + data: base(params, tools.anodes[n], tools.field, n){dft:wc.tn(tools.dft)}, + uses: [tools.anodes[n], tools.field, tools.dft], +} for n in std.range(0, std.length(tools.anodes) - 1)]; + +// local nf_maker = import 'pgrapher/experiment/pdsp/nf.jsonnet'; +// local nf_pipes = [nf_maker(params, tools.anodes[n], chndb[n], n, name='nf%d' % n) for n in std.range(0, std.length(tools.anodes) - 1)]; + +// an empty omnibus noise filter +// for suppressing bad channels stored in the noise db +// local obnf = [ +// g.pnode( +// { +// type: 'OmnibusNoiseFilter', +// name: 'nf%d' % n, +// data: { +// +// // This is the number of bins in various filters +// // nsamples: params.nf.nsamples, +// +// channel_filters: [], +// grouped_filters: [], +// channel_status_filters: [], +// noisedb: wc.tn(chndb[n]), +// // intraces: 'orig%d' % n, // frame tag get all traces +// intraces: 'orig', // frame tag get all traces +// outtraces: 'raw%d' % n, +// }, +// }, uses=[chndb[n], tools.anodes[n]], nin=1, nout=1 +// ) +// for n in std.range(0, std.length(tools.anodes) - 1) +// ]; +// local nf_pipes = [g.pipeline([obnf[n]], name='nf%d' % n) for n in std.range(0, std.length(tools.anodes) - 1)]; 
+ +local sp_override = { // assume all tags set in base sp.jsonnet + sparse: sigoutform == 'sparse', + // wiener_tag: "", + // gauss_tag: "", + use_roi_refinement: true, + use_roi_debug_mode: true, + troi_col_th_factor: 5, + //tight_lf_tag: "", + // loose_lf_tag: "", + //cleanup_roi_tag: "", + break_roi_loop1_tag: "", + break_roi_loop2_tag: "", + shrink_roi_tag: "", + extend_roi_tag: "", + //m_decon_charge_tag: "", + use_multi_plane_protection: true, + mp_tick_resolution: 10, +}; + + +//local sp = sp_maker(params, tools, { sparse: sigoutform == 'sparse' }); +local sp = sp_maker(params, tools, sp_override); +local sp_pipes = [sp.make_sigproc(a) for a in tools.anodes]; + +local chsel_pipes = [ + g.pnode({ + type: 'ChannelSelector', + name: 'chsel%d' % n, + data: { + channels: std.range(2560 * n, 2560 * (n + 1) - 1), + //tags: ['orig%d' % n], // traces tag + }, + }, nin=1, nout=1) + for n in std.range(0, std.length(tools.anodes) - 1) +]; + +local hio_orig = [g.pnode({ + type: 'HDF5FrameTap', + name: 'hio_orig%d' % n, + data: { + anode: wc.tn(tools.anodes[n]), + trace_tags: ['orig%d'%n], + filename: "g4-rec-%d.h5" % n, + chunk: [0, 0], // ncol, nrow + gzip: 2, + high_throughput: true, + }, + }, nin=1, nout=1), + for n in std.range(0, std.length(tools.anodes) - 1) + ]; + +local hio_sp = [g.pnode({ + type: 'HDF5FrameTap', + name: 'hio_sp%d' % n, + data: { + anode: wc.tn(tools.anodes[n]), + trace_tags: ['loose_lf%d' % n + , 'tight_lf%d' % n + , 'cleanup_roi%d' % n + , 'break_roi_1st%d' % n + , 'break_roi_2nd%d' % n + , 'shrink_roi%d' % n + , 'extend_roi%d' % n + , 'mp3_roi%d' % n + , 'mp2_roi%d' % n + , 'decon_charge%d' % n + , 'gauss%d' % n], + filename: "g4-rec-%d.h5" % n, + chunk: [0, 0], // ncol, nrow + gzip: 2, + high_throughput: true, + }, + }, nin=1, nout=1), + for n in std.range(0, std.length(tools.anodes) - 1) + ]; + + +local hio_dnn = [g.pnode({ + type: 'HDF5FrameTap', + name: 'hio_dnn%d' % n, + data: { + anode: wc.tn(tools.anodes[n]), + // trace_tags:
['dnn_sp%d' % n], + trace_tags: ['dnnsp%d' % n], + filename: "g4-rec-%d.h5" % n, + chunk: [0, 0], // ncol, nrow + gzip: 2, + high_throughput: true, + }, + }, nin=1, nout=1), + for n in std.range(0, std.length(tools.anodes) - 1) + ]; + + +local dnnroi = import 'pgrapher/experiment/pdhd/dnnroi.jsonnet'; +local ts = { + type: "TorchService", + name: "dnnroi", + data: { + // model: "ts-model/unet-l23-cosmic500-e50.ts", + // model: "ts-model/CP49.ts", + //model: "ts-model/unet-cosmic390-newwc-depofluxsplat-pdhd.ts", + // model: "ts-model/unet-cosmic300-depofluxsplat-pdhd.ts", + model : "ts-model/cosmic390andshower200.ts", + device: "cpu", // "gpucpu", + concurrency: 1, + }, +}; + + + + + + + +local magoutput = 'protodunehd-data-check.root'; +local magnify = import 'pgrapher/experiment/pdhd/magnify-sinks.jsonnet'; +local magio = magnify(tools, magoutput); + +local dnn_trace_mergers = [ g.pnode({ + type: 'Retagger', + name: 'dnnmerger%d' %n, + data: { + tag_rules: [{ + // frame: {'.*': 'dnnsp',}, + // merge: {'dnnsp\\d': 'dnnsp%d' %n,}, + merge: {'dnnsp\\d[uvw]' : 'dnnsp%d' %n,}, + }], + }, +}, nin=1, nout=1) +for n in std.range(0, std.length(tools.anodes) - 1) ]; + +local nfsp_pipes = [ + g.pipeline([ + chsel_pipes[n], + // magio.orig_pipe[n], + // nf_pipes[n], + // magio.raw_pipe[n], + sp_pipes[n], + + // hio_sp[n], + dnnroi(tools.anodes[n], ts, output_scale=1.0), + dnn_trace_mergers[n], + // hio_dnn[n], + + // magio.decon_pipe[n], + // magio.threshold_pipe[n], + // magio.debug_pipe[n], // use_roi_debug_mode=true in sp.jsonnet + ], + 'nfsp_pipe_%d' % n) + for n in std.range(0, std.length(tools.anodes) - 1) +]; + +//local f = import 'pgrapher/common/funcs.jsonnet'; +local f = import 'pgrapher/experiment/pdhd/funcs.jsonnet'; +//local outtags = ['gauss%d' % n for n in std.range(0, std.length(tools.anodes) - 1)]; +//local fanpipe = f.fanpipe('FrameFanout', nfsp_pipes, 'FrameFanin', 'sn_mag_nf', outtags); +local fanpipe = f.fanpipe('FrameFanout', nfsp_pipes, 'FrameFanin', 
'sn_mag_nf'); + +local retagger = g.pnode({ + type: 'Retagger', + data: { + // Note: retagger keeps tag_rules an array to be like frame fanin/fanout. + tag_rules: [{ + // Retagger also handles "frame" and "trace" like fanin/fanout + // merge separately all traces like gaussN to gauss. + frame: { + '.*': 'retagger', + }, + merge: { + 'gauss\\d': 'gauss', + 'wiener\\d': 'wiener', + 'threshold\\d': 'threshold', + 'dnnsp\\d': 'dnnsp', + }, + }], + }, +}, nin=1, nout=1); + +local sink = g.pnode({ type: 'DumpFrames' }, nin=1, nout=0); + + +//local graph = g.pipeline([wcls_input.adc_digits, rootfile_creation_frames, fanpipe, retagger, wcls_output.sp_signals, sink]); +local graph = g.pipeline([wcls_input.adc_digits, fanpipe, retagger, wcls_output.sp_signals, sink]); + +local app = { + type: 'Pgrapher', + data: { + edges: g.edges(graph), + }, +}; + +// Finally, the configuration sequence +g.uses(graph) + [app] \ No newline at end of file diff --git a/cfg/pgrapher/experiment/pdhd/wcls-rawdigit-sp.jsonnet b/cfg/pgrapher/experiment/pdhd/wcls-rawdigit-sp.jsonnet new file mode 100644 index 000000000..4412ca0c5 --- /dev/null +++ b/cfg/pgrapher/experiment/pdhd/wcls-rawdigit-sp.jsonnet @@ -0,0 +1,251 @@ +// This is a main entry point to configure a WC/LS job that applies +// noise filtering and signal processing to existing RawDigits. The +// FHiCL is expected to provide the following parameters as attributes +// in the "params" structure. +// +// epoch: the hardware noise fix expoch: "before", "after", "dynamic" or "perfect" +// reality: whether we are running on "data" or "sim"ulation. 
+// raw_input_label: the art::Event inputTag for the input RawDigit +// +// see the .fcl of the same name for an example +// +// Manual testing, eg: +// +// jsonnet -V reality=data -V epoch=dynamic -V raw_input_label=daq \\ +// -V signal_output_form=sparse \\ +// -J cfg cfg/pgrapher/experiment/uboone/wcls-nf-sp.jsonnet +// +// jsonnet -V reality=sim -V epoch=perfect -V raw_input_label=daq \\ +// -V signal_output_form=sparse \\ +// -J cfg cfg/pgrapher/experiment/uboone/wcls-nf-sp.jsonnet + + +local epoch = std.extVar('epoch'); // eg "dynamic", "after", "before", "perfect" +local reality = std.extVar('reality'); +local sigoutform = std.extVar('signal_output_form'); // eg "sparse" or "dense" +// local nsample_ext = std.extVar('nsample'); // eg 6000, 10000, or "auto" +// local nsample = if nsample_ext == 'auto' then 6000 else std.parseInt(nsample_ext); // set auto to 0 once larwirecell fixed + +local wc = import 'wirecell.jsonnet'; +local g = import 'pgraph.jsonnet'; + +local raw_input_label = std.extVar('raw_input_label'); // eg "daq" + + +local data_params = import 'pgrapher/experiment/pdhd/params.jsonnet'; +local simu_params = import 'pgrapher/experiment/pdhd/simparams.jsonnet'; +local base = if reality == 'data' then data_params else simu_params; +local params = base { + daq: super.daq { + tick: 1.0/std.extVar('clock_speed') * wc.us, + }, +}; + +local tools_maker = import 'pgrapher/common/tools.jsonnet'; +local tools = tools_maker(params); + +local wcls_maker = import 'pgrapher/ui/wcls/nodes.jsonnet'; +local wcls = wcls_maker(params, tools); + +//local nf_maker = import "pgrapher/experiment/pdsp/nf.jsonnet"; +//local chndb_maker = import "pgrapher/experiment/pdsp/chndb.jsonnet"; + +local sp_maker = import 'pgrapher/experiment/pdhd/sp.jsonnet'; + +//local chndbm = chndb_maker(params, tools); +//local chndb = if epoch == "dynamic" then chndbm.wcls_multi(name="") else chndbm.wct(epoch); + + +// Collect the WC/LS input converters for use below. 
Make sure the +// "name" argument matches what is used in the FHiCL that loads this +// file. In particular if there is no ":" in the inputer then name +// must be the empty string. +local wcls_input = { + adc_digits: g.pnode({ + type: 'wclsRawFrameSource', + name: '', + data: { + art_tag: raw_input_label, + frame_tags: ['orig'], // this is a WCT designator + //nticks: params.daq.nticks, + // nticks: nsample, + tick: params.daq.tick, + }, + }, nin=0, nout=1), + +}; + +// Collect all the wc/ls output converters for use below. Note the +// "name" MUST match what is used in the "outputers" parameter in the +// FHiCL that loads this file. +local mega_anode = { + type: 'MegaAnodePlane', + name: 'meganodes', + data: { + anodes_tn: [wc.tn(anode) for anode in tools.anodes], + }, +}; +local wcls_output = { + // The noise filtered "ADC" values. These are truncated for + // art::Event but left as floats for the WCT SP. Note, the tag + // "raw" is somewhat historical as the output is not equivalent to + // "raw data". + nf_digits: g.pnode({ + type: 'wclsFrameSaver', + name: 'nfsaver', + data: { + anode: wc.tn(tools.anode), + digitize: true, // true means save as RawDigit, else recob::Wire + frame_tags: ['raw'], + //nticks: params.daq.nticks, + // nticks: nsample, + chanmaskmaps: ['bad'], + }, + }, nin=1, nout=1, uses=[tools.anode]), + + + // The output of signal processing. Note, there are two signal + // sets each created with its own filter. The "gauss" one is best + // for charge reconstruction, the "wiener" is best for S/N + // separation. Both are used in downstream WC code. 
+ sp_signals: g.pnode({ + type: 'wclsFrameSaver', + name: 'spsaver', + data: { + // anode: wc.tn(tools.anode), + anode: wc.tn(mega_anode), + digitize: false, // true means save as RawDigit, else recob::Wire + frame_tags: ['gauss', 'wiener'], + frame_scale: [0.001, 0.001], + //nticks: params.daq.nticks, + // nticks: nsample, + chanmaskmaps: [], + summary_tags: ['threshold'], // retagger makes this tag + // just one threshold value + summary_operator: { threshold: 'set' }, + nticks: -1, + + }, + }, nin=1, nout=1, uses=[mega_anode]), +}; + +// local perfect = import 'chndb-perfect.jsonnet'; +local base = import 'pgrapher/experiment/pdhd/chndb-base.jsonnet'; +local chndb = [{ + type: 'OmniChannelNoiseDB', + name: 'ocndbperfect%d' % n, + // data: perfect(params, tools.anodes[n], tools.field, n), + data: base(params, tools.anodes[n], tools.field, n){dft:wc.tn(tools.dft)}, + uses: [tools.anodes[n], tools.field, tools.dft], +} for n in std.range(0, std.length(tools.anodes) - 1)]; + +// local nf_maker = import 'pgrapher/experiment/pdsp/nf.jsonnet'; +// local nf_pipes = [nf_maker(params, tools.anodes[n], chndb[n], n, name='nf%d' % n) for n in std.range(0, std.length(tools.anodes) - 1)]; + +// an empty omnibus noise filter +// for suppressing bad channels stored in the noise db +// local obnf = [ +// g.pnode( +// { +// type: 'OmnibusNoiseFilter', +// name: 'nf%d' % n, +// data: { +// +// // This is the number of bins in various filters +// // nsamples: params.nf.nsamples, +// +// channel_filters: [], +// grouped_filters: [], +// channel_status_filters: [], +// noisedb: wc.tn(chndb[n]), +// // intraces: 'orig%d' % n, // frame tag get all traces +// intraces: 'orig', // frame tag get all traces +// outtraces: 'raw%d' % n, +// }, +// }, uses=[chndb[n], tools.anodes[n]], nin=1, nout=1 +// ) +// for n in std.range(0, std.length(tools.anodes) - 1) +// ]; +// local nf_pipes = [g.pipeline([obnf[n]], name='nf%d' % n) for n in std.range(0, std.length(tools.anodes) - 1)]; + +local 
sp_override = { + sparse: sigoutform == 'sparse', + // use_multi_plane_protection: true, + mp_tick_resolution: 10, + // use_roi_debug_mode: true, +}; +local sp = sp_maker(params, tools, sp_override); +// local sp = sp_maker(params, tools, { sparse: sigoutform == 'sparse' }); +local sp_pipes = [sp.make_sigproc(a) for a in tools.anodes]; + +local chsel_pipes = [ + g.pnode({ + type: 'ChannelSelector', + name: 'chsel%d' % n, + data: { + channels: std.range(2560 * n, 2560 * (n + 1) - 1), + //tags: ['orig%d' % n], // traces tag + }, + }, nin=1, nout=1) + for n in std.range(0, std.length(tools.anodes) - 1) +]; + +local magoutput = 'protodunehd-data-check.root'; +local magnify = import 'pgrapher/experiment/pdhd/magnify-sinks.jsonnet'; +local magio = magnify(tools, magoutput); + +local nfsp_pipes = [ + g.pipeline([ + chsel_pipes[n], + // magio.orig_pipe[n], + // nf_pipes[n], + // magio.raw_pipe[n], + sp_pipes[n], + // magio.decon_pipe[n], + // magio.threshold_pipe[n], + // magio.debug_pipe[n], // use_roi_debug_mode=true in sp.jsonnet + ], + 'nfsp_pipe_%d' % n) + for n in std.range(0, std.length(tools.anodes) - 1) +]; + +//local f = import 'pgrapher/common/funcs.jsonnet'; +local f = import 'pgrapher/experiment/pdhd/funcs.jsonnet'; +//local outtags = ['gauss%d' % n for n in std.range(0, std.length(tools.anodes) - 1)]; +//local fanpipe = f.fanpipe('FrameFanout', nfsp_pipes, 'FrameFanin', 'sn_mag_nf', outtags); +local fanpipe = f.fanpipe('FrameFanout', nfsp_pipes, 'FrameFanin', 'sn_mag_nf'); + +local retagger = g.pnode({ + type: 'Retagger', + data: { + // Note: retagger keeps tag_rules an array to be like frame fanin/fanout. + tag_rules: [{ + // Retagger also handles "frame" and "trace" like fanin/fanout + // merge separately all traces like gaussN to gauss. 
+ frame: { + '.*': 'retagger', + }, + merge: { + 'gauss\\d': 'gauss', + 'wiener\\d': 'wiener', + 'threshold\\d': 'threshold', + }, + }], + }, +}, nin=1, nout=1); + +local sink = g.pnode({ type: 'DumpFrames' }, nin=1, nout=0); + + +//local graph = g.pipeline([wcls_input.adc_digits, rootfile_creation_frames, fanpipe, retagger, wcls_output.sp_signals, sink]); +local graph = g.pipeline([wcls_input.adc_digits, fanpipe, retagger, wcls_output.sp_signals, sink]); + +local app = { + type: 'Pgrapher', + data: { + edges: g.edges(graph), + }, +}; + +// Finally, the configuration sequence +g.uses(graph) + [app] diff --git a/cfg/pgrapher/experiment/pdhd/wcls-sim-drift-deposplat.jsonnet b/cfg/pgrapher/experiment/pdhd/wcls-sim-drift-deposplat.jsonnet new file mode 100644 index 000000000..86100c6b1 --- /dev/null +++ b/cfg/pgrapher/experiment/pdhd/wcls-sim-drift-deposplat.jsonnet @@ -0,0 +1,358 @@ + +local g = import 'pgraph.jsonnet'; +local f = import 'pgrapher/common/funcs.jsonnet'; +local util = import 'pgrapher/experiment/pdhd/funcs.jsonnet'; +local wc = import 'wirecell.jsonnet'; + +local io = import 'pgrapher/common/fileio.jsonnet'; +local tools_maker = import 'pgrapher/common/tools.jsonnet'; + +local params = import 'pgrapher/experiment/pdhd/simparams.jsonnet'; + +// local tools = tools_maker(params); +local btools = tools_maker(params); +local tools = btools { + anodes : [btools.anodes[0], ], +}; + +local sim_maker = import 'pgrapher/experiment/pdhd/sim.jsonnet'; +local sim = sim_maker(params, tools); + +local nanodes = std.length(tools.anodes); +local anode_iota = std.range(0, nanodes-1); + +local wcls_maker = import "pgrapher/ui/wcls/nodes.jsonnet"; +local wcls = wcls_maker(params, tools); +local wcls_input = { + depos: wcls.input.depos(name="", art_tag="IonAndScint"), +}; + +local mega_anode = { + type: 'MegaAnodePlane', + name: 'meganodes', + data: { + anodes_tn: [wc.tn(anode) for anode in tools.anodes], + }, +}; +local wcls_output = { + // ADC output from simulation + 
sim_digits: g.pnode({ + type: 'wclsFrameSaver', + name: 'simdigits', + data: { + anode: wc.tn(mega_anode), + digitize: true, // true means save as RawDigit, else recob::Wire + frame_tags: ['daq'], + // nticks: params.daq.nticks, + // chanmaskmaps: ['bad'], + }, + }, nin=1, nout=1, uses=[mega_anode]), + + // The noise filtered "ADC" values. These are truncated for + // art::Event but left as floats for the WCT SP. Note, the tag + // "raw" is somewhat historical as the output is not equivalent to + // "raw data". + nf_digits: wcls.output.digits(name="nfdigits", tags=["raw"]), + + // The output of signal processing. Note, there are two signal + // sets each created with its own filter. The "gauss" one is best + // for charge reconstruction, the "wiener" is best for S/N + // separation. Both are used in downstream WC code. + sp_signals: wcls.output.signals(name="spsignals", tags=["gauss", "wiener"]), + + // save "threshold" from normal decon for each channel noise + // used in imaging + sp_thresholds: wcls.output.thresholds(name="spthresholds", tags=["threshold"]), +}; + +//local deposio = io.numpy.depos(output); +local drifter = sim.drifter; +// local bagger = sim.make_bagger(); +local bagger = [sim.make_bagger("bagger%d"%n) for n in anode_iota]; + +// signal plus noise pipelines +//local sn_pipes = sim.signal_pipelines; +local sn_pipes = sim.splusn_pipelines; + +// local perfect = import 'pgrapher/experiment/pdhd/chndb-perfect.jsonnet'; +local base = import 'pgrapher/experiment/pdhd/chndb-base.jsonnet'; +local chndb = [{ + type: 'OmniChannelNoiseDB', + name: 'ocndbperfect%d' % n, + // data: perfect(params, tools.anodes[n], tools.field, n), + data: base(params, tools.anodes[n], tools.field, n), + uses: [tools.anodes[n], tools.field], // pnode extension +} for n in anode_iota]; + +//local chndb_maker = import 'pgrapher/experiment/pdhd/chndb.jsonnet'; +//local noise_epoch = "perfect"; +//local noise_epoch = "after"; +//local chndb_pipes = [chndb_maker(params, 
tools.anodes[n], tools.fields[n]).wct(noise_epoch) +// for n in std.range(0, std.length(tools.anodes)-1)]; +local nf_maker = import 'pgrapher/experiment/pdhd/nf.jsonnet'; +// local nf_pipes = [nf_maker(params, tools.anodes[n], chndb_pipes[n]) for n in std.range(0, std.length(tools.anodes)-1)]; +local nf_pipes = [nf_maker(params, tools.anodes[n], chndb[n], n, name='nf%d' % n) for n in anode_iota]; + +local sp_maker = import 'pgrapher/experiment/pdhd/sp.jsonnet'; +local sp = sp_maker(params, tools, { sparse: true, use_roi_debug_mode: true, use_multi_plane_protection: true, mp_tick_resolution: 4, }); +local sp_pipes = [sp.make_sigproc(a) for a in tools.anodes]; + +// local deposplats = [sim.make_ductor('splat%d'%n, tools.anodes[n], tools.pirs[0], 'DepoSplat', 'deposplat%d'%n) for n in anode_iota] ; +local deposplats = [util.splat(params, tools, tools.anodes[n]) for n in anode_iota] ; + +local rng = tools.random; +local wcls_simchannel_sink = g.pnode({ + type: 'wclsSimChannelSink', + name: 'postdrift', + data: { + artlabel: "simpleSC", // where to save in art::Event + anodes_tn: [wc.tn(anode) for anode in tools.anodes], + rng: wc.tn(rng), + tick: 0.5*wc.us, + start_time: -0.25*wc.ms, + readout_time: self.tick*6000, + nsigma: 3.0, + drift_speed: params.lar.drift_speed, + u_to_rp: 90.58*wc.mm, + v_to_rp: 95.29*wc.mm, + y_to_rp: 100*wc.mm, + u_time_offset: 0.0*wc.us, + v_time_offset: 0.0*wc.us, + y_time_offset: 0.0*wc.us, + use_energy: true, + }, +}, nin=1, nout=1, uses=tools.anodes); + +local magoutput = 'g4-rec-0.root'; +local magnify = import 'pgrapher/experiment/pdhd/magnify-sinks.jsonnet'; +local sinks = magnify(tools, magoutput); + +local hio_truth = [g.pnode({ + type: 'HDF5FrameTap', + name: 'hio_truth%d' % n, + data: { + anode: wc.tn(tools.anodes[n]), + trace_tags: ['deposplat%d'%n], + filename: "g4-tru-%d.h5" % n, + chunk: [0, 0], // ncol, nrow + gzip: 2, + high_throughput: true, + }, + }, nin=1, nout=1), + for n in std.range(0, std.length(tools.anodes) - 1) + ]; 
+ +local hio_orig = [g.pnode({ + type: 'HDF5FrameTap', + name: 'hio_orig%d' % n, + data: { + anode: wc.tn(tools.anodes[n]), + trace_tags: ['orig%d'%n], + filename: "g4-rec-%d.h5" % n, + chunk: [0, 0], // ncol, nrow + gzip: 2, + high_throughput: true, + }, + }, nin=1, nout=1), + for n in std.range(0, std.length(tools.anodes) - 1) + ]; + +local hio_sp = [g.pnode({ + type: 'HDF5FrameTap', + name: 'hio_sp%d' % n, + data: { + anode: wc.tn(tools.anodes[n]), + trace_tags: ['loose_lf%d' % n + , 'tight_lf%d' % n + , 'cleanup_roi%d' % n + , 'break_roi_1st%d' % n + , 'break_roi_2nd%d' % n + , 'shrink_roi%d' % n + , 'extend_roi%d' % n + , 'mp3_roi%d' % n + , 'mp2_roi%d' % n + , 'decon_charge%d' % n + , 'gauss%d' % n], + filename: "g4-rec-%d.h5" % n, + chunk: [0, 0], // ncol, nrow + gzip: 2, + high_throughput: true, + }, + }, nin=1, nout=1), + for n in std.range(0, std.length(tools.anodes) - 1) + ]; + +local hio_dnn = [g.pnode({ + type: 'HDF5FrameTap', + name: 'hio_dnn%d' % n, + data: { + anode: wc.tn(tools.anodes[n]), + // trace_tags: ['dnn_sp%d' % n], + trace_tags: ['dnnsp%d' % n], + filename: "g4-rec-%d.h5" % n, + chunk: [0, 0], // ncol, nrow + gzip: 2, + high_throughput: true, + }, + }, nin=1, nout=1), + for n in std.range(0, std.length(tools.anodes) - 1) + ]; + +local rio_orig = [g.pnode({ + type: 'ExampleROOTAna', + name: 'rio_orig_apa%d' % n, + data: { + output_filename: "g4-rec-%d.root" % n, + anode: wc.tn(tools.anodes[n]), + }, + }, nin=1, nout=1), + for n in std.range(0, std.length(tools.anodes) - 1) + ]; + +local rio_nf = [g.pnode({ + type: 'ExampleROOTAna', + name: 'rio_nf_apa%d' % n, + data: { + output_filename: "g4-rec-%d.root" % n, + anode: wc.tn(tools.anodes[n]), + }, + }, nin=1, nout=1), + for n in std.range(0, std.length(tools.anodes) - 1) + ]; + +local rio_sp = [g.pnode({ + type: 'ExampleROOTAna', + name: 'rio_sp_apa%d' % n, + data: { + output_filename: "g4-rec-%d.root" % n, + anode: wc.tn(tools.anodes[n]), + }, + }, nin=1, nout=1), + for n in std.range(0, 
std.length(tools.anodes) - 1) + ]; + +// Note: better switch to layers +local dnnroi = import 'pgrapher/experiment/pdhd/dnnroi.jsonnet'; +local ts = { + type: "TorchService", + name: "dnnroi", + data: { + // model: "ts-model/unet-l23-cosmic500-e50.ts", + model: "ts-model/CP49.ts", + device: "cpu", // "gpucpu", + concurrency: 1, + }, +}; + + +local reco_fork = [ + g.pipeline([ + // wcls_simchannel_sink[n], + bagger[n], + sn_pipes[n], + // hio_orig[n], + // nf_pipes[n], + // rio_nf[n], + sp_pipes[n], + hio_sp[n], + + // dnn_roi_finding[n], + + dnnroi(tools.anodes[n], ts, output_scale=1.2), + + hio_dnn[n], + // rio_sp[n], + g.pnode({ type: 'DumpFrames', name: 'reco_fork%d'%n }, nin=1, nout=0), + // perapa_img_pipelines[n], + ], + 'reco_fork%d' % n) + for n in anode_iota +]; + +local truth_fork = [ + g.pipeline([ + deposplats[n], + hio_truth[n], + g.pnode({ type: 'DumpFrames', name: 'truth_fork%d'%n }, nin=1, nout=0) + ], + 'truth_fork%d' % n) + for n in anode_iota +]; + +local depo_fanout = [g.pnode({ + type:'DepoFanout', + name:'depo_fanout-%d'%n, + data:{ + multiplicity:2, + tags: [], + }}, nin=1, nout=2) for n in anode_iota]; +local frame_fanin = [g.pnode({ + type: 'FrameFanin', + name: 'frame_fanin-%d'%n, + data: { + multiplicity: 2, + tags: [], + }}, nin=2, nout=1) for n in anode_iota]; + +local frame_sink = g.pnode({ type: 'DumpFrames' }, nin=1, nout=0); + +local multipass = [g.intern(innodes=[depo_fanout[n]], centernodes=[truth_fork[n], reco_fork[n]], outnodes=[], + edges = [ + g.edge(depo_fanout[n], truth_fork[n], 0, 0), + g.edge(depo_fanout[n], reco_fork[n], 1, 0)]) for n in anode_iota]; + +// local multipass = [reco_fork[n] for n in anode_iota]; + +local outtags = ['orig%d' % n for n in anode_iota]; +// local bi_manifold = f.fanpipe('DepoFanout', multipass, 'FrameFanin', 'sn_mag_nf', outtags); + + +local depo_fanout_1st = g.pnode({ + type:'DepoFanout', + name:'depo_fanout_1st', + data:{ + multiplicity:nanodes, + tags: [], + }}, nin=1, nout=nanodes); +local 
bi_manifold = g.intern(innodes=[depo_fanout_1st], centernodes=multipass, outnodes=[], + edges = [ + g.edge(depo_fanout_1st, multipass[n], n, 0) for n in anode_iota + ], +); + +local retagger = g.pnode({ + type: 'Retagger', + data: { + // Note: retagger keeps tag_rules an array to be like frame fanin/fanout. + tag_rules: [{ + // Retagger also handles "frame" and "trace" like fanin/fanout + // merge separately all traces like gaussN to gauss. + frame: { + '.*': 'orig', + }, + merge: { + 'orig\\d': 'daq', + }, + }], + }, +}, nin=1, nout=1); + +//local frameio = io.numpy.frames(output); +local sink = sim.frame_sink; + +// g4 sim as input +local graph = g.intern(innodes=[wcls_input.depos], centernodes=[drifter, depo_fanout_1st]+multipass, outnodes=[], + edges = + [ + g.edge(wcls_input.depos, drifter, 0, 0), + g.edge(drifter, depo_fanout_1st, 0, 0), + ] + + [g.edge(depo_fanout_1st, multipass[n], n, 0) for n in anode_iota], + ); +local app = { + type: 'Pgrapher', + data: { + edges: g.edges(graph), + }, +}; +g.uses(graph) + [app] diff --git a/cfg/pgrapher/experiment/pdhd/wcls-sim-drift-simchannel-priorSCE.jsonnet b/cfg/pgrapher/experiment/pdhd/wcls-sim-drift-simchannel-priorSCE.jsonnet new file mode 100644 index 000000000..11a3230d9 --- /dev/null +++ b/cfg/pgrapher/experiment/pdhd/wcls-sim-drift-simchannel-priorSCE.jsonnet @@ -0,0 +1,200 @@ +// This is a main entry point for configuring a wire-cell CLI job to +// simulate protoDUNE-SP. It is simplest signal-only simulation with +// one set of nominal field response function. It excludes noise. +// The kinematics are a mixture of Ar39 "blips" and some ideal, +// straight-line MIP tracks. +// +// Output is a Python numpy .npz file. 
+ +local g = import 'pgraph.jsonnet'; +local f = import 'pgrapher/common/funcs.jsonnet'; +local wc = import 'wirecell.jsonnet'; + +local io = import 'pgrapher/common/fileio.jsonnet'; +local tools_maker = import 'pgrapher/common/tools.jsonnet'; +local base = import 'pgrapher/experiment/pdhd/simparams.jsonnet'; +local params = base { + lar: super.lar { + // Longitudinal diffusion constant + DL: std.extVar('DL') * wc.cm2 / wc.s, + // Transverse diffusion constant + DT: std.extVar('DT') * wc.cm2 / wc.s, + // Electron lifetime + lifetime: std.extVar('lifetime') * wc.ms, + // Electron drift speed, assumes a certain applied E-field + drift_speed: std.extVar('driftSpeed') * wc.mm / wc.us, + }, +}; + +local tools = tools_maker(params); + +local sim_maker = import 'pgrapher/experiment/pdhd/sim.jsonnet'; +local sim = sim_maker(params, tools); + +local nanodes = std.length(tools.anodes); +local anode_iota = std.range(0, nanodes - 1); + + +local output = 'wct-sim-ideal-sig.npz'; + + +//local depos = g.join_sources(g.pnode({type:"DepoMerger", name:"BlipTrackJoiner"}, nin=2, nout=1), +// [sim.ar39(), sim.tracks(tracklist)]); +// local depos = sim.tracks(tracklist, step=1.0 * wc.mm); + +local wcls_maker = import "pgrapher/ui/wcls/nodes.jsonnet"; +local wcls = wcls_maker(params, tools); +local wcls_input = { + depos: wcls.input.depos(name="", art_tag="IonAndScint", assn_art_tag="IonAndScint:priorSCE"), + // depos: wcls.input.depos(name="electron", art_tag="IonAndScint"), // default art_tag="blopper" +}; + +// Collect all the wc/ls output converters for use below. Note the +// "name" MUST match what is used in theh "outputers" parameter in the +// FHiCL that loads this file. 
+local mega_anode = { + type: 'MegaAnodePlane', + name: 'meganodes', + data: { + anodes_tn: [wc.tn(anode) for anode in tools.anodes], + }, +}; +local wcls_output = { + // ADC output from simulation + // sim_digits: wcls.output.digits(name="simdigits", tags=["orig"]), + sim_digits: g.pnode({ + type: 'wclsFrameSaver', + name: 'simdigits', + data: { + // anode: wc.tn(tools.anode), + anode: wc.tn(mega_anode), + digitize: true, // true means save as RawDigit, else recob::Wire + frame_tags: ['daq'], + // nticks: params.daq.nticks, + // chanmaskmaps: ['bad'], + pedestal_mean: 'native', + }, + }, nin=1, nout=1, uses=[mega_anode]), + + // The noise filtered "ADC" values. These are truncated for + // art::Event but left as floats for the WCT SP. Note, the tag + // "raw" is somewhat historical as the output is not equivalent to + // "raw data". + nf_digits: wcls.output.digits(name="nfdigits", tags=["raw"]), + + // The output of signal processing. Note, there are two signal + // sets each created with its own filter. The "gauss" one is best + // for charge reconstruction, the "wiener" is best for S/N + // separation. Both are used in downstream WC code. 
+ sp_signals: wcls.output.signals(name="spsignals", tags=["gauss", "wiener"]), + + // save "threshold" from normal decon for each channel noise + // used in imaging + sp_thresholds: wcls.output.thresholds(name="spthresholds", tags=["threshold"]), +}; + +//local deposio = io.numpy.depos(output); +local drifter = sim.drifter; +local bagger = sim.make_bagger(); + +// signal plus noise pipelines +//local sn_pipes = sim.signal_pipelines; +local sn_pipes = sim.splusn_pipelines; + +local perfect = import 'pgrapher/experiment/pdsp/chndb-perfect.jsonnet'; +local chndb = [{ + type: 'OmniChannelNoiseDB', + name: 'ocndbperfect%d' % n, + data: perfect(params, tools.anodes[n], tools.field, n), + uses: [tools.anodes[n], tools.field], // pnode extension +} for n in anode_iota]; + +//local chndb_maker = import 'pgrapher/experiment/pdsp/chndb.jsonnet'; +//local noise_epoch = "perfect"; +//local noise_epoch = "after"; +//local chndb_pipes = [chndb_maker(params, tools.anodes[n], tools.fields[n]).wct(noise_epoch) +// for n in std.range(0, std.length(tools.anodes)-1)]; +local nf_maker = import 'pgrapher/experiment/pdsp/nf.jsonnet'; +// local nf_pipes = [nf_maker(params, tools.anodes[n], chndb_pipes[n]) for n in std.range(0, std.length(tools.anodes)-1)]; +local nf_pipes = [nf_maker(params, tools.anodes[n], chndb[n], n, name='nf%d' % n) for n in anode_iota]; + +local sp_maker = import 'pgrapher/experiment/pdsp/sp.jsonnet'; +local sp = sp_maker(params, tools); +local sp_pipes = [sp.make_sigproc(a) for a in tools.anodes]; + +local rng = tools.random; +local wcls_simchannel_sink = g.pnode({ + type: 'wclsSimChannelSink', + name: 'postdrift', + data: { + artlabel: 'simpleSC', // where to save in art::Event + anodes_tn: [wc.tn(anode) for anode in tools.anodes], + rng: wc.tn(rng), + tick: 0.5 * wc.us, + start_time: -0.25 * wc.ms, + readout_time: self.tick * 6000, + nsigma: 3.0, + drift_speed: params.lar.drift_speed, + u_to_rp: 100 * wc.mm, // 90.58 * wc.mm, + v_to_rp: 100 * wc.mm, // 95.29 * 
wc.mm, + y_to_rp: 100 * wc.mm, + u_time_offset: 0.0 * wc.us, + v_time_offset: 0.0 * wc.us, + y_time_offset: 0.0 * wc.us, + g4_ref_time: -250 * wc.us, + use_energy: true, + }, +}, nin=1, nout=1, uses=tools.anodes); + +// local magoutput = 'protodune-data-check.root'; +// local magnify = import 'pgrapher/experiment/pdsp/magnify-sinks.jsonnet'; +// local sinks = magnify(tools, magoutput); + +local multipass = [ + g.pipeline([ + // wcls_simchannel_sink[n], + sn_pipes[n], + // sinks.orig_pipe[n], + // nf_pipes[n], + // sp_pipes[n], + ], + 'multipass%d' % n) + for n in anode_iota +]; +local outtags = ['orig%d' % n for n in anode_iota]; +local bi_manifold = f.fanpipe('DepoSetFanout', multipass, 'FrameFanin', 'sn_mag_nf', outtags); +// local bi_manifold = f.fanpipe('DepoFanout', multipass, 'FrameFanin', 'sn_mag_nf', outtags); + +local retagger = g.pnode({ + type: 'Retagger', + data: { + // Note: retagger keeps tag_rules an array to be like frame fanin/fanout. + tag_rules: [{ + // Retagger also handles "frame" and "trace" like fanin/fanout + // merge separately all traces like gaussN to gauss. + frame: { + '.*': 'orig', + }, + merge: { + 'orig\\d': 'daq', + }, + }], + }, +}, nin=1, nout=1); + +//local frameio = io.numpy.frames(output); +local sink = sim.frame_sink; + +local graph = g.pipeline([wcls_input.depos, drifter, wcls_simchannel_sink, bagger, bi_manifold, retagger, wcls_output.sim_digits, sink]); + +local app = { + type: 'Pgrapher', + data: { + edges: g.edges(graph), + }, +}; + + +// Finally, the configuration sequence which is emitted. + +g.uses(graph) + [app] diff --git a/cfg/pgrapher/experiment/pdhd/wcls-sp.jsonnet b/cfg/pgrapher/experiment/pdhd/wcls-sp.jsonnet new file mode 100644 index 000000000..5905bcf1e --- /dev/null +++ b/cfg/pgrapher/experiment/pdhd/wcls-sp.jsonnet @@ -0,0 +1,238 @@ +// This is a main entry point to configure a WC/LS job that applies +// noise filtering and signal processing to existing RawDigits. 
The +// FHiCL is expected to provide the following parameters as attributes +// in the "params" structure. +// +// epoch: the hardware noise fix expoch: "before", "after", "dynamic" or "perfect" +// reality: whether we are running on "data" or "sim"ulation. +// raw_input_label: the art::Event inputTag for the input RawDigit +// +// see the .fcl of the same name for an example +// +// Manual testing, eg: +// +// jsonnet -V reality=data -V epoch=dynamic -V raw_input_label=daq \\ +// -V signal_output_form=sparse \\ +// -J cfg cfg/pgrapher/experiment/uboone/wcls-nf-sp.jsonnet +// +// jsonnet -V reality=sim -V epoch=perfect -V raw_input_label=daq \\ +// -V signal_output_form=sparse \\ +// -J cfg cfg/pgrapher/experiment/uboone/wcls-nf-sp.jsonnet + + +local epoch = std.extVar('epoch'); // eg "dynamic", "after", "before", "perfect" +local reality = std.extVar('reality'); +local sigoutform = std.extVar('signal_output_form'); // eg "sparse" or "dense" +// local nsample_ext = std.extVar('nsample'); // eg 6000, 10000, or "auto" +// local nsample = if nsample_ext == 'auto' then 6000 else std.parseInt(nsample_ext); // set auto to 0 once larwirecell fixed + +local wc = import 'wirecell.jsonnet'; +local g = import 'pgraph.jsonnet'; + +local raw_input_label = std.extVar('raw_input_label'); // eg "daq" + + +local data_params = import 'pgrapher/experiment/pdhd/params.jsonnet'; +local simu_params = import 'pgrapher/experiment/pdhd/simparams.jsonnet'; +local params = if reality == 'data' then data_params else simu_params; + +local tools_maker = import 'pgrapher/common/tools.jsonnet'; +local tools = tools_maker(params); + +local wcls_maker = import 'pgrapher/ui/wcls/nodes.jsonnet'; +local wcls = wcls_maker(params, tools); + +//local nf_maker = import "pgrapher/experiment/pdsp/nf.jsonnet"; +//local chndb_maker = import "pgrapher/experiment/pdsp/chndb.jsonnet"; + +local sp_maker = import 'pgrapher/experiment/pdhd/sp.jsonnet'; + +//local chndbm = chndb_maker(params, tools); +//local chndb 
= if epoch == "dynamic" then chndbm.wcls_multi(name="") else chndbm.wct(epoch); + + +// Collect the WC/LS input converters for use below. Make sure the +// "name" argument matches what is used in the FHiCL that loads this +// file. In particular if there is no ":" in the inputer then name +// must be the emtpy string. +local wcls_input = { + adc_digits: g.pnode({ + type: 'wclsCookedFrameSource', + name: '', + data: { + art_tag: raw_input_label, + frame_tags: ['orig'], // this is a WCT designator + //nticks: params.daq.nticks, + // nticks: nsample, + }, + }, nin=0, nout=1), + +}; + +// Collect all the wc/ls output converters for use below. Note the +// "name" MUST match what is used in theh "outputers" parameter in the +// FHiCL that loads this file. +local mega_anode = { + type: 'MegaAnodePlane', + name: 'meganodes', + data: { + anodes_tn: [wc.tn(anode) for anode in tools.anodes], + }, +}; +local wcls_output = { + // The noise filtered "ADC" values. These are truncated for + // art::Event but left as floats for the WCT SP. Note, the tag + // "raw" is somewhat historical as the output is not equivalent to + // "raw data". + nf_digits: g.pnode({ + type: 'wclsFrameSaver', + name: 'nfsaver', + data: { + anode: wc.tn(tools.anode), + digitize: true, // true means save as RawDigit, else recob::Wire + frame_tags: ['raw'], + //nticks: params.daq.nticks, + // nticks: nsample, + chanmaskmaps: ['bad'], + }, + }, nin=1, nout=1, uses=[tools.anode]), + + + // The output of signal processing. Note, there are two signal + // sets each created with its own filter. The "gauss" one is best + // for charge reconstruction, the "wiener" is best for S/N + // separation. Both are used in downstream WC code. 
+ sp_signals: g.pnode({ + type: 'wclsFrameSaver', + name: 'spsaver', + data: { + // anode: wc.tn(tools.anode), + anode: wc.tn(mega_anode), + digitize: false, // true means save as RawDigit, else recob::Wire + frame_tags: ['gauss', 'wiener'], + frame_scale: [0.001, 0.001], + //nticks: params.daq.nticks, + // nticks: nsample, + chanmaskmaps: [], + summary_tags: ['threshold'], // retagger makes this tag + // just one threshold value + summary_operator: { threshold: 'set' }, + nticks: -1, + + }, + }, nin=1, nout=1, uses=[mega_anode]), +}; + +// local perfect = import 'chndb-perfect.jsonnet'; +local base = import 'pgrapher/experiment/pdhd/chndb-base.jsonnet'; +local chndb = [{ + type: 'OmniChannelNoiseDB', + name: 'ocndbperfect%d' % n, + // data: perfect(params, tools.anodes[n], tools.field, n), + data: base(params, tools.anodes[n], tools.field, n){dft:wc.tn(tools.dft)}, + uses: [tools.anodes[n], tools.field, tools.dft], +} for n in std.range(0, std.length(tools.anodes) - 1)]; + +// local nf_maker = import 'pgrapher/experiment/pdsp/nf.jsonnet'; +// local nf_pipes = [nf_maker(params, tools.anodes[n], chndb[n], n, name='nf%d' % n) for n in std.range(0, std.length(tools.anodes) - 1)]; + +// an empty omnibus noise filter +// for suppressing bad channels stored in the noise db +local obnf = [ + g.pnode( + { + type: 'OmnibusNoiseFilter', + name: 'nf%d' % n, + data: { + + // This is the number of bins in various filters + // nsamples: params.nf.nsamples, + + channel_filters: [], + grouped_filters: [], + channel_status_filters: [], + noisedb: wc.tn(chndb[n]), + // intraces: 'orig%d' % n, // frame tag get all traces + intraces: 'orig', // frame tag get all traces + outtraces: 'raw%d' % n, + }, + }, uses=[chndb[n], tools.anodes[n]], nin=1, nout=1 + ) + for n in std.range(0, std.length(tools.anodes) - 1) +]; +local nf_pipes = [g.pipeline([obnf[n]], name='nf%d' % n) for n in std.range(0, std.length(tools.anodes) - 1)]; + +local sp = sp_maker(params, tools, { sparse: sigoutform == 
'sparse' }); +local sp_pipes = [sp.make_sigproc(a) for a in tools.anodes]; + +local chsel_pipes = [ + g.pnode({ + type: 'ChannelSelector', + name: 'chsel%d' % n, + data: { + channels: std.range(2560 * n, 2560 * (n + 1) - 1), + //tags: ['orig%d' % n], // traces tag + }, + }, nin=1, nout=1) + for n in std.range(0, std.length(tools.anodes) - 1) +]; + +local magoutput = 'protodunehd-data-check.root'; +local magnify = import 'pgrapher/experiment/pdhd/magnify-sinks.jsonnet'; +local magio = magnify(tools, magoutput); + +local nfsp_pipes = [ + g.pipeline([ + chsel_pipes[n], + // magio.orig_pipe[n], + nf_pipes[n], + // magio.raw_pipe[n], + sp_pipes[n], + // magio.decon_pipe[n], + // magio.threshold_pipe[n], + // magio.debug_pipe[n], // use_roi_debug_mode=true in sp.jsonnet + ], + 'nfsp_pipe_%d' % n) + for n in std.range(0, std.length(tools.anodes) - 1) +]; + +//local f = import 'pgrapher/common/funcs.jsonnet'; +local f = import 'pgrapher/experiment/pdhd/funcs.jsonnet'; +//local outtags = ['gauss%d' % n for n in std.range(0, std.length(tools.anodes) - 1)]; +//local fanpipe = f.fanpipe('FrameFanout', nfsp_pipes, 'FrameFanin', 'sn_mag_nf', outtags); +local fanpipe = f.fanpipe('FrameFanout', nfsp_pipes, 'FrameFanin', 'sn_mag_nf'); + +local retagger = g.pnode({ + type: 'Retagger', + data: { + // Note: retagger keeps tag_rules an array to be like frame fanin/fanout. + tag_rules: [{ + // Retagger also handles "frame" and "trace" like fanin/fanout + // merge separately all traces like gaussN to gauss. 
+ frame: { + '.*': 'retagger', + }, + merge: { + 'gauss\\d': 'gauss', + 'wiener\\d': 'wiener', + 'theshold\\d': 'theshold', + }, + }], + }, +}, nin=1, nout=1); + +local sink = g.pnode({ type: 'DumpFrames' }, nin=1, nout=0); + + +//local graph = g.pipeline([wcls_input.adc_digits, rootfile_creation_frames, fanpipe, retagger, wcls_output.sp_signals, sink]); +local graph = g.pipeline([wcls_input.adc_digits, fanpipe, retagger, wcls_output.sp_signals, sink]); + +local app = { + type: 'Pgrapher', + data: { + edges: g.edges(graph), + }, +}; + +// Finally, the configuration sequence +g.uses(graph) + [app] diff --git a/cfg/pgrapher/experiment/pdhd/wct-sim-check.jsonnet b/cfg/pgrapher/experiment/pdhd/wct-sim-check.jsonnet new file mode 100644 index 000000000..cd1837db2 --- /dev/null +++ b/cfg/pgrapher/experiment/pdhd/wct-sim-check.jsonnet @@ -0,0 +1,105 @@ +# usage: wire-cell -l stdout wct-sim-check.jsonnet + +local g = import 'pgraph.jsonnet'; +local f = import 'pgrapher/common/funcs.jsonnet'; +local wc = import 'wirecell.jsonnet'; + +local io = import 'pgrapher/common/fileio.jsonnet'; +local tools_maker = import 'pgrapher/common/tools.jsonnet'; +local params = import 'pgrapher/experiment/pdhd/simparams.jsonnet'; + +local tools = tools_maker(params); + +local sim_maker = import 'pgrapher/experiment/pdhd/sim.jsonnet'; +local sim = sim_maker(params, tools); + +local tracklist = [ + + { + time: 0 * wc.us, + charge: -500, // negative means # electrons per step (see below configuration) + ray: params.det.bounds, + }, + +]; + +local depos = sim.tracks(tracklist, step=0.1 * wc.mm); // MIP <=> 5000e/mm + +local nanodes = std.length(tools.anodes); +local anode_iota = std.range(0, nanodes-1); +local anode_idents = [anode.data.ident for anode in tools.anodes]; + +// local output = 'wct-sim-ideal-sig.npz'; +// local deposio = io.numpy.depos(output); +local drifter = sim.drifter; +local bagger = sim.make_bagger(); +// signal plus noise pipelines +local sn_pipes = sim.splusn_pipelines; 
+// local analog_pipes = sim.analog_pipelines; + +local perfect = import 'pgrapher/experiment/pdhd/chndb-base.jsonnet'; +local chndb = [{ + type: 'OmniChannelNoiseDB', + name: 'ocndbperfect%d' % n, + data: perfect(params, tools.anodes[n], tools.field, n){dft:wc.tn(tools.dft)}, + uses: [tools.anodes[n], tools.field, tools.dft], +} for n in anode_iota]; + +local nf_maker = import 'pgrapher/experiment/pdhd/nf.jsonnet'; +local nf_pipes = [nf_maker(params, tools.anodes[n], chndb[n], n, name='nf%d' % n) for n in std.range(0, std.length(tools.anodes) - 1)]; + +local sp_override = { + sparse: true, + use_roi_debug_mode: false, + use_multi_plane_protection: true, + process_planes: [0, 1, 2] +}; + +local sp_maker = import 'pgrapher/experiment/pdhd/sp.jsonnet'; +local sp = sp_maker(params, tools, sp_override); +local sp_pipes = [sp.make_sigproc(a) for a in tools.anodes]; + +local magoutput = 'porotdunehd-sim-check.root'; +local magnify = import 'pgrapher/experiment/pdhd/magnify-sinks.jsonnet'; +local magnifyio = magnify(tools, magoutput); + +local parallel_pipes = [ + g.pipeline([ + sn_pipes[n], + // magnifyio.orig_pipe[n], + nf_pipes[n], + magnifyio.raw_pipe[n], + sp_pipes[n], + // magnifyio.debug_pipe[n], + magnifyio.decon_pipe[n], + ], + 'parallel_pipe_%d' % n) + for n in std.range(0, std.length(tools.anodes) - 1) +]; +local outtags = ['raw%d' % n for n in std.range(0, std.length(tools.anodes) - 1)]; +local parallel_graph = f.fanpipe('DepoSetFanout', parallel_pipes, 'FrameFanin', 'sn_mag_nf', outtags); + +//local frameio = io.numpy.frames(output); +local sink = sim.frame_sink; +local graph = g.pipeline([depos, drifter, bagger, parallel_graph, sink]); + + +local app = { + type: 'Pgrapher', + data: { + edges: g.edges(graph), + }, +}; + +local cmdline = { + type: "wire-cell", + data: { + plugins: ["WireCellGen", "WireCellPgraph", "WireCellSio", "WireCellSigProc", "WireCellRoot"], + apps: ["Pgrapher"] + } +}; + + +// Finally, the configuration sequence which is emitted. 
+ +[cmdline] + g.uses(graph) + [app] diff --git a/cfg/pgrapher/experiment/pdhd/wct-sim-drift-deposplat.jsonnet b/cfg/pgrapher/experiment/pdhd/wct-sim-drift-deposplat.jsonnet new file mode 100644 index 000000000..efc3f8a16 --- /dev/null +++ b/cfg/pgrapher/experiment/pdhd/wct-sim-drift-deposplat.jsonnet @@ -0,0 +1,317 @@ +# usage: wire-cell -l stdout wct-sim-check.jsonnet + +local g = import 'pgraph.jsonnet'; +local f = import 'pgrapher/common/funcs.jsonnet'; +local util = import 'pgrapher/experiment/pdhd/funcs.jsonnet'; +local wc = import 'wirecell.jsonnet'; + +local io = import 'pgrapher/common/fileio.jsonnet'; +local tools_maker = import 'pgrapher/common/tools.jsonnet'; + +local params = import 'pgrapher/experiment/pdhd/simparams.jsonnet'; + +// local tools = tools_maker(params); +local btools = tools_maker(params); +local tools = btools { + anodes : [btools.anodes[0], ], + // anodes : [btools.anodes[0], btools.anodes[1], ], +}; + +local sim_maker = import 'pgrapher/experiment/pdhd/sim.jsonnet'; +local sim = sim_maker(params, tools); + +local nanodes = std.length(tools.anodes); +local anode_iota = std.range(0, nanodes-1); + +local tracklist = [ + + { + time: 0 * wc.us, + charge: -500, // negative means # electrons per step (see below configuration) + ray: params.det.bounds, + }, + +]; + +local track_depos = sim.tracks(tracklist, step=0.1 * wc.mm); + +local drifter = sim.drifter; +local bagger = [sim.make_bagger("bagger%d"%n) for n in anode_iota]; + +// signal plus noise pipelines +//local sn_pipes = sim.signal_pipelines; +local sn_pipes = sim.splusn_pipelines; + +// local perfect = import 'pgrapher/experiment/pdhd/chndb-perfect.jsonnet'; +local base = import 'pgrapher/experiment/pdhd/chndb-base.jsonnet'; +local chndb = [{ + type: 'OmniChannelNoiseDB', + name: 'ocndbperfect%d' % n, + // data: perfect(params, tools.anodes[n], tools.field, n), + data: base(params, tools.anodes[n], tools.field, n), + uses: [tools.anodes[n], tools.field], // pnode extension +} for n in 
anode_iota]; + +//local chndb_maker = import 'pgrapher/experiment/pdhd/chndb.jsonnet'; +//local noise_epoch = "perfect"; +//local noise_epoch = "after"; +//local chndb_pipes = [chndb_maker(params, tools.anodes[n], tools.fields[n]).wct(noise_epoch) +// for n in std.range(0, std.length(tools.anodes)-1)]; +local nf_maker = import 'pgrapher/experiment/pdhd/nf.jsonnet'; +// local nf_pipes = [nf_maker(params, tools.anodes[n], chndb_pipes[n]) for n in std.range(0, std.length(tools.anodes)-1)]; +local nf_pipes = [nf_maker(params, tools.anodes[n], chndb[n], n, name='nf%d' % n) for n in anode_iota]; + +local sp_maker = import 'pgrapher/experiment/pdhd/sp.jsonnet'; +// local sp = sp_maker(params, tools, { sparse: true, }); +local sp = sp_maker(params, tools, { sparse: true, use_roi_debug_mode: true, use_multi_plane_protection: true, mp_tick_resolution: 4, }); +local sp_pipes = [sp.make_sigproc(a) for a in tools.anodes]; + +// local deposplats = [sim.make_ductor('splat%d'%n, tools.anodes[n], tools.pirs[0], 'DepoSplat', 'ductor%d'%n) for n in anode_iota] ; +local deposplats = [util.splat(params, tools, tools.anodes[n]) for n in anode_iota] ; + +local rng = tools.random; +local magoutput = 'pdhd-sim-check-deposplat.root'; +local magnify = import 'pgrapher/experiment/pdhd/magnify-sinks.jsonnet'; +local magnifyio = magnify(tools, magoutput); + +local hio_truth = [g.pnode({ + type: 'HDF5FrameTap', + name: 'hio_truth%d' % n, + data: { + anode: wc.tn(tools.anodes[n]), + trace_tags: ['deposplat%d'%n], + filename: "g4-tru-%d.h5" % n, + chunk: [0, 0], // ncol, nrow + gzip: 2, + high_throughput: true, + }, + }, nin=1, nout=1), + for n in std.range(0, std.length(tools.anodes) - 1) + ]; + +local hio_orig = [g.pnode({ + type: 'HDF5FrameTap', + name: 'hio_orig%d' % n, + data: { + anode: wc.tn(tools.anodes[n]), + trace_tags: ['orig%d'%n], + filename: "g4-rec-%d.h5" % n, + chunk: [0, 0], // ncol, nrow + gzip: 2, + high_throughput: true, + }, + }, nin=1, nout=1), + for n in std.range(0, 
std.length(tools.anodes) - 1) + ]; + +local hio_sp = [g.pnode({ + type: 'HDF5FrameTap', + name: 'hio_sp%d' % n, + data: { + anode: wc.tn(tools.anodes[n]), + trace_tags: ['loose_lf%d' % n + , 'tight_lf%d' % n + , 'cleanup_roi%d' % n + , 'break_roi_1st%d' % n + , 'break_roi_2nd%d' % n + , 'shrink_roi%d' % n + , 'extend_roi%d' % n + , 'mp3_roi%d' % n + , 'mp2_roi%d' % n + , 'decon_charge%d' % n + , 'gauss%d' % n], + filename: "g4-rec-%d.h5" % n, + chunk: [0, 0], // ncol, nrow + gzip: 2, + high_throughput: true, + }, + }, nin=1, nout=1), + for n in std.range(0, std.length(tools.anodes) - 1) + ]; + +local hio_dnn = [g.pnode({ + type: 'HDF5FrameTap', + name: 'hio_dnn%d' % n, + data: { + anode: wc.tn(tools.anodes[n]), + // trace_tags: ['dnn_sp%d' % n], + trace_tags: ['dnnsp%d' % n], + filename: "g4-rec-%d.h5" % n, + chunk: [0, 0], // ncol, nrow + gzip: 2, + high_throughput: true, + }, + }, nin=1, nout=1), + for n in std.range(0, std.length(tools.anodes) - 1) + ]; + +local rio_orig = [g.pnode({ + type: 'ExampleROOTAna', + name: 'rio_orig_apa%d' % n, + data: { + output_filename: "g4-rec-%d.root" % n, + anode: wc.tn(tools.anodes[n]), + }, + }, nin=1, nout=1), + for n in std.range(0, std.length(tools.anodes) - 1) + ]; + +local rio_nf = [g.pnode({ + type: 'ExampleROOTAna', + name: 'rio_nf_apa%d' % n, + data: { + output_filename: "g4-rec-%d.root" % n, + anode: wc.tn(tools.anodes[n]), + }, + }, nin=1, nout=1), + for n in std.range(0, std.length(tools.anodes) - 1) + ]; + +local rio_sp = [g.pnode({ + type: 'ExampleROOTAna', + name: 'rio_sp_apa%d' % n, + data: { + output_filename: "g4-rec-%d.root" % n, + anode: wc.tn(tools.anodes[n]), + }, + }, nin=1, nout=1), + for n in std.range(0, std.length(tools.anodes) - 1) + ]; + +// Note: better switch to layers +local dnnroi = import 'pgrapher/experiment/pdhd/dnnroi.jsonnet'; +local ts = { + type: "TorchService", + name: "dnnroi", + data: { + // model: "ts-model/unet-l23-cosmic500-e50.ts", + model: "ts-model/CP49.ts", + device: "cpu", // 
"gpucpu", + concurrency: 1, + }, +}; + + +local reco_fork = [ + g.pipeline([ + // wcls_simchannel_sink[n], + bagger[n], + sn_pipes[n], + // magnifyio.orig_pipe[n], + // hio_orig[n], + // nf_pipes[n], + // rio_nf[n], + sp_pipes[n], + hio_sp[n], + // magnifyio.debug_pipe[n], + magnifyio.decon_pipe[n], + + dnnroi(tools.anodes[n], ts, output_scale=1.2), + + magnifyio.dnnroi_pipe[n], + // hio_dnn[n], + // rio_sp[n], + g.pnode({ type: 'DumpFrames', name: 'reco_fork%d'%n }, nin=1, nout=0), + // perapa_img_pipelines[n], + ], + 'reco_fork%d' % n) + for n in anode_iota +]; + +local truth_fork = [ + g.pipeline([ + deposplats[n], + hio_truth[n], + magnifyio.truth_pipe[n], + g.pnode({ type: 'DumpFrames', name: 'truth_fork%d'%n }, nin=1, nout=0) + ], + 'truth_fork%d' % n) + for n in anode_iota +]; + +local depo_fanout = [g.pnode({ + type:'DepoFanout', + name:'depo_fanout-%d'%n, + data:{ + multiplicity:2, + tags: [], + }}, nin=1, nout=2) for n in anode_iota]; +local frame_fanin = [g.pnode({ + type: 'FrameFanin', + name: 'frame_fanin-%d'%n, + data: { + multiplicity: 2, + tags: [], + }}, nin=2, nout=1) for n in anode_iota]; + +local frame_sink = g.pnode({ type: 'DumpFrames' }, nin=1, nout=0); + +local multipass = [g.intern(innodes=[depo_fanout[n]], centernodes=[truth_fork[n], reco_fork[n]], outnodes=[], + edges = [ + g.edge(depo_fanout[n], truth_fork[n], 0, 0), + g.edge(depo_fanout[n], reco_fork[n], 1, 0)]) for n in anode_iota]; + +// local multipass = [reco_fork[n] for n in anode_iota]; + +local outtags = ['orig%d' % n for n in anode_iota]; +// local bi_manifold = f.fanpipe('DepoFanout', multipass, 'FrameFanin', 'sn_mag_nf', outtags); + + +local depo_fanout_1st = g.pnode({ + type:'DepoFanout', + name:'depo_fanout_1st', + data:{ + multiplicity:nanodes, + tags: [], + }}, nin=1, nout=nanodes); +local bi_manifold = g.intern(innodes=[depo_fanout_1st], centernodes=multipass, outnodes=[], + edges = [ + g.edge(depo_fanout_1st, multipass[n], n, 0) for n in anode_iota + ], +); + +local 
retagger = g.pnode({ + type: 'Retagger', + data: { + // Note: retagger keeps tag_rules an array to be like frame fanin/fanout. + tag_rules: [{ + // Retagger also handles "frame" and "trace" like fanin/fanout + // merge separately all traces like gaussN to gauss. + frame: { + '.*': 'orig', + }, + merge: { + 'orig\\d': 'daq', + }, + }], + }, +}, nin=1, nout=1); + +//local frameio = io.numpy.frames(output); +local sink = sim.frame_sink; + +// trackdepo as input +local graph = g.intern(innodes=[track_depos], centernodes=[drifter, depo_fanout_1st]+multipass, outnodes=[], + edges = + [ + g.edge(track_depos, drifter, 0, 0), + g.edge(drifter, depo_fanout_1st, 0, 0), + ] + + [g.edge(depo_fanout_1st, multipass[n], n, 0) for n in anode_iota], + ); +local app = { + type: 'Pgrapher', + data: { + edges: g.edges(graph), + }, +}; +local cmdline = { + type: "wire-cell", + data: { + plugins: ["WireCellPgraph", "WireCellGen","WireCellSio","WireCellSigProc","WireCellRoot","WireCellLarsoft","WireCellHio","WireCellTbb",'WireCellImg',"WireCellPytorch"], + // plugins: ["WireCellGen", "WireCellPgraph", "WireCellSio", "WireCellSigProc", "WireCellRoot", "WireCellPytorch"], + apps: ["Pgrapher"] + } +}; +[cmdline] + g.uses(graph) + [app] + diff --git a/cfg/pgrapher/experiment/pdhd/wct-sim-fans.jsonnet b/cfg/pgrapher/experiment/pdhd/wct-sim-fans.jsonnet new file mode 100644 index 000000000..fd75f828d --- /dev/null +++ b/cfg/pgrapher/experiment/pdhd/wct-sim-fans.jsonnet @@ -0,0 +1,175 @@ +local g = import "pgraph.jsonnet"; +local f = import "pgrapher/common/funcs.jsonnet"; +local wc = import "wirecell.jsonnet"; + +local io = import 'pgrapher/common/fileio.jsonnet'; +local tools_maker = import 'pgrapher/common/tools.jsonnet'; +local params = import 'pgrapher/experiment/pdhd/simparams.jsonnet'; +local fcl_params = { + use_dnnroi: false, +}; + +local tools = tools_maker(params); +// local tools_all = tools_maker(params); +// local tools = tools_all {anodes: [tools_all.anodes[n] for n in 
[0,1,2,3]]}; + +local sim_maker = import 'pgrapher/experiment/pdhd/sim.jsonnet'; +local sim = sim_maker(params, tools); + +// Deposit and drifter /////////////////////////////////////////////////////////////////////////////// + +local thetaXZ = 45*wc.deg; + +local stubby = { + tail: wc.point(-100, 100, 100, wc.cm), + head: wc.point(-100*(1 + std.tan(thetaXZ)), 100, 100*(1+1), wc.cm), +}; + +local tracklist = [ + + { + time: 0 * wc.us, + charge: -500, // 5000 e/mm + ray: stubby, // params.det.bounds, + }, + +]; + +// local depos = sim.tracks(tracklist, step=1.0 * wc.mm); +local depos = sim.tracks(tracklist, step=0.1 * wc.mm); + + +local drifter = sim.drifter; +local bagger = sim.make_bagger(); + +// Parallel part ////////////////////////////////////////////////////////////////////////////// + + +// local sn_pipes = sim.signal_pipelines; +local sn_pipes = sim.splusn_pipelines; + +local sp_maker = import 'pgrapher/experiment/pdhd/sp.jsonnet'; +local sp_override = { + sparse: false, + use_roi_debug_mode: false, + use_multi_plane_protection: true, + mp_tick_resolution: 4, +}; +local sp = sp_maker(params, tools, sp_override); +local sp_pipes = [sp.make_sigproc(a) for a in tools.anodes]; + +local img = import 'pgrapher/experiment/pdhd/img.jsonnet'; +local img_maker = img(); +local img_pipes = [img_maker.per_anode(a) for a in tools.anodes]; + +local magoutput = 'protodunehd-sim-fans.root'; +local magnify = import 'pgrapher/experiment/pdhd/magnify-sinks.jsonnet'; +local sinks = magnify(tools, magoutput); +local frame_tap = function(name, outname, tags, digitize) { + ret: g.fan.tap('FrameFanout', g.pnode({ + type: "FrameFileSink", + name: name, + data: { + outname: outname, + tags: tags, + digitize: digitize, + }, + }, nin=1, nout=0), name), +}.ret; +local frame_sink = function(name, outname, tags, digitize) { + ret: g.pnode({ + type: "FrameFileSink", + name: name, + data: { + outname: outname, + tags: tags, + digitize: digitize, + }, + }, nin=1, nout=0), +}.ret; + +// local 
dnnroi = import 'pgrapher/experiment/pdhd/dnnroi.jsonnet'; +// local ts = { +// type: "TorchService", +// name: "dnnroi", +// data: { +// model: "ts-model/unet-l23-cosmic500-e50.ts", +// device: "gpucpu", +// concurrency: 1, +// }, +// }; + +local parallel_pipes = [ + g.pipeline([ + sn_pipes[n], + // frame_tap( + // name="orig%d"%tools.anodes[n].data.ident, + // outname="frame-orig%d.tar.bz2"%tools.anodes[n].data.ident, + // tags=["orig%d"%tools.anodes[n].data.ident], + // digitize=true + // ), + // sinks.orig_pipe[n], + sp_pipes[n], + // frame_tap( + // name="gauss%d"%tools.anodes[n].data.ident, + // outname="frame-gauss%d.tar.bz2"%tools.anodes[n].data.ident, + // tags=["gauss%d"%tools.anodes[n].data.ident], + // digitize=false + // ), + // sinks.decon_pipe[n], + // sinks.debug_pipe[n], // use_roi_debug_mode=true in sp.jsonnet + // dnnroi(tools.anodes[n], ts, output_scale=1.2), + // sinks.dnnroi_pipe[n], + // g.pnode({type: "DumpFrames", name: "dumpframes-%d"%tools.anodes[n].data.ident}, nin = 1, nout=0), + img_pipes[n], + ], + 'parallel_pipe_%d' % n) + for n in std.range(0, std.length(tools.anodes) - 1)]; + +local outtags = []; +local tag_rules = { + frame: { + '.*': 'framefanin', + }, + trace: {['gauss%d' % anode.data.ident]: ['gauss%d' % anode.data.ident] for anode in tools.anodes} + + {['wiener%d' % anode.data.ident]: ['wiener%d' % anode.data.ident] for anode in tools.anodes} + + {['threshold%d' % anode.data.ident]: ['threshold%d' % anode.data.ident] for anode in tools.anodes} + + {['dnnsp%d' % anode.data.ident]: ['dnnsp%d' % anode.data.ident] for anode in tools.anodes}, +}; + +// local parallel_graph = f.multifanout('DepoSetFanout', parallel_pipes, [1,4], [4,1], 'sn_mag', tag_rules); + +local nanodes = std.length(tools.anodes); +local parallel_graph = f.multifanout('DepoSetFanout', parallel_pipes, [1,nanodes], [nanodes,1], 'sn_mag', tag_rules); + + + +// Only one sink //////////////////////////////////////////////////////////////////////////// + + +local sink 
= sim.frame_sink; + + +// Final pipeline ////////////////////////////////////////////////////////////////////////////// + +// local graph = g.pipeline([depos, drifter, bagger, parallel_graph, sink], "main"); +local graph = g.pipeline([depos, drifter, bagger, parallel_graph], "main"); +// local graph = g.pipeline([depos, drifter, bagger, parallel_pipes[0]], "main"); + +local app = { + type: 'Pgrapher', //Pgrapher, TbbFlow + data: { + edges: g.edges(graph), + }, +}; + +local cmdline = { + type: "wire-cell", + data: { + // plugins: ["WireCellGen", "WireCellPgraph", "WireCellSio", "WireCellSigProc", "WireCellRoot", "WireCellTbb", "WireCellImg", "WireCellPytorch"], + plugins: ["WireCellGen", "WireCellPgraph", "WireCellSio", "WireCellSigProc", "WireCellRoot", "WireCellTbb", "WireCellImg"], + apps: ["Pgrapher"] + } +}; + +[cmdline] + g.uses(graph) + [app] diff --git a/cfg/pgrapher/experiment/sbnd/chndb-base.jsonnet b/cfg/pgrapher/experiment/sbnd/chndb-base.jsonnet index 319dd098d..392abdbd8 100644 --- a/cfg/pgrapher/experiment/sbnd/chndb-base.jsonnet +++ b/cfg/pgrapher/experiment/sbnd/chndb-base.jsonnet @@ -20,167 +20,15 @@ function(params, anode, field, n, rms_cuts=[]) // For MicroBooNE, channel groups is a 2D list. Each element is // one group of channels which should be considered together for // coherent noise filtering. 
- // groups: [std.range(g*48, (g+1)*48-1) for g in std.range(0,171)], - groups: [std.range(n * 2560 + u * 40, n * 2560 + (u + 1) * 40 - 1) for u in std.range(0, 19)] - + [std.range(n * 2560 + 800 + v * 40, n * 2560 + 800 + (v + 1) * 40 - 1) for v in std.range(0, 19)] - + [std.range(n * 2560 + 1600 + w * 48, n * 2560 + 1600 + (w + 1) * 48 - 1) for w in std.range(0, 19)], + //groups: [std.range(g*48, (g+1)*48-1) for g in std.range(0,171)], + groups: [std.range(n * 5632 + g*64, n * 5632 + (g+1)*64-1) for g in std.range(0,87)], + //groups: [std.range(n * 2560 + u * 40, n * 2560 + (u + 1) * 40 - 1) for u in std.range(0, 19)] + // + [std.range(n * 2560 + 800 + v * 40, n * 2560 + 800 + (v + 1) * 40 - 1) for v in std.range(0, 19)] + // + [std.range(n * 2560 + 1600 + w * 48, n * 2560 + 1600 + (w + 1) * 48 - 1) for w in std.range(0, 19)], // Externally determined "bad" channels. - bad: [ - # CE group: Inactive FE - 4411, # femb515x12 - 4412, # femb515x13 - 9990, # femb605x10 - 11842, # femb120x03 - # CE group: Broken connection - 1, # femb311u39 - 400, # femb301u40 - 401, # femb301u39 - 800, # femb320v01 - 801, # femb320v02 - 876, # femb319v37 - 1200, # femb310v01 - 2961, # femb501u39 - 5321, # femb216u39 - 5363, # femb217u37 - 6132, # femb215v13 - 7058, # femb213x03 - 7295, # femb202x01 - 7681, # femb611u39 - 8080, # femb601u40 - 8328, # femb607u32 - 8480, # femb620v01 - 9282, # femb620x03 - 9283, # femb620x04 - 9736, # femb611x25 - 9854, # femb602x02 - 10800, # femb105u40 - 11024, # femb110u16 - 11457, # femb110v18 - 11459, # femb110v20 - 11463, # femb110v24 - 11469, # femb110v30: bad in 4875-185-1500, ok in 5803-76-3200 - 11517, # femb109v38 - 11669, # femb105v30 - 11679, # femb105v40 - 12756, # femb110x44 - 12801, # femb411u39 - 13001, # femb416u39 - 13081, # femb418u39 - # CE group: ENC > 2000e - 4410, # femb515x11: High noise, no signal 5008-76 - #----- - # CE group excessive sticky - #femb318x - 1719, # femb318x24 - 5125, # femb211u35 - 7551, # femb208x33 - 7190, # 
femb211x39 - 7194, # femb211x43 - 7918, # femb616u02, sticky pedestal (three peaks) - #----- - # CE group: good. - # femb311 - 2, # femb311u38, no signal - 4, # femb311u36, very sticky pedestal 5308-76 - 1632, # femb320x33, very sticky pedestal 5308-76 - 2169, # femb302x07, Mostly stuck on one bad code, 5308-76 - 2450, # femb308x14, Very noisy (1000 ADC) in run 5759 (20nov2019) - 3541, # femb516v22, very sticky--signal near zero half the time (5308-81) - 3543, # femb516v24, very sticky--signal near zero half the time (5308-81) - 3661, # femb513v22, most signal near zero (5308-81) - 3663, # femb513v24, most signal near zero (5308-81) - 4061, # femb503v22, most signal near zero (5308-81) - 4063, # femb503v24, most signal near zero (5308-81) - 4141, # femb501v22, signal near zero half the time (5308-81) - 4143, # femb501v24, signal sometimes near zero (5308-81) - 4377, # femb516x26, very sticky pedestal - 4379, # femb516x28, very sticky pedestal - 4381, # femb516x30, very sticky pedestal - 4383, # femb516x32, very sticky pedestal - 4385, # femb516x34, very sticky pedestal - 4387, # femb516x36, very sticky pedestal - 4521, # femb513x26, very sticky pedestal - 4523, # femb513x28, very sticky pedestal - 4525, # femb513x30, very sticky pedestal - 4527, # femb513x32, very sticky pedestal - 4529, # femb513x34, very sticky pedestal - 4531, # femb513x36, very sticky pedestal - 4652, # femb501x36, very sticky pedestal - 4654, # femb501x34, very sticky pedestal - 4656, # femb501x32, very sticky pedestal - 4658, # femb501x30, very sticky pedestal - 4660, # femb501x28, very sticky pedestal - 4658, # femb501x26, very sticky pedestal - 4748, # femb503x36, very sticky pedestal - 4750, # femb503x34, very sticky pedestal - 4752, # femb503x32, very sticky pedestal - 4754, # femb503x30, very sticky pedestal - 4756, # femb503x28, very sticky pedestal - 4758, # femb503x26, very sticky pedestal - 5361, # femb217u39, no signal - 7680, # femb611u40: No signal in 5308-76, end wire - 8501, # 
femb620v22, very sticky pedestal - 8503, # femb620v24, very sticky pedestal - 8821, # femb612v22, very sticky pedestal - 8823, # femb612v24, very sticky pedestal - 9261, # femb601v22, very sticky pedestal - 9263, # femb601v24, very sticky pedestal - 9305, # femb620x26, very sticky pedestal - 9307, # femb620x28, very sticky pedestal - 9309, # femb620x30, very sticky pedestal - 9311, # femb620x32, very sticky pedestal - 9313, # femb620x34, very sticky pedestal - 9315, # femb620x36, very sticky pedestal - 9689, # femb612x26, very sticky pedestal - 9691, # femb612x28, very sticky pedestal - 9693, # femb612x30, very sticky pedestal - 9695, # femb612x32, very sticky pedestal - 9697, # femb612x34, very sticky pedestal - 9699, # femb612x36, very sticky pedestal - 9772, # femb601x26, very sticky pedestal - 9774, # femb601x28, very sticky pedestal - 9776, # femb601x30, very sticky pedestal - 9778, # femb601x32, very sticky pedestal - 9780, # femb601x34, very sticky pedestal - 9782, # femb601x36, very sticky pedestal - 10102, # femb608x42, mostly stuck on one code - 10189, # femb609x03, mostly stuck on one code - 10697, # femb102u23, mostly stuck on a few classic codes - 10907, # femb107u13, mostly stuck on one code - 11203, # femb116v04, stuck on many classic codes - 11270, # femb115v31, stuck on many classic codes - 11902, # femb119x15, stuck on two classic codes - 12324, # femb101x44, stuck on many classic codes - 12333, # femb101x35, stuck on many classic codes - 12744, # femb109x08, stuck on many classic codes - 13363, # femb405u37, very noisy, nosignal 5308-76-4800 - - #----- - # These 16 channels are an intermitently bad ASIC. - # Matt W. 19oct2018. 
- # femb316u - 200, # femb316u40 - 202, # femb316u38 - 204, # femb316u36 - 206, # femb316u34 - 208, # femb316u32 - # femb316v - 991, # femb316v32 - 993, # femb316v34 - 995, # femb316v36 - 997, # femb316v38 - 999, # femb316v40 - # femb316x - 1829, # femb316x38 - 1831, # femb316x40 - 1833, # femb316x42 - 1835, # femb316x44 - 1837, # femb316x46 - 1839 # femb316x48 - #----- -], + bad: [], // Overide defaults for specific channels. If an info is // mentioned for a particular channel in multiple objects in this @@ -193,8 +41,9 @@ function(params, anode, field, n, rms_cuts=[]) // repeat values found here in subsequent entries unless you // wish to change them. { - channels: std.range(n * 2560, (n + 1) * 2560 - 1), - nominal_baseline: 2048.0, // adc count + //channels: std.range(n * 2560, (n + 1) * 2560 - 1), + channels: std.range(n * 5632, n * 5632 + 5631) + nominal_baseline: 2001.0, // adc count [879.5 mV] gain_correction: 1.0, // unitless response_offset: 0.0, // ticks? pad_window_front: 10, // ticks? 
@@ -221,13 +70,15 @@ function(params, anode, field, n, rms_cuts=[]) }, + { //channels: { wpid: wc.WirePlaneId(wc.Ulayer) }, - channels: std.range(n * 2560, n * 2560 + 800- 1), - freqmasks: [ - { value: 1.0, lobin: 0, hibin: $.nsamples - 1 }, - { value: 0.0, lobin: 169, hibin: 173 }, - { value: 0.0, lobin: 513, hibin: 516 }, +// channels: std.range(n * 2560, n * 2560 + 800- 1), + channels: std.range(n * 5632, n * 5632 + 1984-1), + freqmasks: [ +// { value: 1.0, lobin: 0, hibin: $.nsamples - 1 }, +// { value: 0.0, lobin: 169, hibin: 173 }, +// { value: 0.0, lobin: 513, hibin: 516 }, ], /// this will use an average calculated from the anode // response: { wpid: wc.WirePlaneId(wc.Ulayer) }, @@ -242,11 +93,12 @@ function(params, anode, field, n, rms_cuts=[]) { //channels: { wpid: wc.WirePlaneId(wc.Vlayer) }, - channels: std.range(n * 2560 + 800, n * 2560 + 1600- 1), +// channels: std.range(n * 2560 + 800, n * 2560 + 1600- 1), + channels: std.range(n * 5632 + 1984, n * 5632 + 3968-1), freqmasks: [ - { value: 1.0, lobin: 0, hibin: $.nsamples - 1 }, - { value: 0.0, lobin: 169, hibin: 173 }, - { value: 0.0, lobin: 513, hibin: 516 }, + // { value: 1.0, lobin: 0, hibin: $.nsamples - 1 }, + // { value: 0.0, lobin: 169, hibin: 173 }, + // { value: 0.0, lobin: 513, hibin: 516 }, ], /// this will use an average calculated from the anode // response: { wpid: wc.WirePlaneId(wc.Vlayer) }, @@ -259,19 +111,21 @@ function(params, anode, field, n, rms_cuts=[]) }, local freqbinner = wc.freqbinner(params.daq.tick, params.nf.nsamples); - local harmonic_freqs = [f*wc.kilohertz for f in + local harmonic_freqs = [ + //f*wc.kilohertz for f in // [51.5, 102.8, 154.2, 205.5, 256.8, 308.2, 359.2, 410.5, 461.8, 513.2, 564.5, 615.8] - [51.5, 77.2, 102.8, 128.5, 154.2, 180.0, 205.5, 231.5, 256.8, 282.8, 308.2, 334.0, 359.2, 385.5, 410.5, 461.8, 513.2, 564.5, 615.8, 625.0] + //[51.5, 77.2, 102.8, 128.5, 154.2, 180.0, 205.5, 231.5, 256.8, 282.8, 308.2, 334.0, 359.2, 385.5, 410.5, 461.8, 513.2, 564.5, 
615.8, 625.0] ]; - + { //channels: { wpid: wc.WirePlaneId(wc.Wlayer) }, - channels: std.range(n * 2560 + 1600, n * 2560 + 2560- 1), - nominal_baseline: 400.0, +// channels: std.range(n * 2560 + 1600, n * 2560 + 2560- 1), + channels: std.range(n * 5632 + 3968, n * 5632 + 5632-1), + nominal_baseline: 650, decon_limit: 0.05, decon_limit1: 0.08, freqmasks: freqbinner.freqmasks(harmonic_freqs, 5.0*wc.kilohertz), }, - + ] + rms_cuts, } diff --git a/cfg/pgrapher/experiment/sbnd/chndb-perfect.jsonnet b/cfg/pgrapher/experiment/sbnd/chndb-perfect.jsonnet index 22b16ccc3..176df07b6 100644 --- a/cfg/pgrapher/experiment/sbnd/chndb-perfect.jsonnet +++ b/cfg/pgrapher/experiment/sbnd/chndb-perfect.jsonnet @@ -1,9 +1,11 @@ -// Perfect channel noise DB object configuration for microboone. - +// Base channel noise DB object configuration for microboone +// This does not include any run dependent RMS cuts. +// See chndb.jsonnet +local handmade = import 'chndb-resp.jsonnet'; local wc = import 'wirecell.jsonnet'; -function(params, anode, field, n) +function(params, anode, field, n, rms_cuts=[]) { anode: wc.tn(anode), field_response: wc.tn(field), @@ -11,97 +13,119 @@ function(params, anode, field, n) tick: params.daq.tick, // This sets the number of frequency-domain bins used in the noise - // filtering. It is expected that time-domain waveforms have the - // same number of samples. + // filtering. It is not necessarily true that the time-domain + // waveforms have the same number of ticks. This must be non-zero. nsamples: params.nf.nsamples, // For MicroBooNE, channel groups is a 2D list. Each element is // one group of channels which should be considered together for // coherent noise filtering. 
//groups: [std.range(g*48, (g+1)*48-1) for g in std.range(0,171)], - groups: [std.range(n * 2560 + u * 40, n * 2560 + (u + 1) * 40 - 1) for u in std.range(0, 19)] - + [std.range(n * 2560 + 800 + v * 40, n * 2560 + 800 + (v + 1) * 40 - 1) for v in std.range(0, 19)] - + [std.range(n * 2560 + 1600 + w * 48, n * 2560 + 1600 + (w + 1) * 48 - 1) for w in std.range(0, 19)], + groups: [std.range(n * 5632 + g*64, n * 5632 + (g+1)*64-1) for g in std.range(0,87)], + //groups: [std.range(n * 2560 + u * 40, n * 2560 + (u + 1) * 40 - 1) for u in std.range(0, 19)] + // + [std.range(n * 2560 + 800 + v * 40, n * 2560 + 800 + (v + 1) * 40 - 1) for v in std.range(0, 19)] + // + [std.range(n * 2560 + 1600 + w * 48, n * 2560 + 1600 + (w + 1) * 48 - 1) for w in std.range(0, 19)], + // Externally determined "bad" channels. - //bad: [], - //bad: // shorted-U - // [296] + std.range(298, 315) + [317] + std.range(319,327) + std.range(336, 337) - // + std.range(343, 345) + std.range(348, 351) + std.range(376, 400) + std.range(410, 445) - // + std.range(447, 484) + std.range(501, 503) + std.range(505, 520) + std.range(522, 524) - // + std.range(536, 559) + std.range(561, 592) + std.range(595, 598) + std.range(600, 632) - // + std.range(634, 652) + [654] + std.range(656,671) - // // inverse "V" due to disconnected MB - // + std.range(864, 911) - // + std.range(3936,3983) - // // shorted-Y - // + std.range(7136, 7199) + std.range(7201, 7214) + std.range(7216, 7263), + bad: [1000,5000,8000,12000], # set dummy "bad" channels to test cmm // Overide defaults for specific channels. If an info is // mentioned for a particular channel in multiple objects in this // list then last mention wins. - /*channel_info: [ - - // First entry provides default channel info across ALL - // channels. Subsequent entries override a subset of channels - // with a subset of these entries. There's no reason to - // repeat values found here in subsequent entries unless you - // wish to change them. 
- { - channels: std.range(0, 2400 + 2400 + 3456 - 1), - nominal_baseline: 2048.0, // adc count - gain_correction: 1.0, // unitless - response_offset: 0.0, // ticks? - pad_window_front: 10, // ticks? - pad_window_back: 10, // ticks? - decon_limit: 0.02, - decon_limit1: 0.09, - adc_limit: 15, - min_rms_cut: 1.0, - max_rms_cut: 5.0, - - // parameter used to make "rcrc" spectrum - rcrc: 1.0*wc.millisecond, - - // parameters used to make "config" spectrum - reconfig : {}, - - // list to make "noise" spectrum mask - freqmasks: [], - - // field response waveform to make "response" spectrum. - response: {}, - - }, - - { - channels: {wpid: wc.WirePlaneId(wc.Ulayer)}, - pad_window_front: 20, - decon_limit: 0.02, - decon_limit1: 0.09, - }, - - { - channels: {wpid: wc.WirePlaneId(wc.Vlayer)}, - decon_limit: 0.01, - decon_limit1: 0.08, - }, - - { - channels: {wpid: wc.WirePlaneId(wc.Wlayer)}, - nominal_baseline: 400.0, - decon_limit: 0.05, - decon_limit1: 0.08, - }, - - { // these are before hardware fix - channels: params.nf.misconfigured.channels, - reconfig: { - from: {gain: params.nf.misconfigured.gain, - shaping: params.nf.misconfigured.shaping}, - to: {gain: params.elec.gain, - shaping: params.elec.shaping}, - } - }, - ],*/ + channel_info: [ + + // First entry provides default channel info across ALL + // channels. Subsequent entries override a subset of channels + // with a subset of these entries. There's no reason to + // repeat values found here in subsequent entries unless you + // wish to change them. + { + //channels: std.range(n * 2560, (n + 1) * 2560 - 1), + channels: std.range(n * 5632, n * 5632 + 5631), + nominal_baseline: 2001.0, // adc count [879.5 mV] + gain_correction: 1.0, // unitless + response_offset: 0.0, // ticks? + pad_window_front: 10, // ticks? + pad_window_back: 10, // ticks? + decon_limit: 0.02, + decon_limit1: 0.09, + adc_limit: 15, + roi_min_max_ratio: 0.8, // default 0.8 + min_rms_cut: 1.0, // units??? + max_rms_cut: 30.0, // units??? 
+ + // parameter used to make "rcrc" spectrum + rcrc: 1.1 * wc.millisecond, // 1.1 for collection, 3.3 for induction + rc_layers: 1, // default 2 + + // parameters used to make "config" spectrum + reconfig: {}, + + // list to make "noise" spectrum mask + freqmasks: [], + + // field response waveform to make "response" spectrum. + response: {}, + + }, + + + { + //channels: { wpid: wc.WirePlaneId(wc.Ulayer) }, +// channels: std.range(n * 2560, n * 2560 + 800- 1), + channels: std.range(n * 5632, n * 5632 + 1984-1), + freqmasks: [ +// { value: 1.0, lobin: 0, hibin: $.nsamples - 1 }, +// { value: 0.0, lobin: 169, hibin: 173 }, +// { value: 0.0, lobin: 513, hibin: 516 }, + ], + /// this will use an average calculated from the anode + // response: { wpid: wc.WirePlaneId(wc.Ulayer) }, + /// this uses hard-coded waveform. + response: { waveform: handmade.u_resp, waveformid: wc.Ulayer }, + response_offset: 120, // offset of the negative peak + pad_window_front: 20, + decon_limit: 0.02, + decon_limit1: 0.07, + roi_min_max_ratio: 3.0, + }, + + { + //channels: { wpid: wc.WirePlaneId(wc.Vlayer) }, +// channels: std.range(n * 2560 + 800, n * 2560 + 1600- 1), + channels: std.range(n * 5632 + 1984, n * 5632 + 3968-1), + freqmasks: [ + // { value: 1.0, lobin: 0, hibin: $.nsamples - 1 }, + // { value: 0.0, lobin: 169, hibin: 173 }, + // { value: 0.0, lobin: 513, hibin: 516 }, + ], + /// this will use an average calculated from the anode + // response: { wpid: wc.WirePlaneId(wc.Vlayer) }, + /// this uses hard-coded waveform. 
+ response: { waveform: handmade.v_resp, waveformid: wc.Vlayer }, + response_offset: 124, + decon_limit: 0.01, + decon_limit1: 0.08, + roi_min_max_ratio: 1.5, + }, + + local freqbinner = wc.freqbinner(params.daq.tick, params.nf.nsamples); + local harmonic_freqs = [ + //f*wc.kilohertz for f in + // [51.5, 102.8, 154.2, 205.5, 256.8, 308.2, 359.2, 410.5, 461.8, 513.2, 564.5, 615.8] + //[51.5, 77.2, 102.8, 128.5, 154.2, 180.0, 205.5, 231.5, 256.8, 282.8, 308.2, 334.0, 359.2, 385.5, 410.5, 461.8, 513.2, 564.5, 615.8, 625.0] + ]; + + { + //channels: { wpid: wc.WirePlaneId(wc.Wlayer) }, +// channels: std.range(n * 2560 + 1600, n * 2560 + 2560- 1), + channels: std.range(n * 5632 + 3968, n * 5632 + 5632-1), + nominal_baseline: 650, + decon_limit: 0.05, + decon_limit1: 0.08, + freqmasks: freqbinner.freqmasks(harmonic_freqs, 5.0*wc.kilohertz), + }, + + ] + rms_cuts, } diff --git a/cfg/pgrapher/experiment/sbnd/clus.jsonnet b/cfg/pgrapher/experiment/sbnd/clus.jsonnet new file mode 100644 index 000000000..673dae89e --- /dev/null +++ b/cfg/pgrapher/experiment/sbnd/clus.jsonnet @@ -0,0 +1,175 @@ +local wc = import "wirecell.jsonnet"; +local g = import "pgraph.jsonnet"; +local f = import 'pgrapher/common/funcs.jsonnet'; +local params = import "pgrapher/experiment/sbnd/simparams.jsonnet"; +local tools_maker = import 'pgrapher/common/tools.jsonnet'; +local tools = tools_maker(params); +local anodes = tools.anodes; + +local clus ( + anode, + face = 0, + drift_speed = 1.56 * wc.mm / wc.us, // 1.56 for sbnd + time_offset = -200 * wc.us, + bee_dir = "data", + bee_zip = "mabc.zip", + initial_index = "0", + initial_runNo = "1", + initial_subRunNo = "1", + initial_eventNo = "1") = +{ + local index = std.parseInt(initial_index), + + local LrunNo = std.parseInt(initial_runNo), + local LsubRunNo = std.parseInt(initial_subRunNo), + local LeventNo = std.parseInt(initial_eventNo), + + local drift_sign = if anode.data.ident%2 == 0 then 1 else -1, + // local drift_sign = 1, + + // Note, the 
"sampler" must be unique to the "sampling". + local bs_live = { + type: "BlobSampler", + name: "%s-%d"%[anode.name, face], + data: { + drift_speed: drift_speed*drift_sign, + time_offset: time_offset, + strategy: [ + // "center", + // "corner", + // "edge", + // "bounds", + "stepped", + // {name:"grid", step:1, planes:[0,1]}, + // {name:"grid", step:1, planes:[1,2]}, + // {name:"grid", step:1, planes:[2,0]}, + // {name:"grid", step:2, planes:[0,1]}, + // {name:"grid", step:2, planes:[1,2]}, + // {name:"grid", step:2, planes:[2,0]}, + ], + // extra: [".*"] // want all the extra + extra: [".*wire_index"] // + // extra: [] // + }}, + local bs_dead = { + type: "BlobSampler", + name: "%s-%d"%[anode.name, face], + data: { + strategy: [ + "center", + ], + extra: [".*"] // want all the extra + }}, + + + local geom_helper = { + type: "SimpleClusGeomHelper", + name: "uboone", + data: { + a0f0: { + face: 0, + pitch_u: 3 * wc.mm, + pitch_v: 3 * wc.mm, + pitch_w: 3 * wc.mm, + angle_u: 1.0472, // 60 degrees + angle_v: -1.0472, // -60 degrees + angle_w: 0, // 0 degrees + drift_speed: drift_speed*1, + tick: 0.5 * wc.us, // 0.5 mm per tick + tick_drift: self.drift_speed * self.tick, + time_offset: time_offset, + nticks_live_slice: 4, + FV_xmin: 1 * wc.cm, + FV_xmax: 255 * wc.cm, + FV_ymin: -99.5 * wc.cm, + FV_ymax: 101.5 * wc.cm, + FV_zmin: 15 * wc.cm, + FV_zmax: 1022 * wc.cm, + FV_xmin_margin: 2 * wc.cm, + FV_xmax_margin: 2 * wc.cm, + FV_ymin_margin: 2.5 * wc.cm, + FV_ymax_margin: 2.5 * wc.cm, + FV_zmin_margin: 3 * wc.cm, + FV_zmax_margin: 3 * wc.cm + }, + a1f1: self.a0f0 { + face: 1, + drift_speed: drift_speed*-1, + } + } + }, + + local ptb = g.pnode({ + type: "PointTreeBuilding", + name: "%s-%d"%[anode.name, face], + data: { + samplers: { + "3d": wc.tn(bs_live), + "dead": wc.tn(bs_dead), + }, + multiplicity: 2, + tags: ["live", "dead"], + anode: wc.tn(anode), + face: face, + geom_helper: wc.tn(geom_helper), + } + }, nin=2, nout=1, uses=[bs_live, bs_dead]), + + local mabc = 
g.pnode({ + type: "MultiAlgBlobClustering", + name: "%s-%d"%[anode.name, face], + data: { + inpath: "pointtrees/%d", + outpath: "pointtrees/%d", + perf: true, + bee_dir: bee_dir, // "data/0/0", // not used + bee_zip: "mabc-%s-face%d.zip"%[anode.name, face], + bee_detector: "sbnd", + initial_index: index, // New RSE configuration + use_config_rse: true, // Enable use of configured RSE + runNo: LrunNo, + subRunNo: LsubRunNo, + eventNo: LeventNo, + save_deadarea: true, + anode: wc.tn(anode), + face: face, + geom_helper: wc.tn(geom_helper), + func_cfgs: [ + {name: "clustering_live_dead", dead_live_overlap_offset: 2}, + {name: "clustering_extend", flag: 4, length_cut: 60 * wc.cm, num_try: 0, length_2_cut: 15 * wc.cm, num_dead_try: 1}, + {name: "clustering_regular", length_cut: 60*wc.cm, flag_enable_extend: false}, + {name: "clustering_regular", length_cut: 30*wc.cm, flag_enable_extend: true}, + {name: "clustering_parallel_prolong", length_cut: 35*wc.cm}, + {name: "clustering_close", length_cut: 1.2*wc.cm}, + {name: "clustering_extend_loop", num_try: 3}, + {name: "clustering_separate", use_ctpc: true}, + {name: "clustering_connect1"}, + {name: "clustering_deghost"}, + {name: "clustering_examine_x_boundary"}, + // {name: "clustering_protect_overclustering"}, + // {name: "clustering_neutrino"}, + {name: "clustering_isolated"}, + ], + } + }, nin=1, nout=1, uses=[geom_helper]), + + local sink = g.pnode({ + type: "TensorFileSink", + name: "%s-%d"%[anode.name, face], + data: { + outname: "clus-%s-face%d.tar.gz"%[anode.name, face], + prefix: "clustering_", // json, numpy, dummy + dump_mode: true, + } + }, nin=1, nout=0), + + clus_pipe(dump=true) :: + if dump then + g.pipeline([ptb, mabc, sink], "clus_pipe-%s-%d"%[anode.name, face]) + else + g.pipeline([ptb, mabc], "clus_pipe-%s-%d"%[anode.name, face]), +}; + +function () { + per_volume(anode, face=0, dump=true) :: clus(anode, face=face).clus_pipe(dump), +} \ No newline at end of file diff --git 
a/cfg/pgrapher/experiment/sbnd/img.jsonnet b/cfg/pgrapher/experiment/sbnd/img.jsonnet index c674c7674..9f726a263 100644 --- a/cfg/pgrapher/experiment/sbnd/img.jsonnet +++ b/cfg/pgrapher/experiment/sbnd/img.jsonnet @@ -1,134 +1,365 @@ -// some functions to help build pipelines for imaging. These are -// mostly per-apa but tiling portions are per-face. +local wc = import "wirecell.jsonnet"; +local g = import "pgraph.jsonnet"; +local f = import 'pgrapher/common/funcs.jsonnet'; +local params = import "pgrapher/experiment/sbnd/simparams.jsonnet"; //added Ewerton 2023-09-10 +local tools_maker = import 'pgrapher/common/tools.jsonnet'; +local tools = tools_maker(params); +local anodes = tools.anodes; -local g = import 'pgraph.jsonnet'; -local wc = import 'wirecell.jsonnet'; -{ +// added Ewerton 2023-08-23 +local nanodes = std.length(tools.anodes); +local anode_iota = std.range(0, nanodes - 1); + +local img = { + // IFrame -> IFrame + pre_proc :: function(anode, aname = "") { + + local waveform_map = { + type: 'WaveformMap', + name: 'wfm', + data: { + //filename: "microboone-charge-error.json.bz2", //commented 2023-10-17 + filename: "sbnd-charge-error.json.bz2", //added 2023-10-17 + }, uses: [],}, + + local charge_err = g.pnode({ + type: 'ChargeErrorFrameEstimator', + name: "cefe-"+aname, + data: { + intag: "gauss%d" % anode.data.ident, + outtag: "gauss_error%d" % anode.data.ident, + anode: wc.tn(anode), + rebin: 4, // this number should be consistent with the waveform_map choice + fudge_factors: [2.31, 2.31, 1.1], // fudge factors for each plane [0,1,2] + time_limits: [12, 800], // the unit of this is in ticks + errors: wc.tn(waveform_map), + }, + }, nin=1, nout=1, uses=[waveform_map, anode]), + + // added Ewerton 2023-08-23 + local chsel_pipes = + g.pnode({ + type: 'ChannelSelector', + //name: 'chsel%d' % n, + name: 'chsel%d' % anode.data.ident, + data: { + channels: std.range(5632 * anode.data.ident, 5632 * (anode.data.ident + 1) - 1), + //tags: ['orig%d' % n], // traces 
tag //commented? Ewerton 2023-09-xx + tags: ['gauss%d' % anode.data.ident, 'wiener%d' % anode.data.ident], // changed Ewerton 2023-09-27 + }, + }, nin=1, nout=1, uses=[anode]), + + local mag = g.pnode({ + type: 'MagnifySink', + name: 'magimgtest%d' % anode.data.ident, + data: { + output_filename: "magoutput.root", + root_file_mode: 'UPDATE', + frames: ['gauss%d' %anode.data.ident, 'wiener%d' %anode.data.ident], + summaries: ['wiener%d' %anode.data.ident], + summary_operator: { ['wiener%d' % anode.data.ident]: 'set' }, + trace_has_tag: true, + anode: wc.tn(anode), + }, + }, nin=1, nout=1, uses=[anode]), + + local cmm_mod = g.pnode({ + type: 'CMMModifier', + name: "cmm-mod-"+aname, + data: { + cm_tag: "bad", + trace_tag: "gauss%d" % anode.data.ident, + anode: wc.tn(anode), + // start: 0, // start veto ... + // end: 9592, // end of veto + // ncount_cont_ch: 2, + // cont_ch_llimit: [296, 2336+4800 ], // veto if continues bad channels + // cont_ch_hlimit: [671, 2463+4800 ], + // ncount_veto_ch: 1, + // veto_ch_llimit: [3684], // direct veto these channels + // veto_ch_hlimit: [3699], + // dead_ch_ncount: 10, + // dead_ch_charge: 1000, + // ncount_dead_ch: 2, + // dead_ch_llimit: [2160, 2080], // veto according to the charge size for dead channels + // dead_ch_hlimit: [2176, 2096], + ncount_org: 1, // organize the dead channel ranges according to these boundaries + org_llimit: [0], // must be ordered ... + org_hlimit: [3400], // must be ordered ... + }, + }, nin=1, nout=1, uses=[anode]), + + local frame_quality_tagging = g.pnode({ + type: 'FrameQualityTagging', + name: "frame-qual-tag-"+aname, + data: { + trace_tag: "gauss%d" % anode.data.ident, + anode: wc.tn(anode), + nrebin: 4, // rebin count ... 
+ length_cut: 3, + time_cut: 3, + ch_threshold: 100, + n_cover_cut1: 12, + n_fire_cut1: 14, + n_cover_cut2: 6, + n_fire_cut2: 6, + fire_threshold: 0.22, + n_cover_cut3: [1200, 1200, 1800 ], + percent_threshold: [0.25, 0.25, 0.2 ], + threshold1: [300, 300, 360 ], + threshold2: [150, 150, 180 ], + min_time: 3180, + max_time: 7870, + flag_corr: 1, + }, + }, nin=1, nout=1, uses=[anode]), + + local frame_masking = g.pnode({ + type: 'FrameMasking', + name: "frame-masking-"+aname, + data: { + cm_tag: "bad", + trace_tags: ['gauss%d' % anode.data.ident,'wiener%d' % anode.data.ident,], //uncommented Ewerton 2023-09-26 + //trace_tags: ['orig%d' % anode.data.ident,], //commented added Ewerton 2023-09-26 + anode: wc.tn(anode), + }, + }, nin=1, nout=1, uses=[anode]), + //ret: g.pipeline([chsel_pipes, mag, cmm_mod, frame_masking, charge_err], "uboone-preproc"), //changed Ewerton 2023-10-05 + ret: g.pipeline([chsel_pipes, cmm_mod, frame_masking, charge_err], "uboone-preproc"), //changed Ewerton 2023-10-05 + }.ret, + // A functio that sets up slicing for an APA. - slicing :: function(anode, aname, tag="", span=4) { + slicing :: function(anode, aname, span=4, active_planes=[0,1,2], masked_planes=[], dummy_planes=[]) { ret: g.pnode({ - type: "SumSlices", + type: "MaskSlices", name: "slicing-"+aname, data: { - tag: tag, tick_span: span, + wiener_tag: "wiener%d" % anode.data.ident, + summary_tag: "wiener%d" % anode.data.ident, + charge_tag: "gauss%d" % anode.data.ident, + error_tag: "gauss_error%d" % anode.data.ident, anode: wc.tn(anode), + min_tbin: 0, + max_tbin: 3400, //we used 0 previously. changed to 3400 to check Ewerton 2023-10-25 (original=8500 PD?) 
+ active_planes: active_planes, + masked_planes: masked_planes, + dummy_planes: dummy_planes, + //nthreshold: [1e-6, 1e-6, 1e-6], + nthreshold: [3.6, 3.6, 3.6], //original + //nthreshold: [2.5, 2.5, 2.5], //changed Ewerton + // nthreshold: [0, 0, 0], //changed Ewerton }, }, nin=1, nout=1, uses=[anode]), }.ret, // A function sets up tiling for an APA incuding a per-face split. tiling :: function(anode, aname) { - - local slice_fanout = g.pnode({ - type: "SliceFanout", - name: "slicefanout-" + aname, - data: { multiplicity: 2 }, - }, nin=1, nout=2), - - local tilings = [g.pnode({ + local tilings = g.pnode({ + //local tilings = [g.pnode({ type: "GridTiling", - name: "tiling-%s-face%d"%[aname, face], + //name: "tiling-%s-face%d"%[aname, face], //commented Ewerton 2023-09-21 + name: "tiling-%s-face%d"%[aname, anode.data.ident], //added Ewerton 2023-09-21 data: { anode: wc.tn(anode), - face: face, + //face: face, //commented Ewerton 2023-09-21 + face: anode.data.ident, //added Ewerton 2023-09-21 + nudge: 1e-2, //original } - }, nin=1, nout=1, uses=[anode]) for face in [0,1]], - - local blobsync = g.pnode({ - type: "BlobSetSync", - name: "blobsetsync-" + aname, - data: { multiplicity: 2 } - }, nin=2, nout=1), - - ret: g.intern( - innodes=[slice_fanout], - outnodes=[blobsync], - centernodes=tilings, - edges= - [g.edge(slice_fanout, tilings[n], n, 0) for n in [0,1]] + - [g.edge(tilings[n], blobsync, 0, n) for n in [0,1]], - name='tiling-' + aname), + //}, nin=1, nout=1, uses=[anode]) for face in [0,1]], //commented Ewerton 2023-10-06 + }, nin=1, nout=1, uses=[anode]) ,//added Ewerton 2023-10-06 + + //ret: tilings[0], //commented Ewerton 2023-10-06 + ret: tilings, //added Ewerton 2023-10-06 + }.ret, + + // + multi_active_slicing_tiling :: function(anode, name, span=4) { + local active_planes = [[0,1,2],[0,1],[1,2],[0,2],], + local masked_planes = [[],[2],[0],[1]], + local iota = std.range(0,std.length(active_planes)-1), + local slicings = [$.slicing(anode, name+"_%d"%n, 
span, active_planes[n], masked_planes[n]) + for n in iota], + local tilings = [$.tiling(anode, name+"_%d"%n) + for n in iota], + local multipass = [g.pipeline([slicings[n],tilings[n]]) for n in iota], + ret: f.fanpipe("FrameFanout", multipass, "BlobSetMerge", "multi_active_slicing_tiling-%s"%anode.name), }.ret, + // + multi_masked_2view_slicing_tiling :: function(anode, name, span=500) { + local dummy_planes = [[2],[0],[1]], + local masked_planes = [[0,1],[1,2],[0,2]], + local iota = std.range(0,std.length(dummy_planes)-1), + local slicings = [$.slicing(anode, name+"_%d"%n, span, + active_planes=[],masked_planes=masked_planes[n], dummy_planes=dummy_planes[n]) + for n in iota], + local tilings = [$.tiling(anode, name+"_%d"%n) + for n in iota], + local multipass = [g.pipeline([slicings[n],tilings[n]]) for n in iota], + ret: f.fanpipe("FrameFanout", multipass, "BlobSetMerge", "multi_masked_slicing_tiling-%s"%anode.name), + }.ret, + + local clustering_policy = "uboone", // uboone, simple + // Just clustering clustering :: function(anode, aname, spans=1.0) { ret : g.pnode({ type: "BlobClustering", name: "blobclustering-" + aname, - data: { spans : spans } + data: { spans : spans, policy: clustering_policy } }, nin=1, nout=1), - }.ret, + }.ret, + + // in: IBlobSet out: ICluster + solving :: function(anode, aname) { - // this bundles clustering, grouping and solving. Other patterns - // should be explored. Note, anode isn't really needed, we just - // use it for its ident and to keep similar calling pattern to - // above.. 
- solving :: function(anode, aname, spans=1.0, threshold=0.0) { local bc = g.pnode({ type: "BlobClustering", name: "blobclustering-" + aname, - data: { spans : spans } - }, nin=1, nout=1), - local bg = g.pnode({ - type: "BlobGrouping", - name: "blobgrouping-" + aname, - data: { - } + data: { policy: "uboone" } }, nin=1, nout=1), - local bs = g.pnode({ - type: "BlobSolving", - name: "blobsolving-" + aname, - data: { threshold: threshold } + + local gc = g.pnode({ + type: "GlobalGeomClustering", + name: "global-clustering-" + aname, + data: { policy: "uboone" } }, nin=1, nout=1), - ret: g.intern( - innodes=[bc], outnodes=[bs], centernodes=[bg], - edges=[g.edge(bc,bg), g.edge(bg,bs)], - name="solving-" + aname), + + solving :: function(suffix = "1st") { + local bg = g.pnode({ + type: "BlobGrouping", + name: "blobgrouping-" + aname + suffix, + data: { + } + }, nin=1, nout=1), + local cs1 = g.pnode({ + type: "ChargeSolving", + name: "cs1-" + aname + suffix, + data: { + weighting_strategies: ["uniform"], //"uniform", "simple", "uboone" + solve_config: "uboone", + whiten: true, + } + }, nin=1, nout=1), + local cs2 = g.pnode({ + type: "ChargeSolving", + name: "cs2-" + aname + suffix, + data: { + weighting_strategies: ["uboone"], //"uniform", "simple", "uboone" + solve_config: "uboone", + whiten: true, + } + }, nin=1, nout=1), + local local_clustering = g.pnode({ + type: "LocalGeomClustering", + name: "local-clustering-" + aname + suffix, + data: { + dryrun: false, + } + }, nin=1, nout=1), + // ret: g.pipeline([bg, cs1],"cs-pipe"+aname+suffix), + ret: g.pipeline([bg, cs1, local_clustering, cs2],"cs-pipe"+aname+suffix), + }.ret, + + global_deghosting :: function(suffix = "1st") { + ret: g.pnode({ + type: "ProjectionDeghosting", + name: "ProjectionDeghosting-" + aname + suffix, + data: { + dryrun: false, + } + }, nin=1, nout=1), + }.ret, + + local_deghosting :: function(config_round = 1, suffix = "1st", good_blob_charge_th=300) { + ret: g.pnode({ + type: "InSliceDeghosting", 
+ name: "inslice_deghosting-" + aname + suffix, + data: { + dryrun: false, + config_round: config_round, + good_blob_charge_th: good_blob_charge_th, + } + }, nin=1, nout=1), + }.ret, + + local gd1 = self.global_deghosting("1st"), + local cs1 = self.solving("1st"), + local ld1 = self.local_deghosting(1,"1st"), + + local gd2 = self.global_deghosting("2nd"), + local cs2 = self.solving("2nd"), + local ld2 = self.local_deghosting(2,"2nd"), + + local cs3 = self.solving("3rd"), + local ld3 = self.local_deghosting(3,"3rd"), + + // ret: g.pipeline([bc, gd1, cs1, ld1, gd2, cs2, ld2, cs3, ld3, gc],"uboone-solving"), + ret: g.pipeline([bc, cs1, ld1, gc],"simple-solving"), }.ret, - dump :: function(anode, aname) // no longer accepts drift speed - g.pnode({ + dump :: function(anode, aname, drift_speed) { + local cs = g.pnode({ type: "ClusterFileSink", - name: "clustertap-" + aname, - data: { - filename: "clusters-"+aname+"-%04d.json", - }, - }, nin=1, nout=0), - - // A function that reverts blobs to frames - reframing :: function(anode, aname) { - ret : g.pnode({ - type: "BlobReframer", - name: "blobreframing-" + aname, + name: "clustersink-"+aname, data: { - frame_tag: "reframe%d" %anode.data.ident, + outname: "clusters-apa-"+aname+".tar.gz", + format: "json", // json, numpy, dummy } - }, nin=1, nout=1), + }, nin=1, nout=0), + ret: cs }.ret, +}; - // fill ROOT histograms with frames - magnify :: function(anode, aname, frame_tag="orig") { - ret: g.pnode({ - type: 'MagnifySink', - name: 'magnify-'+aname, - data: { - output_filename: "magnify-img.root", - root_file_mode: 'UPDATE', - frames: [frame_tag + anode.data.ident], - trace_has_tag: true, - anode: wc.tn(anode), - }, - }, nin=1, nout=1), +function() { + local imgpipe (anode, multi_slicing, add_dump = true) = + if multi_slicing == "single" + then g.pipeline([ + // img.slicing(anode, anode.name, 109, active_planes=[0,1,2], masked_planes=[],dummy_planes=[]), // 109*22*4 + // img.slicing(anode, anode.name, 1916, active_planes=[], 
masked_planes=[0,1],dummy_planes=[2]), // 109*22*4 + img.slicing(anode, anode.name, 4, active_planes=[0,1,2], masked_planes=[],dummy_planes=[]), // 109*22*4 + img.tiling(anode, anode.name), + img.solving(anode, anode.name), + // img.clustering(anode, anode.name), + ] + if add_dump then [ + img.dump(anode, anode.name, params.lar.drift_speed),] else []) + else if multi_slicing == "active" + then g.pipeline([ + img.multi_active_slicing_tiling(anode, anode.name+"-ms-active", 4), + img.solving(anode, anode.name+"-ms-active"), + // img.clustering(anode, anode.name+"-ms-active"), + ] + if add_dump then [ + img.dump(anode, anode.name+"-ms-active", params.lar.drift_speed),] else []) + else if multi_slicing == "masked" + then g.pipeline([ + img.multi_masked_2view_slicing_tiling(anode, anode.name+"-ms-masked", 500), + img.clustering(anode, anode.name+"-ms-masked"), + ] + if add_dump then [ + img.dump(anode, anode.name+"-ms-masked", params.lar.drift_speed),] else []) + else { + local st = if multi_slicing == "multi-2view" + then img.multi_active_slicing_tiling(anode, anode.name+"-ms-active", 4) + else g.pipeline([ + img.slicing(anode, anode.name, 4, active_planes=[0,1,2], masked_planes=[],dummy_planes=[]), // 109*22*4 + img.tiling(anode, anode.name),]), + local active_fork = g.pipeline([ + st, + img.solving(anode, anode.name+"-ms-active"), + ] + if add_dump then [ + img.dump(anode, anode.name+"-ms-active", params.lar.drift_speed),] else []), + local masked_fork = g.pipeline([ + img.multi_masked_2view_slicing_tiling(anode, anode.name+"-ms-masked", 500), // 109, 1744 (total 9592) + img.clustering(anode, anode.name+"-ms-masked"), + ] + if add_dump then [ + img.dump(anode, anode.name+"-ms-masked", params.lar.drift_speed),] else []), + ret: g.fan.fanout("FrameFanout",[active_fork,masked_fork], "fan_active_masked-%s"%anode.name), }.ret, - // the end - dumpframes :: function(anode, aname) { - ret: g.pnode({ - type: "DumpFrames", - name: "dumpframes-"+aname, - }, nin=1, nout=0), - 
}.ret, + per_anode(anode, multi_slicing = "single", add_dump = true) :: g.pipeline([ + img.pre_proc(anode, anode.name), + imgpipe(anode, multi_slicing, add_dump), + ], "per_anode"), } diff --git a/cfg/pgrapher/experiment/sbnd/magnify-sinks.jsonnet b/cfg/pgrapher/experiment/sbnd/magnify-sinks.jsonnet index f041e5269..d0b1f0b1d 100644 --- a/cfg/pgrapher/experiment/sbnd/magnify-sinks.jsonnet +++ b/cfg/pgrapher/experiment/sbnd/magnify-sinks.jsonnet @@ -83,8 +83,8 @@ function(tools, outputfile) { data: { output_filename: outputfile, root_file_mode: 'UPDATE', - summaries: ['wiener%d' % n], // note that if tag set, each apa should have a tag set for FrameFanin - summary_operator: { ['wiener%d' % n]: 'set' }, // []: obj comprehension + summaries: ['wiener%d' % n], #commented Ewerton 2024-03-04 ['threshold%d' % n], // note that if tag set, each apa should have a tag set for FrameFanin + summary_operator: { ['threshold%d' % n]: 'set' }, // []: obj comprehension anode: wc.tn(tools.anodes[n]), }, }, nin=1, nout=1) diff --git a/cfg/pgrapher/experiment/sbnd/nf.jsonnet b/cfg/pgrapher/experiment/sbnd/nf.jsonnet index af61fc6b7..37e6f5bf3 100644 --- a/cfg/pgrapher/experiment/sbnd/nf.jsonnet +++ b/cfg/pgrapher/experiment/sbnd/nf.jsonnet @@ -16,11 +16,7 @@ function(params, anode, chndbobj, n, name='', dft=default_dft) noisedb: wc.tn(chndbobj), anode: wc.tn(anode), dft: wc.tn(dft), - resmp: [ - {channels: std.range(2128, 2175), sample_from: 5996}, - {channels: std.range(1520, 1559), sample_from: 5996}, - {channels: std.range( 440, 479), sample_from: 5996}, - ], + resmp: [], }, }, local grouped = { @@ -39,15 +35,7 @@ function(params, anode, chndbobj, n, name='', dft=default_dft) name: name, uses: [dft, chndbobj, anode], data: { - extra_stky: [ - {channels: std.range(n * 2560, (n + 1) * 2560 - 1), bits: [0,1,63]}, - {channels: [4], bits: [6] }, - {channels: [159], bits: [6] }, - {channels: [164], bits: [36] }, - {channels: [168], bits: [7] }, - {channels: [323], bits: [24] }, - 
{channels: [451], bits: [25] }, - ], + extra_stky: [], noisedb: wc.tn(chndbobj), anode: wc.tn(anode), dft: wc.tn(dft), @@ -83,7 +71,7 @@ function(params, anode, chndbobj, n, name='', dft=default_dft) // wc.tn(gaincalib), ], grouped_filters: [ - // wc.tn(grouped), + wc.tn(grouped), ], channel_status_filters: [ ], @@ -92,7 +80,7 @@ function(params, anode, chndbobj, n, name='', dft=default_dft) outtraces: 'raw%d' % n, }, }, uses=[chndbobj, anode, sticky, single, grouped, gaincalib], nin=1, nout=1), - + //}, uses=[chndbobj, anode, sticky, single, gaincalib], nin=1, nout=1), pipe: g.pipeline([obnf], name=name), }.pipe diff --git a/cfg/pgrapher/experiment/sbnd/simparams.jsonnet b/cfg/pgrapher/experiment/sbnd/simparams.jsonnet index 46b9f0b62..8ad00ebbd 100644 --- a/cfg/pgrapher/experiment/sbnd/simparams.jsonnet +++ b/cfg/pgrapher/experiment/sbnd/simparams.jsonnet @@ -12,8 +12,8 @@ base { DT : 8.8 * wc.cm2/wc.s, // Electron lifetime lifetime : 10*wc.ms, - // Electron drift speed, assumes a certain applied E-field - drift_speed : 1.59*wc.mm/wc.us, + // Electron drift speed, assumes 0.5 kV/cm and 88.4 K consistent with 1D sim + drift_speed : 1.563*wc.mm/wc.us, }, diff --git a/cfg/pgrapher/experiment/sbnd/sp-filters.jsonnet b/cfg/pgrapher/experiment/sbnd/sp-filters.jsonnet index f40bb242b..938dd7b1d 100644 --- a/cfg/pgrapher/experiment/sbnd/sp-filters.jsonnet +++ b/cfg/pgrapher/experiment/sbnd/sp-filters.jsonnet @@ -72,29 +72,25 @@ local wf(name, data={}) = { // wf('Wire_col', { sigma: 1.0 / wc.sqrtpi * 3.0 }), // ] -/** - * Optimized SP parameters (May 2019) - * Associated tuning in sp.jsonnet - */ [ - lf('ROI_tight_lf', { tau: 0.014 * wc.megahertz }), // 0.02 - lf('ROI_tighter_lf', { tau: 0.06 * wc.megahertz }), // 0.1 - lf('ROI_loose_lf', { tau: 0.002 * wc.megahertz }), // 0.0025 + lf('ROI_tight_lf', { tau: 0.0185 * wc.megahertz }), // 0.02 + lf('ROI_tighter_lf', { tau: 0.145 * wc.megahertz }), // 0.1 + lf('ROI_loose_lf', { tau: 0.00175 * wc.megahertz }), // 0.0025 
hf('Gaus_tight'), hf('Gaus_wide', { sigma: 0.10 * wc.megahertz }), hf('Wiener_tight_U', { - sigma: 0.148788 * wc.megahertz, - power: 3.76194, + sigma: 0.15 * wc.megahertz, + power: 5.5, }), hf("Wiener_tight_V", { - sigma: 0.1596568 * wc.megahertz, - power: 4.36125 }), + sigma: 0.15 * wc.megahertz, + power: 5.0 }), hf('Wiener_tight_W', { - sigma: 0.13623 * wc.megahertz, - power: 3.35324, + sigma: 0.25 * wc.megahertz, + power: 3.0, }), hf('Wiener_wide_U', { diff --git a/cfg/pgrapher/experiment/sbnd/sp.jsonnet b/cfg/pgrapher/experiment/sbnd/sp.jsonnet index 0d56a167d..7402426f3 100644 --- a/cfg/pgrapher/experiment/sbnd/sp.jsonnet +++ b/cfg/pgrapher/experiment/sbnd/sp.jsonnet @@ -51,7 +51,7 @@ function(params, tools, override = {}) { field_response: wc.tn(tools.field), elecresponse: wc.tn(tools.elec_resp), ftoffset: 0.0, // default 0.0 - ctoffset: 1.0*wc.microsecond, // default -8.0 + ctoffset: 2.0*wc.microsecond, // default -8.0 per_chan_resp: pc.name, fft_flag: 0, // 1 is faster but higher memory, 0 is slightly slower but lower memory postgain: 1.0, // default 1.2 @@ -64,8 +64,8 @@ function(params, tools, override = {}) { lroi_jump_one_bin: 1, // default 0 r_th_factor: 3.0, // default 3 - r_fake_signal_low_th: 375, // default 500 - r_fake_signal_high_th: 750, // default 1000 + r_fake_signal_low_th: 400, // default 500 + r_fake_signal_high_th: 800, // default 1000 r_fake_signal_low_th_ind_factor: 1.0, // default 1 r_fake_signal_high_th_ind_factor: 1.0, // default 1 r_th_peak: 3.0, // default 3.0 @@ -75,7 +75,9 @@ function(params, tools, override = {}) { // frame tags wiener_tag: 'wiener%d' % anode.data.ident, + wiener_threshold_tag: 'threshold%d' % anode.data.ident, decon_charge_tag: 'decon_charge%d' % anode.data.ident, + //gauss_tag: '', // <- commented Ewerton: empty that won't use wiener tag!! need fix? 
(already talked to Haiwang on Mar 4, 2024) gauss_tag: 'gauss%d' % anode.data.ident, use_roi_debug_mode: false, diff --git a/cfg/pgrapher/experiment/sbnd/wcls-img-clus.jsonnet b/cfg/pgrapher/experiment/sbnd/wcls-img-clus.jsonnet new file mode 100644 index 000000000..512afbcd5 --- /dev/null +++ b/cfg/pgrapher/experiment/sbnd/wcls-img-clus.jsonnet @@ -0,0 +1,105 @@ +local g = import "pgraph.jsonnet"; +local f = import "pgrapher/common/funcs.jsonnet"; +local wc = import "wirecell.jsonnet"; + +local tools_maker = import 'pgrapher/common/tools.jsonnet'; + +// added Ewerton 2023-09-06 +local reality = std.extVar('reality'); +local params_maker = +if reality == 'data' then import 'params.jsonnet' +else import 'simparams.jsonnet'; + +local base = import 'pgrapher/experiment/sbnd/simparams.jsonnet'; +local params = base { + lar: super.lar { // <- super.lar overrides default values + // Longitudinal diffusion constant + DL: std.extVar('DL') * wc.cm2 / wc.s, + // Transverse diffusion constant + DT: std.extVar('DT') * wc.cm2 / wc.s, + // Electron lifetime + lifetime: std.extVar('lifetime') * wc.ms, + // Electron drift speed, assumes a certain applied E-field + drift_speed: std.extVar('driftSpeed') * wc.mm / wc.us, + }, +}; + + +local tools_all = tools_maker(params); +local tools = tools_all {anodes: [tools_all.anodes[n] for n in [0,1]]}; //added Ewerton 2023-09-08 + +// must match name used in fcl +local wcls_input = g.pnode({ + type: 'wclsCookedFrameSource', //added wcls Ewerton 2023-07-27 + name: 'sigs', + data: { + nticks: params.daq.nticks, + frame_scale: 50, // scale up input recob::Wire by this factor + summary_scale: 50, // scale up input summary by this factor + frame_tags: ["orig"], // frame tags (only one frame in this module) + recobwire_tags: std.extVar('recobwire_tags'), // ["sptpc2d:gauss", "sptpc2d:wiener"], + trace_tags: std.extVar('trace_tags'), // ["gauss", "wiener"], + summary_tags: std.extVar('summary_tags'), // ["", "sptpc2d:wienersummary"], + 
input_mask_tags: std.extVar('input_mask_tags'), // ["sptpc2d:badmasks"], + output_mask_tags: std.extVar('output_mask_tags'), // ["bad"], + }, +}, nin=0, nout=1); + +local img = import 'pgrapher/experiment/sbnd/img.jsonnet'; +local img_maker = img(); +local img_pipes = [img_maker.per_anode(a, "multi-3view", add_dump = false) for a in tools.anodes]; + +local clus = import 'pgrapher/experiment/sbnd/clus.jsonnet'; +local clus_maker = clus(); +local clus_pipes = [clus_maker.per_volume(tools.anodes[0], face=0, dump=true), clus_maker.per_volume(tools.anodes[1], face=1, dump=true)]; + +local img_clus_pipe = [g.intern( + innodes = [img_pipes[n]], + centernodes = [], + outnodes = [clus_pipes[n]], + edges = [ + g.edge(img_pipes[n], clus_pipes[n], p, p) + for p in std.range(0, 1) + ] +) +for n in std.range(0, std.length(tools.anodes) - 1)]; + +local fanout_apa_rules = +[ + { + frame: { + //'.*': 'number%d' % n, + //'.*': 'gauss%d' % n, + //'.*': 'framefanout%d ' % n, + '.*': 'orig%d' % n, + }, + trace: { + // fake doing Nmult SP pipelines + //orig: ['wiener', 'gauss'], + gauss: 'gauss%d' % n, //uncommented Ewerton 2023-09-27 + wiener: 'wiener%d' % n, //created Ewerton 2023-09-27 + //'.*': 'orig', + }, + } + for n in std.range(0, std.length(tools.anodes) - 1) +]; +local parallel_graph = f.fanout("FrameFanout", img_clus_pipe, "parallel_graph", fanout_apa_rules); + +local graph = g.pipeline([wcls_input, parallel_graph], "main"); // added Ewerton 2023-09-08 + +local app = { + type: 'Pgrapher', //Pgrapher, TbbFlow + data: { + edges: g.edges(graph), + }, +}; + +local cmdline = { + type: "wire-cell", + data: { + plugins: ["WireCellGen", "WireCellPgraph", "WireCellSio", "WireCellSigProc", "WireCellRoot", "WireCellTbb", "WireCellImg"], + apps: ["Pgrapher"] //TbbFlow + } +}; + +[cmdline] + g.uses(graph) + [app] diff --git a/cfg/pgrapher/experiment/sbnd/wcls-nf-sp.jsonnet b/cfg/pgrapher/experiment/sbnd/wcls-nf-sp.jsonnet index 161f7cc42..35e28312a 100644 --- 
a/cfg/pgrapher/experiment/sbnd/wcls-nf-sp.jsonnet +++ b/cfg/pgrapher/experiment/sbnd/wcls-nf-sp.jsonnet @@ -42,14 +42,8 @@ local tools = tools_maker(params); local wcls_maker = import 'pgrapher/ui/wcls/nodes.jsonnet'; local wcls = wcls_maker(params, tools); -//local nf_maker = import "pgrapher/experiment/pdsp/nf.jsonnet"; -//local chndb_maker = import "pgrapher/experiment/pdsp/chndb.jsonnet"; - local sp_maker = import 'pgrapher/experiment/sbnd/sp.jsonnet'; -//local chndbm = chndb_maker(params, tools); -//local chndb = if epoch == "dynamic" then chndbm.wcls_multi(name="") else chndbm.wct(epoch); - // Collect the WC/LS input converters for use below. Make sure the // "name" argument matches what is used in the FHiCL that loads this @@ -111,26 +105,29 @@ local wcls_output = { frame_tags: ['gauss', 'wiener'], // this may be needed to convert the decon charge [units:e-] to be consistent with the LArSoft default ?unit? e.g. decon charge * 0.005 --> "charge value" to GaussHitFinder - frame_scale: [1.0, 1.0], - // nticks: params.daq.nticks, + frame_scale: [0.02, 0.02], + nticks: params.daq.nticks, chanmaskmaps: [], - nticks: -1, + //nticks: , }, }, nin=1, nout=1, uses=[mega_anode]), }; -// local perfect = import 'chndb-perfect.jsonnet'; -local base = import 'chndb-base.jsonnet'; + +local perfect = import 'pgrapher/experiment/sbnd/chndb-perfect.jsonnet'; +//local base = import 'chndb-base_sbnd.jsonnet'; + local chndb = [{ type: 'OmniChannelNoiseDB', name: 'ocndbperfect%d' % n, - // data: perfect(params, tools.anodes[n], tools.field, n), - data: base(params, tools.anodes[n], tools.field, n){dft:wc.tn(tools.dft)}, + data: perfect(params, tools.anodes[n], tools.field, n){dft:wc.tn(tools.dft)}, + // data: base(params, tools.anodes[n], tools.field, n){dft:wc.tn(tools.dft)}, uses: [tools.anodes[n], tools.field, tools.dft], } for n in std.range(0, std.length(tools.anodes) - 1)]; -// local nf_maker = import 'pgrapher/experiment/pdsp/nf.jsonnet'; -// local nf_pipes = 
[nf_maker(params, tools.anodes[n], chndb[n], n, name='nf%d' % n) for n in std.range(0, std.length(tools.anodes) - 1)]; + +local nf_maker = import 'pgrapher/experiment/sbnd/nf.jsonnet'; +local nf_pipes = [nf_maker(params, tools.anodes[n], chndb[n], n, name='nf%d' % n) for n in std.range(0, std.length(tools.anodes) - 1)]; local sp = sp_maker(params, tools, { sparse: sigoutform == 'sparse' }); local sp_pipes = [sp.make_sigproc(a) for a in tools.anodes]; @@ -140,38 +137,35 @@ local chsel_pipes = [ type: 'ChannelSelector', name: 'chsel%d' % n, data: { - channels: std.range(5638 * n, 5638 * (n + 1) - 1), + channels: std.range(5632 * n, 5632 * (n + 1) - 1), //tags: ['orig%d' % n], // traces tag }, }, nin=1, nout=1) for n in std.range(0, std.length(tools.anodes) - 1) ]; -local magoutput = 'protodune-data-check.root'; +local magoutput = 'sbnd-data-check.root'; local magnify = import 'pgrapher/experiment/sbnd/magnify-sinks.jsonnet'; local sinks = magnify(tools, magoutput); local nfsp_pipes = [ g.pipeline([ chsel_pipes[n], - // sinks.orig_pipe[n], + //sinks.orig_pipe[n], - // nf_pipes[n], - // sinks.raw_pipe[n], + nf_pipes[n], + //sinks.raw_pipe[n], sp_pipes[n], - // sinks.decon_pipe[n], - // sinks.threshold_pipe[n], + //sinks.decon_pipe[n], + //sinks.threshold_pipe[n], // sinks.debug_pipe[n], // use_roi_debug_mode=true in sp.jsonnet ], 'nfsp_pipe_%d' % n) for n in std.range(0, std.length(tools.anodes) - 1) ]; -//local f = import 'pgrapher/common/funcs.jsonnet'; local f = import 'pgrapher/experiment/sbnd/funcs.jsonnet'; -//local outtags = ['gauss%d' % n for n in std.range(0, std.length(tools.anodes) - 1)]; -//local fanpipe = f.fanpipe('FrameFanout', nfsp_pipes, 'FrameFanin', 'sn_mag_nf', outtags); local fanpipe = f.fanpipe('FrameFanout', nfsp_pipes, 'FrameFanin', 'sn_mag_nf'); local retagger = g.pnode({ @@ -199,7 +193,7 @@ local sink = g.pnode({ type: 'DumpFrames' }, nin=1, nout=0); local graph = g.pipeline([wcls_input.adc_digits, fanpipe, retagger, wcls_output.sp_signals, 
sink]); local app = { - type: 'TbbFlow:', + type: 'TbbFlow', data: { edges: g.edges(graph), }, diff --git a/cfg/pgrapher/experiment/sbnd/wcls-sim-drift-depoflux-nf-sp.jsonnet b/cfg/pgrapher/experiment/sbnd/wcls-sim-drift-depoflux-nf-sp.jsonnet new file mode 100644 index 000000000..9589e9490 --- /dev/null +++ b/cfg/pgrapher/experiment/sbnd/wcls-sim-drift-depoflux-nf-sp.jsonnet @@ -0,0 +1,341 @@ +// This is a main entry point for configuring a wire-cell CLI job to +// simulate SBND. It is simplest signal-only simulation with +// one set of nominal field response function. + +local epoch = std.extVar('epoch'); // eg "dynamic", "after", "before", "perfect" +local reality = std.extVar('reality'); +local sigoutform = std.extVar('signal_output_form'); // eg "sparse" or "dense" +local savetid = std.extVar("save_track_id"); + +local g = import 'pgraph.jsonnet'; +local f = import 'pgrapher/common/funcs.jsonnet'; +local wc = import 'wirecell.jsonnet'; +local io = import 'pgrapher/common/fileio.jsonnet'; +local tools_maker = import 'pgrapher/common/tools.jsonnet'; + +local data_params = import 'params.jsonnet'; +local simu_params = import 'simparams.jsonnet'; +local params = if reality == 'data' then data_params else simu_params; + +local base = import 'pgrapher/experiment/sbnd/simparams.jsonnet'; +local params = base { + lar: super.lar { // <- super.lar overrides default values + // Longitudinal diffusion constant + DL: std.extVar('DL') * wc.cm2 / wc.s, + // Transverse diffusion constant + DT: std.extVar('DT') * wc.cm2 / wc.s, + // Electron lifetime + lifetime: std.extVar('lifetime') * wc.ms, + // Electron drift speed, assumes a certain applied E-field + drift_speed: std.extVar('driftSpeed') * wc.mm / wc.us, + }, +}; + +local tools = tools_maker(params); +local sim_maker = import 'pgrapher/experiment/sbnd/sim.jsonnet'; +local sim = sim_maker(params, tools); +local nanodes = std.length(tools.anodes); +local anode_iota = std.range(0, nanodes - 1); + +local wcls_maker = import 
"pgrapher/ui/wcls/nodes.jsonnet"; +local wcls = wcls_maker(params, tools); + +// added Ewerton 2023-03-14 +local wcls_input_sim = { + depos: wcls.input.depos(name="", art_tag=std.extVar('inputTag')), + deposet: g.pnode({ + type: 'wclsSimDepoSetSource', + name: "", + data: { + model: "", + scale: -1, //scale is -1 to correct a sign error in the SimDepoSource converter. + art_tag: std.extVar('inputTag'), //name of upstream art producer of depos "label:instance:processName" + id_is_track: if (savetid == 'true') then false else true, + assn_art_tag: "", + }, + }, nin=0, nout=1), +}; +// Collect all the wc/ls output converters for use below. Note the +// "name" MUST match what is used in theh "outputers" parameter in the +// FHiCL that loads this file. +local mega_anode = { + type: 'MegaAnodePlane', + name: 'meganodes', + data: { + anodes_tn: [wc.tn(anode) for anode in tools.anodes], + }, +}; +local wcls_output_sim = { + // ADC output from simulation + // sim_digits: wcls.output.digits(name="simdigits", tags=["orig"]), + sim_digits: g.pnode({ + type: 'wclsFrameSaver', + name: 'simdigits', + data: { + anode: wc.tn(mega_anode), + digitize: true, // true means save as RawDigit, else recob::Wire + frame_tags: ['daq'], + nticks: params.daq.nticks, + pedestal_mean: 'native', + }, + }, nin=1, nout=1, uses=[mega_anode]), + // The noise filtered "ADC" values. These are truncated for + // art::Event but left as floats for the WCT SP. Note, the tag + // "raw" is somewhat historical as the output is not equivalent to + // "raw data". + nf_digits: wcls.output.digits(name="nfdigits", tags=["raw"]), + // The output of signal processing. Note, there are two signal + // sets each created with its own filter. The "gauss" one is best + // for charge reconstruction, the "wiener" is best for S/N + // separation. Both are used in downstream WC code. 
+ sp_signals: wcls.output.signals(name="spsignals", tags=["gauss", "wiener"]), + // save "threshold" from normal decon for each channel noise + // used in imaging + sp_thresholds: wcls.output.thresholds(name="spthresholds", tags=["wiener"]), +}; + + +local drifter = sim.drifter; + +local setdrifter = g.pnode({ + type: 'DepoSetDrifter', + data: { + drifter: "Drifter" + } + }, nin=1, nout=1, + uses=[drifter]); + +// signal plus noise pipelines +local sn_pipes = sim.splusn_pipelines; + +local rng = tools.random; +local wcls_depoflux_writer = g.pnode({ + type: 'wclsDepoFluxWriter', + name: 'postdrift', + data: { + anodes: [wc.tn(anode) for anode in tools.anodes], + field_response: wc.tn(tools.field), + tick: 0.5 * wc.us, + window_start: 0.0 * wc.ms, + window_duration: self.tick * 3400, + nsigma: 3.0, + + reference_time: -1700 * wc.us, + + energy: 1, # equivalent to use_energy = true + simchan_label: 'simpleSC', + sed_label: if (savetid == 'true') then 'ionandscint' else '', //added Ewerton 2023-10-13 + sparse: false, + }, +}, nin=1, nout=1, uses=tools.anodes + [tools.field]); + +local sp_maker = import 'pgrapher/experiment/sbnd/sp.jsonnet'; +local sp = sp_maker(params, tools, { sparse: sigoutform == 'sparse' }); +local sp_pipes = [sp.make_sigproc(a) for a in tools.anodes]; + +local magoutput = 'sbnd-data-check.root'; +local magnify = import 'pgrapher/experiment/sbnd/magnify-sinks.jsonnet'; +local sinks = magnify(tools, magoutput); + +local perfect = import 'pgrapher/experiment/sbnd/chndb-perfect.jsonnet'; + +local chndb = [{ + type: 'OmniChannelNoiseDB', + name: 'ocndbperfect%d' % n, + data: perfect(params, tools.anodes[n], tools.field, n){dft:wc.tn(tools.dft)}, + // data: base(params, tools.anodes[n], tools.field, n){dft:wc.tn(tools.dft)}, + uses: [tools.anodes[n], tools.field, tools.dft], +} for n in anode_iota]; + +local nf_maker = import 'pgrapher/experiment/sbnd/nf.jsonnet'; +local nf_pipes = [nf_maker(params, tools.anodes[n], chndb[n], n, name='nf%d' % n) for n 
in anode_iota]; + +local multipass1 = [ + g.pipeline([ + sn_pipes[n], + ], + 'multipass%d' % n) + for n in anode_iota +]; + +local multipass2 = [ + g.pipeline([ + sn_pipes[n], + //sinks.orig_pipe[n], + + nf_pipes[n], + sinks.raw_pipe[n], + + sp_pipes[n], + + sinks.decon_pipe[n], + sinks.threshold_pipe[n], + ], + 'multipass%d' % n) + for n in anode_iota +]; + + +local f_sp = import 'pgrapher/experiment/sbnd/funcs.jsonnet'; + +local outtags = ['orig%d' % n for n in anode_iota]; +local bi_manifold1 = f.fanpipe('DepoSetFanout', multipass1, 'FrameFanin', 'sn_mag_nf', outtags); +local bi_manifold2 = f_sp.fanpipe('DepoSetFanout', multipass2, 'FrameFanin', 'sn_mag_nf_mod2', outtags, "true"); + +local retagger_sim = g.pnode({ + type: 'Retagger', + data: { + // Note: retagger keeps tag_rules an array to be like frame fanin/fanout. + tag_rules: [{ + // Retagger also handles "frame" and "trace" like fanin/fanout + // merge separately all traces like gaussN to gauss. + frame: { + '.*': 'orig', + }, + merge: { + 'orig\\d': 'daq', + }, + }], + }, +}, nin=1, nout=1); + +local sink_sim = sim.frame_sink; + +//===============================NF+SP============================================ + +// Collect all the wc/ls output converters for use below. Note the +// "name" MUST match what is used in the "outputers" parameter in the +// FHiCL that loads this file. + +local wcls_output_sp = { + // The noise filtered "ADC" values. These are truncated for + // art::Event but left as floats for the WCT SP. Note, the tag + // "raw" is somewhat historical as the output is not equivalent to + // "raw data". + nf_digits: g.pnode({ + type: 'wclsFrameSaver', + name: 'nfsaver', + data: { + anode: wc.tn(mega_anode), + digitize: true, // true means save as RawDigit, else recob::Wire + frame_tags: ['raw'], + }, + }, nin=1, nout=1, uses=[mega_anode]), + + + // The output of signal processing. Note, there are two signal + // sets each created with its own filter. 
The "gauss" one is best + // for charge reconstruction, the "wiener" is best for S/N + // separation. Both are used in downstream WC code. + sp_signals: g.pnode({ + type: 'wclsFrameSaver', + name: 'spsaver', + data: { + anode: wc.tn(mega_anode), + digitize: false, // true means save as RawDigit, else recob::Wire + frame_tags: ['gauss', 'wiener'], + + // this may be needed to convert the decon charge [units:e-] to be consistent with the LArSoft default + // for SBND, this scale is about ~50. Frame scale needed when using LArSoft producers reading in recob::Wire. + frame_scale: [0.02, 0.02], + nticks: params.daq.nticks, + summary_tags: ['wiener'], # commented Ewerton 2024-03-04 -> ['wiener'], + summary_operator: {threshold: 'set'}, + chanmaskmaps: ['bad'], + }, + }, nin=1, nout=1, uses=[mega_anode]), + +}; + + +local chsel_pipes = [ + g.pnode({ + type: 'ChannelSelector', + name: 'chsel%d' % n, + data: { + channels: std.range(5632 * n, 5632 * (n + 1) - 1), + }, + }, nin=1, nout=1) + for n in anode_iota +]; + + +local nfsp_pipes = [ + g.pipeline([ + chsel_pipes[n], + //sinks.orig_pipe[n], + + nf_pipes[n], // NEED to include this pipe for channelmaskmaps + //sinks.raw_pipe[n], + + sp_pipes[n], + //sinks.decon_pipe[n], + sinks.threshold_pipe[n], + // sinks.debug_pipe[n], // use_roi_debug_mode=true in sp.jsonnet + ], + 'nfsp_pipe_%d' % n) + for n in anode_iota +]; + +//local fanpipe = f_sp.fanpipe('FrameFanout', nfsp_pipes, 'FrameFanin', 'sn_mag_nf'); // commented Ewerton 2023-05-24 +local fanpipe = f_sp.fanpipe('FrameFanout', nfsp_pipes, 'FrameFanin', 'sn_mag_nf_mod'); //added Ewerton 2023-05-24 + +local retagger_sp = g.pnode({ + type: 'Retagger', + name: 'sp', //added Ewerton 2023-05-24 + data: { + // Note: retagger keeps tag_rules an array to be like frame fanin/fanout. + tag_rules: [{ + // Retagger also handles "frame" and "trace" like fanin/fanout + // merge separately all traces like gaussN to gauss. 
+ frame: { + '.*': 'retagger', + }, + merge: { + 'gauss\\d': 'gauss', + 'wiener\\d': 'wiener', + }, + }], + }, +}, nin=1, nout=1); + +local sink_sp = g.pnode({ type: 'DumpFrames' }, nin=1, nout=0); + +local graph1 = g.pipeline([ +wcls_input_sim.deposet, //sim +setdrifter, //sim +wcls_depoflux_writer, //sim +bi_manifold1, //sim +retagger_sim, //sim +wcls_output_sim.sim_digits, //sim +fanpipe, //sp +retagger_sp, //sp +wcls_output_sp.sp_signals, //sp +sink_sp //sp +]); + + +local graph2 = g.pipeline([ +wcls_input_sim.deposet, //sim +setdrifter, //sim +wcls_depoflux_writer, //sim +bi_manifold2, //sim +retagger_sp, //sp +wcls_output_sp.sp_signals, //sp +sink_sp //sp +]); + +local save_simdigits = std.extVar('save_simdigits'); + +local graph = if save_simdigits == "true" then graph1 else graph2; + +local app = { + type: 'TbbFlow', + data: { + edges: g.edges(graph), + }, +}; + +// Finally, the configuration sequence +g.uses(graph) + [app] + diff --git a/cfg/pgrapher/experiment/sbnd/wcls-sim-drift-simchannel-deposetsimchannelsink.jsonnet b/cfg/pgrapher/experiment/sbnd/wcls-sim-drift-simchannel-deposetsimchannelsink.jsonnet index ba9e24f51..f109cbabf 100644 --- a/cfg/pgrapher/experiment/sbnd/wcls-sim-drift-simchannel-deposetsimchannelsink.jsonnet +++ b/cfg/pgrapher/experiment/sbnd/wcls-sim-drift-simchannel-deposetsimchannelsink.jsonnet @@ -80,6 +80,9 @@ local wcls_output = { // for charge reconstruction, the "wiener" is best for S/N // separation. Both are used in downstream WC code. 
sp_signals: wcls.output.signals(name="spsignals", tags=["gauss", "wiener"]), + // save "threshold" from normal decon for each channel noise + // used in imaging + sp_thresholds: wcls.output.thresholds(name="spthresholds", tags=["threshold"]), }; //local deposio = io.numpy.depos(output); local drifter = sim.drifter; diff --git a/cfg/pgrapher/experiment/sbnd/wcls-sim-drift-simchannel-nf-sp.jsonnet b/cfg/pgrapher/experiment/sbnd/wcls-sim-drift-simchannel-nf-sp.jsonnet new file mode 100644 index 000000000..1cbe90272 --- /dev/null +++ b/cfg/pgrapher/experiment/sbnd/wcls-sim-drift-simchannel-nf-sp.jsonnet @@ -0,0 +1,349 @@ +// This is a main entry point for configuring a wire-cell CLI job to +// simulate SBND. It is simplest signal-only simulation with +// one set of nominal field response function. + +local epoch = std.extVar('epoch'); // eg "dynamic", "after", "before", "perfect" +local reality = std.extVar('reality'); +local sigoutform = std.extVar('signal_output_form'); // eg "sparse" or "dense" + +local g = import 'pgraph.jsonnet'; +local f = import 'pgrapher/common/funcs.jsonnet'; +local wc = import 'wirecell.jsonnet'; +local io = import 'pgrapher/common/fileio.jsonnet'; +local tools_maker = import 'pgrapher/common/tools.jsonnet'; + +local data_params = import 'params.jsonnet'; +local simu_params = import 'simparams.jsonnet'; +local params = if reality == 'data' then data_params else simu_params; + +local base = import 'pgrapher/experiment/sbnd/simparams.jsonnet'; +local params = base { + lar: super.lar { // <- super.lar overrides default values + // Longitudinal diffusion constant + DL: std.extVar('DL') * wc.cm2 / wc.s, + // Transverse diffusion constant + DT: std.extVar('DT') * wc.cm2 / wc.s, + // Electron lifetime + lifetime: std.extVar('lifetime') * wc.ms, + // Electron drift speed, assumes a certain applied E-field + drift_speed: std.extVar('driftSpeed') * wc.mm / wc.us, + }, +}; + +local tools = tools_maker(params); +local sim_maker = import 
'pgrapher/experiment/sbnd/sim.jsonnet'; +local sim = sim_maker(params, tools); +local nanodes = std.length(tools.anodes); +local anode_iota = std.range(0, nanodes - 1); + +local wcls_maker = import "pgrapher/ui/wcls/nodes.jsonnet"; +local wcls = wcls_maker(params, tools); + +// added Ewerton 2023-03-14 +local wcls_input_sim = { + depos: wcls.input.depos(name="", art_tag=std.extVar('inputTag')), + deposet: g.pnode({ + type: 'wclsSimDepoSetSource', + name: "", + data: { + model: "", + scale: -1, //scale is -1 to correct a sign error in the SimDepoSource converter. + art_tag: std.extVar('inputTag'), //name of upstream art producer of depos "label:instance:processName" + assn_art_tag: "", + }, + }, nin=0, nout=1), +}; +// Collect all the wc/ls output converters for use below. Note the +// "name" MUST match what is used in the "outputers" parameter in the +// FHiCL that loads this file. +local mega_anode = { + type: 'MegaAnodePlane', + name: 'meganodes', + data: { + anodes_tn: [wc.tn(anode) for anode in tools.anodes], + }, +}; +local wcls_output_sim = { + // ADC output from simulation + // sim_digits: wcls.output.digits(name="simdigits", tags=["orig"]), + sim_digits: g.pnode({ + type: 'wclsFrameSaver', + name: 'simdigits', + data: { + // anode: wc.tn(tools.anode), + anode: wc.tn(mega_anode), + digitize: true, // true means save as RawDigit, else recob::Wire + frame_tags: ['daq'], + nticks: params.daq.nticks, + chanmaskmaps: ['bad'], //uncommented Ewerton 2023-10-12 + pedestal_mean: 'native', + }, + }, nin=1, nout=1, uses=[mega_anode]), + // The noise filtered "ADC" values. These are truncated for + // art::Event but left as floats for the WCT SP. Note, the tag + // "raw" is somewhat historical as the output is not equivalent to + // "raw data". + nf_digits: wcls.output.digits(name="nfdigits", tags=["raw"]), + // The output of signal processing. Note, there are two signal + // sets each created with its own filter. 
The "gauss" one is best + // for charge reconstruction, the "wiener" is best for S/N + // separation. Both are used in downstream WC code. + sp_signals: wcls.output.signals(name="spsignals", tags=["gauss", "wiener"]), + // save "threshold" from normal decon for each channel noise + // used in imaging + sp_thresholds: wcls.output.thresholds(name="spthresholds", tags=["threshold"]), +}; + + +local drifter = sim.drifter; + +local setdrifter = g.pnode({ + type: 'DepoSetDrifter', + data: { + drifter: "Drifter" + } + }, nin=1, nout=1, + uses=[drifter]); + +// signal plus noise pipelines +// local sn_pipes = sim.signal_pipelines; +local sn_pipes = sim.splusn_pipelines; + + +local rng = tools.random; +local wcls_deposetsimchannel_sink = g.pnode({ + type: 'wclsDepoSetSimChannelSink', + name: 'postdrift', + data: { + artlabel: 'simpleSC', // where to save in art::Event + anodes_tn: [wc.tn(anode) for anode in tools.anodes], + rng: wc.tn(rng), + tick: 0.5 * wc.us, + start_time: -0.2 * wc.ms, + readout_time: self.tick * 3400, + nsigma: 3.0, + drift_speed: params.lar.drift_speed, + u_to_rp: 100 * wc.mm, // time to collection plane + v_to_rp: 100 * wc.mm, // time to collection plane + y_to_rp: 100 * wc.mm, + u_time_offset: 0.0 * wc.us, + v_time_offset: 0.0 * wc.us, + y_time_offset: 0.0 * wc.us, + g4_ref_time: -1700 * wc.us, + use_energy: true, + }, +}, nin=1, nout=1, uses=tools.anodes); + +local sp_maker = import 'pgrapher/experiment/sbnd/sp.jsonnet'; +local sp = sp_maker(params, tools, { sparse: sigoutform == 'sparse' }); +local sp_pipes = [sp.make_sigproc(a) for a in tools.anodes]; + +local magoutput = 'sbnd-data-check.root'; +local magnify = import 'pgrapher/experiment/sbnd/magnify-sinks.jsonnet'; +local sinks = magnify(tools, magoutput); + +local perfect = import 'pgrapher/experiment/sbnd/chndb-perfect.jsonnet'; +//local base = import 'chndb-base_sbnd.jsonnet'; + +local chndb = [{ + type: 'OmniChannelNoiseDB', + name: 'ocndbperfect%d' % n, + data: perfect(params, 
tools.anodes[n], tools.field, n){dft:wc.tn(tools.dft)}, + // data: base(params, tools.anodes[n], tools.field, n){dft:wc.tn(tools.dft)}, + uses: [tools.anodes[n], tools.field, tools.dft], +} for n in anode_iota]; + +local nf_maker = import 'pgrapher/experiment/sbnd/nf.jsonnet'; +local nf_pipes = [nf_maker(params, tools.anodes[n], chndb[n], n, name='nf%d' % n) for n in anode_iota]; + +local multipass1 = [ + g.pipeline([ + sn_pipes[n], + ], + 'multipass%d' % n) + for n in anode_iota +]; + +local multipass2 = [ + g.pipeline([ + sn_pipes[n], + //sinks.orig_pipe[n], + + nf_pipes[n], + //sinks.raw_pipe[n], + + sp_pipes[n], + + //sinks.decon_pipe[n], + //sinks.threshold_pipe[n], + ], + 'multipass%d' % n) + for n in anode_iota +]; + + +local f_sp = import 'pgrapher/experiment/sbnd/funcs.jsonnet'; + +local outtags = ['orig%d' % n for n in anode_iota]; +local bi_manifold1 = f.fanpipe('DepoSetFanout', multipass1, 'FrameFanin', 'sn_mag_nf', outtags); +local bi_manifold2 = f_sp.fanpipe('DepoSetFanout', multipass2, 'FrameFanin', 'sn_mag_nf_mod2', outtags, "true"); + +local retagger_sim = g.pnode({ + type: 'Retagger', + data: { + // Note: retagger keeps tag_rules an array to be like frame fanin/fanout. + tag_rules: [{ + // Retagger also handles "frame" and "trace" like fanin/fanout + // merge separately all traces like gaussN to gauss. + frame: { + '.*': 'orig', + }, + merge: { + 'orig\\d': 'daq', + }, + }], + }, +}, nin=1, nout=1); + +local sink_sim = sim.frame_sink; + +//===============================NF+SP============================================ + +// Collect all the wc/ls output converters for use below. Note the +// "name" MUST match what is used in the "outputers" parameter in the +// FHiCL that loads this file. + +local wcls_output_sp = { + // The noise filtered "ADC" values. These are truncated for + // art::Event but left as floats for the WCT SP. Note, the tag + // "raw" is somewhat historical as the output is not equivalent to + // "raw data". 
+ nf_digits: g.pnode({ + type: 'wclsFrameSaver', + name: 'nfsaver', + data: { + // anode: wc.tn(tools.anode), + anode: wc.tn(mega_anode), + digitize: true, // true means save as RawDigit, else recob::Wire + frame_tags: ['raw'], + // nticks: params.daq.nticks, + chanmaskmaps: ['bad'], + }, + }, nin=1, nout=1, uses=[mega_anode]), + + + // The output of signal processing. Note, there are two signal + // sets each created with its own filter. The "gauss" one is best + // for charge reconstruction, the "wiener" is best for S/N + // separation. Both are used in downstream WC code. + sp_signals: g.pnode({ + type: 'wclsFrameSaver', + name: 'spsaver', + data: { + // anode: wc.tn(tools.anode), + anode: wc.tn(mega_anode), + digitize: false, // true means save as RawDigit, else recob::Wire + frame_tags: ['gauss', 'wiener'], + + // this may be needed to convert the decon charge [units:e-] to be consistent with the LArSoft default ?unit? e.g. decon charge * 0.005 --> "charge value" to GaussHitFinder + frame_scale: [0.02, 0.02], + nticks: params.daq.nticks, + chanmaskmaps: [], + //nticks: -1, + }, + }, nin=1, nout=1, uses=[mega_anode]), +}; + + +local chsel_pipes = [ + g.pnode({ + type: 'ChannelSelector', + name: 'chsel%d' % n, + data: { + channels: std.range(5632 * n, 5632 * (n + 1) - 1), + //tags: ['orig%d' % n], // traces tag + }, + }, nin=1, nout=1) + for n in anode_iota +]; + + +local nfsp_pipes = [ + g.pipeline([ + chsel_pipes[n], + //sinks.orig_pipe[n], + + nf_pipes[n], + //sinks.raw_pipe[n], + + sp_pipes[n], + //sinks.decon_pipe[n], + //sinks.threshold_pipe[n], + // sinks.debug_pipe[n], // use_roi_debug_mode=true in sp.jsonnet + ], + 'nfsp_pipe_%d' % n) + for n in anode_iota +]; + +//local fanpipe = f_sp.fanpipe('FrameFanout', nfsp_pipes, 'FrameFanin', 'sn_mag_nf'); // commented Ewerton 2023-05-24 +local fanpipe = f_sp.fanpipe('FrameFanout', nfsp_pipes, 'FrameFanin', 'sn_mag_nf_mod'); //added Ewerton 2023-05-24 + +local retagger_sp = g.pnode({ + type: 'Retagger', + name: 
'sp', //added Ewerton 2023-05-24 + data: { + // Note: retagger keeps tag_rules an array to be like frame fanin/fanout. + tag_rules: [{ + // Retagger also handles "frame" and "trace" like fanin/fanout + // merge separately all traces like gaussN to gauss. + frame: { + '.*': 'retagger', + }, + merge: { + 'gauss\\d': 'gauss', + 'wiener\\d': 'wiener', + }, + }], + }, +}, nin=1, nout=1); + +local sink_sp = g.pnode({ type: 'DumpFrames' }, nin=1, nout=0); + +local graph1 = g.pipeline([ +wcls_input_sim.deposet, //sim +setdrifter, //sim +wcls_deposetsimchannel_sink, //sim +bi_manifold1, //sim +retagger_sim, //sim +wcls_output_sim.sim_digits, //sim +fanpipe, //sp +retagger_sp, //sp +wcls_output_sp.sp_signals, //sp +sink_sp //sp +]); + + +local graph2 = g.pipeline([ +wcls_input_sim.deposet, //sim +setdrifter, //sim +wcls_deposetsimchannel_sink, //sim +bi_manifold2, //sim +retagger_sp, //sp +wcls_output_sp.sp_signals, //sp +sink_sp //sp +]); + +local save_simdigits = std.extVar('save_simdigits'); + +local graph = if save_simdigits == "true" then graph1 else graph2; + +local app = { + type: 'TbbFlow', + data: { + edges: g.edges(graph), + }, +}; + +// Finally, the configuration sequence +g.uses(graph) + [app] + diff --git a/cfg/pgrapher/experiment/sbnd/wcls-sim-drift-simchannel-simchannelsink.jsonnet b/cfg/pgrapher/experiment/sbnd/wcls-sim-drift-simchannel-simchannelsink.jsonnet new file mode 100644 index 000000000..7e2c19239 --- /dev/null +++ b/cfg/pgrapher/experiment/sbnd/wcls-sim-drift-simchannel-simchannelsink.jsonnet @@ -0,0 +1,186 @@ +// This is a main entry point for configuring a wire-cell CLI job to +// simulate SBND. It is simplest signal-only simulation with +// one set of nominal field response function. 
+local g = import 'pgraph.jsonnet'; +local f = import 'pgrapher/common/funcs.jsonnet'; +local wc = import 'wirecell.jsonnet'; +local io = import 'pgrapher/common/fileio.jsonnet'; +local tools_maker = import 'pgrapher/common/tools.jsonnet'; +local base = import 'pgrapher/experiment/sbnd/simparams.jsonnet'; +local params = base { + lar: super.lar { + // Longitudinal diffusion constant + DL: std.extVar('DL') * wc.cm2 / wc.s, + // Transverse diffusion constant + DT: std.extVar('DT') * wc.cm2 / wc.s, + // Electron lifetime + lifetime: std.extVar('lifetime') * wc.ms, + // Electron drift speed, assumes a certain applied E-field + drift_speed: std.extVar('driftSpeed') * wc.mm / wc.us, + }, +}; +local tools = tools_maker(params); +local sim_maker = import 'pgrapher/experiment/sbnd/sim.jsonnet'; +local sim = sim_maker(params, tools); +local nanodes = std.length(tools.anodes); +local anode_iota = std.range(0, nanodes - 1); +local output = 'wct-sim-ideal-sig.npz'; +//local depos = g.join_sources(g.pnode({type:"DepoMerger", name:"BlipTrackJoiner"}, nin=2, nout=1), +// [sim.ar39(), sim.tracks(tracklist)]); +// local depos = sim.tracks(tracklist, step=1.0 * wc.mm); +local wcls_maker = import "pgrapher/ui/wcls/nodes.jsonnet"; +local wcls = wcls_maker(params, tools); +// added Ewerton 2023-03-14 +local wcls_input = { + depos: wcls.input.depos(name="", art_tag=std.extVar('inputTag')), //commented Ewerton 2023-03-15 + deposet: g.pnode({ + type: 'wclsSimDepoSetSource', + name: "", + data: { + model: "", + scale: -1, //scale is -1 to correct a sign error in the SimDepoSource converter. + art_tag: std.extVar('inputTag'), //name of upstream art producer of depos "label:instance:processName" + assn_art_tag: "", + }, + }, nin=0, nout=1), +}; +// Collect all the wc/ls output converters for use below. Note the +// "name" MUST match what is used in the "outputers" parameter in the +// FHiCL that loads this file. 
+local mega_anode = { + type: 'MegaAnodePlane', + name: 'meganodes', + data: { + anodes_tn: [wc.tn(anode) for anode in tools.anodes], + }, +}; +local wcls_output = { + // ADC output from simulation + // sim_digits: wcls.output.digits(name="simdigits", tags=["orig"]), + sim_digits: g.pnode({ + type: 'wclsFrameSaver', + name: 'simdigits', + data: { + // anode: wc.tn(tools.anode), + anode: wc.tn(mega_anode), + digitize: true, // true means save as RawDigit, else recob::Wire + frame_tags: ['daq'], + // nticks: params.daq.nticks, + // chanmaskmaps: ['bad'], + pedestal_mean: 'native', + }, + }, nin=1, nout=1, uses=[mega_anode]), + // The noise filtered "ADC" values. These are truncated for + // art::Event but left as floats for the WCT SP. Note, the tag + // "raw" is somewhat historical as the output is not equivalent to + // "raw data". + nf_digits: wcls.output.digits(name="nfdigits", tags=["raw"]), + // The output of signal processing. Note, there are two signal + // sets each created with its own filter. The "gauss" one is best + // for charge reconstruction, the "wiener" is best for S/N + // separation. Both are used in downstream WC code. 
+ sp_signals: wcls.output.signals(name="spsignals", tags=["gauss", "wiener"]), + // save "threshold" from normal decon for each channel noise + // used in imaging + sp_thresholds: wcls.output.thresholds(name="spthresholds", tags=["threshold"]), +}; +//local deposio = io.numpy.depos(output); +local drifter = sim.drifter; +// added Ewerton 2023-03-14 +local setdrifter = g.pnode({ + type: 'DepoSetDrifter', + data: { + drifter: "Drifter" + } + }, nin=1, nout=1, + uses=[drifter]); +local bagger = sim.make_bagger(); +// signal plus noise pipelines +// local sn_pipes = sim.signal_pipelines; +local sn_pipes = sim.splusn_pipelines; +local perfect = import 'pgrapher/experiment/sbnd/chndb-perfect.jsonnet'; +local chndb = [{ + type: 'OmniChannelNoiseDB', + name: 'ocndbperfect%d' % n, + data: perfect(params, tools.anodes[n], tools.field, n){dft:wc.tn(tools.dft)}, + uses: [tools.anodes[n], tools.field, tools.dft], +} for n in anode_iota]; +//local chndb_maker = import 'pgrapher/experiment/sbnd/chndb.jsonnet'; +//local noise_epoch = "perfect"; +//local noise_epoch = "after"; +//local chndb_pipes = [chndb_maker(params, tools.anodes[n], tools.fields[n]).wct(noise_epoch) +// for n in std.range(0, std.length(tools.anodes)-1)]; +local nf_maker = import 'pgrapher/experiment/sbnd/nf.jsonnet'; +// local nf_pipes = [nf_maker(params, tools.anodes[n], chndb_pipes[n]) for n in std.range(0, std.length(tools.anodes)-1)]; +local nf_pipes = [nf_maker(params, tools.anodes[n], chndb[n], n, name='nf%d' % n) for n in anode_iota]; +local sp_maker = import 'pgrapher/experiment/sbnd/sp.jsonnet'; +local sp = sp_maker(params, tools); +local sp_pipes = [sp.make_sigproc(a) for a in tools.anodes]; +local rng = tools.random; +local wcls_simchannel_sink = g.pnode({ + type: 'wclsSimChannelSink', + name: 'postdrift', + data: { + artlabel: 'simpleSC', // where to save in art::Event + anodes_tn: [wc.tn(anode) for anode in tools.anodes], + rng: wc.tn(rng), + tick: 0.5 * wc.us, + start_time: -0.2 * wc.ms, + 
readout_time: self.tick * 3400, + nsigma: 3.0, + drift_speed: params.lar.drift_speed, + u_to_rp: 100 * wc.mm, // time to collection plane + v_to_rp: 100 * wc.mm, // time to collection plane + y_to_rp: 100 * wc.mm, + u_time_offset: 0.0 * wc.us, + v_time_offset: 0.0 * wc.us, + y_time_offset: 0.0 * wc.us, + g4_ref_time: -1700 * wc.us, + use_energy: true, + }, +}, nin=1, nout=1, uses=tools.anodes); +// local magoutput = 'sbnd-data-check.root'; +// local magnify = import 'pgrapher/experiment/sbnd/magnify-sinks.jsonnet'; +// local sinks = magnify(tools, magoutput); +local multipass = [ + g.pipeline([ + // wcls_simchannel_sink[n], + sn_pipes[n], + // sinks.orig_pipe[n], + // nf_pipes[n], + // sp_pipes[n], + ], + 'multipass%d' % n) + for n in anode_iota +]; +local outtags = ['orig%d' % n for n in anode_iota]; +local bi_manifold = f.fanpipe('DepoSetFanout', multipass, 'FrameFanin', 'sn_mag_nf', outtags); +// local bi_manifold = f.fanpipe('DepoFanout', multipass, 'FrameFanin', 'sn_mag_nf', outtags); +local retagger = g.pnode({ + type: 'Retagger', + data: { + // Note: retagger keeps tag_rules an array to be like frame fanin/fanout. + tag_rules: [{ + // Retagger also handles "frame" and "trace" like fanin/fanout + // merge separately all traces like gaussN to gauss. + frame: { + '.*': 'orig', + }, + merge: { + 'orig\\d': 'daq', + }, + }], + }, +}, nin=1, nout=1); +//local frameio = io.numpy.frames(output); +local sink = sim.frame_sink; +local graph = g.pipeline([wcls_input.depos, drifter, wcls_simchannel_sink, bagger, bi_manifold, retagger, wcls_output.sim_digits, sink]); //<- standard SimDepoSource source and Drifter +local app = { + type: 'TbbFlow', //TbbFlow Pgrapher changed Ewerton 2023-03-14 + data: { + edges: g.edges(graph), + }, +}; +// Finally, the configuration sequence which is emitted. 
+g.uses(graph) + [app] + diff --git a/cfg/pgrapher/experiment/sbnd/wcls-sim-drift-simchannel.jsonnet b/cfg/pgrapher/experiment/sbnd/wcls-sim-drift-simchannel.jsonnet index 33b4406e9..de329ffb8 100644 --- a/cfg/pgrapher/experiment/sbnd/wcls-sim-drift-simchannel.jsonnet +++ b/cfg/pgrapher/experiment/sbnd/wcls-sim-drift-simchannel.jsonnet @@ -80,6 +80,9 @@ local wcls_output = { // for charge reconstruction, the "wiener" is best for S/N // separation. Both are used in downstream WC code. sp_signals: wcls.output.signals(name="spsignals", tags=["gauss", "wiener"]), + // save "threshold" from normal decon for each channel noise + // used in imaging + sp_thresholds: wcls.output.thresholds(name="spthresholds", tags=["threshold"]), }; //local deposio = io.numpy.depos(output); local drifter = sim.drifter; @@ -173,7 +176,7 @@ local retagger = g.pnode({ local sink = sim.frame_sink; local graph = g.pipeline([wcls_input.depos, drifter, wcls_simchannel_sink, bagger, bi_manifold, retagger, wcls_output.sim_digits, sink]); //<- standard SimDepoSource source and Drifter local app = { - type: 'Pgrapher', + type: 'Pgrapher', //TbbFlow Pgrapher changed Ewerton 2023-03-14 data: { edges: g.edges(graph), }, diff --git a/cfg/wirecell.jsonnet b/cfg/wirecell.jsonnet index 22a43d77f..4e615971e 100644 --- a/cfg/wirecell.jsonnet +++ b/cfg/wirecell.jsonnet @@ -324,6 +324,11 @@ then obj.type + ":" + obj.name else obj.type, + // The "plural" version of tn(). Return an array of type/names given an + // array of objects. This is simply a list comprehension to save a little + // typing. + tns(objs) :: [$.tn(obj) for obj in objs], + // Return a new list where only the first occurrence of any object is kept. 
unique_helper(l, x):: if std.count(l,x) == 0 then l + [x] else l, diff --git a/clus/docs/Blob_Sampler.md b/clus/docs/Blob_Sampler.md deleted file mode 100644 index 163d83101..000000000 --- a/clus/docs/Blob_Sampler.md +++ /dev/null @@ -1,359 +0,0 @@ - - -# BlobSampler Class Analysis - -## Overview - -BlobSampler is a class designed to sample points from blob-like structures in a wire chamber detector, producing point cloud representations. It's part of the Wire-Cell toolkit and implements multiple sampling strategies to convert detector blobs into point clouds for further analysis. - -## Class Structure - -### Core Components - -1. **Base Class Inheritance** - - `Aux::Logger`: For logging functionality - - `IBlobSampler`: Interface for blob sampling - - `IConfigurable`: Interface for configuration - -2. **Key Member Variables** - - `CommonConfig m_cc`: Common configuration parameters - - `std::vector> m_samplers`: Collection of sampling strategies - -### Common Configuration (CommonConfig) - -```cpp -struct CommonConfig { - double time_offset; // Time offset added to blob times - double drift_speed; // Speed to convert time to spatial position - std::string prefix; // Prefix for dataset array names - Binning tbinning; // Time binning configuration - std::vector extra; // Extra arrays to include - std::vector extra_re; // Regex patterns for extra arrays -}; -``` - -## Sampling Strategies - -The class implements six distinct sampling strategies: - -1. **Center Strategy** - - Calculates a single point at the blob's center - - Simplest sampling method - - Uses average of corner points - -2. **Corner Strategy** - - Samples points at each blob corner - - Provides structural boundary information - - Basic outline of the blob shape - -3. **Edge Strategy** - - Samples points along the edges between corners - - Takes midpoints of each edge segment - - Better boundary representation than corners alone - -4. 
**Grid Strategy** - - Creates uniform grid of points within the blob - - Configurable step size - - Two modes: aligned and unaligned to wire grid - -5. **Bounds Strategy** - - Samples points along blob boundaries - - Configurable step size between points - - More detailed boundary representation - -6. **Stepped Strategy** - - Advanced sampling based on wire crossings - - Adapts to blob size with variable step sizes - - Matches Wire-Cell prototype sampling - -## Core Algorithms - -### Point Sampling Process - -1. **Initialization** - ```cpp - std::tuple sample_blob( - const IBlob::pointer& iblob, int blob_index) - ``` - -2. **Coordinate Translation** - - Converts time to drift distance: - ```cpp - double time2drift(double time) const { - return xorig + xsign * (time + time_offset) * drift_speed; - } - ``` - -3. **Point Cloud Generation** - - Each strategy implements its own `sample()` method - - Points are collected in a Dataset structure - - Common processing through `intern()` method - -### Stepped Strategy Algorithm - -The most sophisticated sampling strategy follows these steps: - -1. Find wire planes with min/max number of wires -2. Calculate step sizes: - ```cpp - int nmin = std::max(min_step_size, max_step_fraction*width_min); - int nmax = std::max(min_step_size, max_step_fraction*width_max); - ``` -3. Sample points at wire crossings with offset adjustments -4. Validate points against third wire plane - -## Data Flow - -```mermaid -graph TD - A[Input Blob] --> B[BlobSampler] - B --> C{Strategy Selection} - C --> D[Center] - C --> E[Corner] - C --> F[Edge] - C --> G[Grid] - C --> H[Bounds] - C --> I[Stepped] - D --> J[Point Cloud Dataset] - E --> J - F --> J - G --> J - H --> J - I --> J - J --> K[Output Dataset] -``` - -## Implementation Details - -### Key Methods - -1. **Configuration** - ```cpp - void configure(const WireCell::Configuration& cfg); - Configuration default_configuration() const; - ``` - -2. 
**Strategy Addition** - ```cpp - void add_strategy(Configuration strategy) - ``` - -3. **Point Processing** - ```cpp - void intern(Dataset& ds, std::vector points) - ``` - -### Error Handling - -- Null blob check: - ```cpp - if (!iblob) { - THROW(ValueError() << errmsg{"can not sample null blob"}); - } - ``` - -- Invalid configuration checks -- Boundary validation -- Wire plane coordinate validation - -## Usage Considerations - -1. **Performance Optimization** - - Cached wire indices for faster lookup - - Efficient point cloud data structure - - Minimized memory allocation - -2. **Configuration Flexibility** - - Multiple strategy combinations - - Customizable sampling parameters - - Extensible design for new strategies - -3. **Quality Control** - - Validation of sampling points - - Boundary checking - - Wire plane alignment verification - -## Common Workflows - -1. Basic Blob Sampling: -```cpp -BlobSampler sampler; -sampler.configure(config); -auto [dataset, aux] = sampler.sample_blob(blob, index); -``` - -2. Multiple Strategy Setup: -```cpp -Configuration cfg; -cfg["strategy"] = Json::arrayValue; -cfg["strategy"].append("center"); -cfg["strategy"].append("stepped"); -sampler.configure(cfg); -``` - - -Let me create a visual representation of the BlobSampler's sampling strategies: - - - -```svg - - - - - - BlobSampler Sampling Strategies - - - - - - - - - - - Center - - - - - - - - - - - Corner - - - - - - - - - - - Edge - - - - - - - - - - - - - - - - - Grid - - - - - - - - - - - - - - - - - - Bounds - - - - - - - - - - - - - - - - - - - - - - - Stepped - - - - - - Strategy Properties: - - Center: Single point - - Corner: Vertex points - - Edge: Midpoint samples - - Grid: Regular spacing - - Stepped: Wire-aligned - - - -``` - -The BlobSampler class is a sophisticated component for sampling points from detector blobs in different ways. Here are the key points to understand: - -1. 
**Purpose**: - - Converts detector blob data into point cloud representations - - Supports multiple sampling strategies for different analysis needs - - Handles time/drift coordinate transformations - -2. **Configuration System**: - - Flexible configuration through JSON - - Common parameters like drift speed and time offset - - Strategy-specific parameters - -3. **Sampling Strategies**: - Each strategy serves a different purpose: - - Center: Quick blob position identification - - Corner: Basic shape outline - - Edge: Better boundary definition - - Grid: Uniform internal sampling - - Bounds: Detailed boundary sampling - - Stepped: Wire-crossing aligned sampling - -4. **Key Features**: - - Thread-safe design (except for channel cache) - - Efficient point cloud generation - - Comprehensive error checking - - Extensible strategy system - -5. **Usage Pattern**: -```cpp -// Configure sampler -BlobSampler sampler; -Configuration cfg; -cfg["strategy"] = "stepped"; // or array of strategies -cfg["drift_speed"] = 1.6; // mm/μs -sampler.configure(cfg); - -// Sample blob -auto [dataset, aux] = sampler.sample_blob(blob, index); -``` - -The visualization above shows how each strategy samples points differently from the same blob shape. The Stepped strategy is particularly important as it aligns with the wire chamber geometry. - diff --git a/clus/docs/Blob_Sampling.md b/clus/docs/Blob_Sampling.md deleted file mode 100644 index 1125de310..000000000 --- a/clus/docs/Blob_Sampling.md +++ /dev/null @@ -1,177 +0,0 @@ - - -# BlobSampling Class Analysis - -## Class Overview -BlobSampling is a Wire-Cell Toolkit class that transforms blob data into point cloud trees and outputs them as tensors. It implements multiple interfaces: -- `IBlobSampling`: Core blob sampling functionality -- `IConfigurable`: Configuration management -- `INamed`: Named component interface -- Inherits from `Aux::Logger` for logging capabilities - -## Core Components - -### 1. 
Member Variables -- `m_samplers`: Map of sampler name to IBlobSampler pointer -- `m_datapath`: String template for output data path (default: "pointtrees/%d") -- `m_count`: Operation counter for logging - -### 2. Configuration System - -#### Configuration Parameters -```json -{ - "datapath": "pointtrees/%d", // Output path template - "samplers": { // Map of sampler configurations - "": "" // Name -> sampler type mapping - } -} -``` - -#### Configuration Logic -1. Sets datapath from config or uses default -2. Validates samplers configuration exists -3. For each sampler entry: - - Extracts name and type - - Validates non-empty type - - Creates sampler instance via Factory system - - Stores in m_samplers map - -### 3. Primary Algorithm (operator()) - -The main processing algorithm follows these steps: - -1. Input Validation: - - Accepts IBlobSet input pointer - - Returns true with null output for end-of-stream (EOS) - - Extracts blobs from input blobset - -2. Point Cloud Generation: - - Creates root node for point tree - - Iterates through each blob: - - Skips null blobs - - For each configured sampler: - - Samples blob to create point cloud - - Associates point cloud with sampler name - - Inserts resulting named point clouds into tree - -3. Tensor Creation: - - Formats output datapath using blobset identifier - - Converts point tree to tensors - - Creates tensor set with blobset identifier - - Returns tensor set via output parameter - -## Key Features - -1. **Multi-Sampler Support** - - Supports multiple sampling strategies simultaneously - - Each sampler produces differently named point clouds - - Results combined in single point cloud tree - -2. **Configurable Output Path** - - Supports templated paths with %d formatter - - Interpolates blobset identifier into path - -3. **Robust Error Handling** - - Validates configuration - - Handles null inputs - - Skips invalid blobs - - Includes detailed logging - -4. 
**Factory Integration** - - Implements WIRECELL_FACTORY macro - - Supports dynamic sampler creation - - Integrates with Wire-Cell component system - -## Usage Patterns - -1. Configuration: -```python -config = { - "datapath": "output/points/%d", - "samplers": { - "charge": "ChargeSpaceSampler", - "time": "TimeSpaceSampler" - } -} -sampler.configure(config) -``` - -2. Processing: -```cpp -IBlobSet::pointer input = /* blob set */; -ITensorSet::pointer output; -bool more = sampler(input, output); -``` - -## Performance Considerations - -1. Memory Management: - - Uses smart pointers for safety - - Creates point tree structure efficiently - - Manages tensor conversion in-place - -2. Processing Efficiency: - - Processes blobs sequentially - - Allows parallel sampling strategies - - Minimizes data copying - -3. Scalability: - - Handles variable blob counts - - Supports multiple sampling strategies - - Flexible output path management - - - - -```mermaid -classDiagram - class BlobSampling { - -map m_samplers - -string m_datapath - -size_t m_count - +configure(Configuration cfg) - +default_configuration() Configuration - +operator()(input_pointer, output_pointer) bool - } - - class IBlobSampling { - <> - +operator()(input_pointer, output_pointer) bool - } - - class IConfigurable { - <> - +configure(Configuration) - +default_configuration() Configuration - } - - class Logger { - +debug(string) - +error(string) - } - - class IBlobSampler { - <> - +sample_blob() - } - - BlobSampling --|> IBlobSampling - BlobSampling --|> IConfigurable - BlobSampling --|> Logger - BlobSampling o-- IBlobSampler - - note for BlobSampling "Converts blobs to point cloud tensors" - -``` - -The BlobSampling class is a complex component in the Wire-Cell Toolkit that serves as a bridge between blob-based data structures and tensor-based output formats. 
I've provided a detailed analysis of its structure and operation in the first artifact, and a visual representation of its class relationships in the second artifact. - -Key points to highlight that weren't covered in detail in the artifacts: - -1. The class uses the Wire-Cell factory system for dependency injection, allowing for runtime configuration of different sampling strategies. - -2. The logging system is integrated throughout the code to provide detailed debugging information, which is crucial for monitoring the sampling process. - -3. The design allows for extensibility through the sampler system - new sampling strategies can be added without modifying the core BlobSampling class. - diff --git a/clus/docs/Clustering_close.md b/clus/docs/Clustering_close.md deleted file mode 100644 index 4a64e596f..000000000 --- a/clus/docs/Clustering_close.md +++ /dev/null @@ -1,322 +0,0 @@ -I'll analyze the clustering_close function and explain its logic and algorithm. - - - -```mermaid -flowchart TD - START([Start]) --> A[Initialize Graph and Maps] - A --> B[Create Vertex for Each Live Cluster] - B --> C[Start Cluster Comparison Loop] - - C --> D{Cluster 1\nLength > 1.5cm?} - D -->|No| C - D -->|Yes| E{Already Used?} - E -->|Yes| C - E -->|No| F[Compare with\nRemaining Clusters] - - F --> G{Cluster 2\nLength > 1.5cm?} - G -->|No| F - G -->|Yes| H{Cluster 2\nAlready Used?} - - H -->|Yes| F - H -->|No| I{Call\nClustering_3rd_round} - - I -->|True| J[Add Edge to Graph] - J --> K{Cluster 1\nLength < 5cm?} - K -->|Yes| L[Mark Cluster 1 Used\nBreak Inner Loop] - K -->|No| M{Cluster 2\nLength < 5cm?} - - M -->|Yes| N[Mark Cluster 2 Used] - M -->|No| F - - L --> C - N --> F - - C -- All Clusters Processed --> O[Merge Clusters\nBased on Graph] - O --> END([End]) - -``` - - - -# clustering_close Algorithm Analysis - -## Purpose -The `clustering_close` function implements a graph-based clustering algorithm to merge nearby clusters based on their geometric properties. 
It uses the `Clustering_3rd_round` function as its core decision maker for whether clusters should be connected. - -## Algorithm Components - -### 1. Data Structures -- **Graph (g)**: Boost undirected graph to represent cluster connectivity -- **Map (ilive2desc)**: Maps cluster indices to graph vertex descriptors -- **Map (map_cluster_index)**: Maps Cluster pointers to their indices -- **Set (used_clusters)**: Tracks clusters that have been processed and marked as used - -### 2. Graph Construction Phase - -#### Initial Setup -```cpp -Graph g; -std::unordered_map ilive2desc; -std::map map_cluster_index; -``` - -#### Vertex Creation -- Creates vertices for each live cluster -- Assigns indices and maintains mappings for later reference - -### 3. Edge Creation Process - -#### Cluster Selection Criteria -1. **Primary Length Filter** - - Both clusters must be longer than 1.5cm - - Prevents very small clusters from being considered - -2. **Usage Status Check** - - Skips clusters already marked as "used" - - Helps prevent over-clustering - -#### Connection Logic -1. **Proximity Test** - - Uses `Clustering_3rd_round` to determine if clusters should be connected - - Considers: - - Distance between clusters - - Geometric alignment - - Point distribution patterns - -2. **Small Cluster Handling** - - Special processing for clusters < 5cm - - Once connected, these small clusters are marked as "used" - - Prevents them from forming multiple connections - -### 4. Graph Edge Addition -When clusters are determined to be connected: -1. Adds edge between corresponding vertices in the graph -2. Updates usage status based on cluster sizes -3. May break inner loop for small clusters - -### 5. 
Final Merging Phase -- Calls `merge_clusters` function to: - - Process the constructed graph - - Combine connected clusters - - Update the cluster set - -## Implementation Details - -### Key Parameters -- **length_cut**: Distance threshold for considering cluster connections -- **1.5cm**: Minimum cluster length for consideration -- **5cm**: Threshold for special handling of small clusters - -### Efficiency Considerations -1. **Early Filtering** - - Length checks before expensive computations - - Usage status checks to prevent redundant processing - -2. **Loop Structure** - - Nested loop for comparing all cluster pairs - - Break conditions for small clusters to reduce comparisons - -3. **Memory Management** - - Uses references where possible - - Maintains maps for efficient lookups - -### Safety Features -1. **Used Cluster Tracking** - - Prevents over-clustering - - Ensures small clusters don't form too many connections - -2. **Size-based Processing** - - Different handling for different cluster sizes - - Protects small clusters from excessive merging - -## Interaction with Other Components - -### Input -- **live_grouping**: Contains the clusters to be processed -- **cluster_connected_dead**: Set of clusters connected to dead channels -- **length_cut**: Distance threshold parameter - -### Output -- Modified cluster connections in the input grouping -- Updated cluster_connected_dead set - -### Dependencies -- Relies on `Clustering_3rd_round` for connection decisions -- Uses `merge_clusters` for final cluster combination - - -The `clustering_close` function implements a graph-based clustering algorithm. Here are the key points: - -1. **Algorithm Structure**: - - Uses a graph representation where: - - Vertices represent clusters - - Edges represent connections between clusters - - Implements a two-phase process: - - Graph construction with connection logic - - Final merging based on graph connectivity - -2. 
**Key Features**: - - Filters out clusters smaller than 1.5cm - - Special handling for clusters under 5cm - - Uses `Clustering_3rd_round` for connection decisions - - Maintains a "used clusters" set to prevent over-clustering - -3. **Efficiency Considerations**: - - Early filtering to avoid unnecessary computations - - Break conditions for small clusters - - Efficient data structures for lookups - -4. **Safety Mechanisms**: - - Length thresholds to filter inappropriate clusters - - Usage tracking to prevent excessive connections - - Size-based processing rules - -The function is particularly interesting in how it: -- Uses a graph structure to represent cluster relationships -- Implements different rules based on cluster sizes -- Prevents over-clustering through usage tracking -- Combines geometric and topological approaches - - - - - - -Let me analyze the Clustering_3rd_round function and explain its logic and algorithm. - - - -```mermaid -flowchart TD - START([Start]) --> A{Distance < 0.5cm?} - A -->|Yes| MERGE([Return True]) - A -->|No| B{Distance < 1.0cm AND\nboth lengths < 12cm?} - B -->|Yes| MERGE - B -->|No| C{Distance < 2.0cm AND\nany length >= 12cm?} - C -->|No| D{Distance < length_cut AND\nany length >= 12cm?} - C -->|Yes| E[Calculate Hough Transform\nDirections & Point Counts] - E --> F{Both lengths > 25cm?} - F -->|Yes| G{Check dipole ratios:\n≤2% OR ≤3 points each OR\n≤3.5% with special cases} - G -->|Yes| MERGE - G -->|No| D - F -->|No| D - - D -->|Yes| H[Calculate Average Positions\nand Vectors] - H --> I{Small-Large Cluster Check:\nOne length < 12cm?} - I -->|Yes| J{Check Point Distribution\nRatios} - J -->|Pass| MERGE - J -->|Fail| REJECT([Return False]) - - I -->|No| K{Check Point Counts\n>25 or >25% total} - K -->|Pass| L{Check Angles between\nVectors and Directions} - L -->|Pass| MERGE - L -->|Fail| REJECT - K -->|Fail| REJECT - - D -->|No| REJECT - -``` - - - -# Clustering_3rd_round Algorithm Analysis - -## Purpose -The function determines 
whether two clusters should be merged based on their geometric properties, primarily considering: -- Distance between closest points -- Cluster lengths -- Point distributions -- Directional alignment - -## Core Decision Logic - -### 1. Quick Merge Conditions -The algorithm first checks for immediate merge conditions: -- If distance < 0.5cm: Merge immediately -- If distance < 1.0cm AND both clusters < 12cm: Merge immediately - -### 2. Medium Distance Analysis (< 2.0cm) -For clusters where at least one is ≥ 12cm: -- Performs Hough transform to determine cluster directions -- Counts points using dipole method -- For large clusters (> 25cm), checks specific dipole ratios: - - Both clusters must have ratio < 2% OR ≤ 3 points - - OR special case with 3.5% threshold and additional constraints - -### 3. Extended Analysis (< length_cut) -For more distant clusters where at least one is ≥ 12cm: - -#### Small-Large Cluster Interaction -- Special handling when one cluster < 12cm: - - Small cluster must have > 50% points near connection - - Large cluster must have significant point density - -#### Large Cluster Interaction -When both clusters are larger: -1. Checks point density requirements: - - > 25 points or > 25% of total points -2. Performs angle analysis: - - Between cluster directions - - Between directions and connection vector - - Uses both local (near connection) and global directions - -## Key Metrics Used - -### Length-based Classification -- Very small: < 12cm -- Medium: 12-25cm -- Large: 25-60cm -- Very large: > 60cm - -### Point Distribution Metrics -- Nearby points (within 10cm) -- Total points in cluster -- Dipole distribution (points on either side of connection) - -### Angular Measurements -- Direction vectors from Hough transform -- Angles between: - - Cluster directions - - Connection vector - - Average position vectors - -## Implementation Notes - -1. 
The algorithm uses multiple geometric calculations: - - Closest point finding - - Hough transforms for direction - - Point counting in regions - - Vector angle calculations - -2. Different thresholds are applied based on cluster sizes: - - Stricter conditions for very large clusters - - More lenient conditions for small clusters - - Special handling for small-large cluster pairs - -3. The code includes multiple safety checks to prevent inappropriate merging: - - Point density requirements - - Directional alignment constraints - - Size-dependent threshold adjustments - - -The Clustering_3rd_round function implements a sophisticated clustering algorithm that decides whether two clusters should be merged based on various geometric and point distribution properties. I've created two artifacts that explain the algorithm: - -1. A flowchart showing the decision tree structure -2. A detailed markdown document explaining the algorithm's components and logic - -The key aspects of the algorithm are: - -1. It uses a hierarchical decision process, starting with simple distance checks and progressing to more complex geometric analyses. - -2. The behavior varies significantly based on cluster sizes: - - Very close clusters (< 0.5cm) are merged immediately - - Small clusters (< 12cm) have special handling rules - - Large clusters (> 25cm) require more stringent alignment checks - -3. The algorithm employs multiple geometric tools: - - Hough transforms for direction finding - - Point density analysis - - Vector angle calculations - - Dipole distribution analysis - diff --git a/clus/docs/Clustering_connect.md b/clus/docs/Clustering_connect.md deleted file mode 100644 index a8abf3100..000000000 --- a/clus/docs/Clustering_connect.md +++ /dev/null @@ -1,196 +0,0 @@ -The flow diagram shows the high-level process, while the detailed explanation covers the specific algorithms and criteria used at each step. 
- - -```mermaid -flowchart TD - A[Start: Input Live Clusters] --> B[Sort Clusters by Length] - B --> C[Process First Cluster] - C --> D[Create Global Skeleton Cloud] - D --> E[Process Remaining Clusters] - - E --> F{Cluster Length Check} - F -->|Length < 100cm or special conditions| G[Point Analysis] - F -->|Otherwise| H[Skip Analysis] - - G --> I[Check Points Against Global Cloud] - I --> J[Analyze in U/V/W Views] - J --> K[Calculate Uniqueness Scores] - - K --> L{Merge Decision} - L -->|Yes| M[Add to Graph] - L -->|No| N[Continue] - - M --> O[Update Global Skeleton] - N --> O - - O --> P[Next Cluster] - P --> E - - E --> Q[Final Graph Processing] - Q --> R[Merge Connected Components] - R --> S[End: Updated Clusters] - -``` - - - -# Clustering Connect Algorithm Detailed Explanation - -## Overview -The `clustering_connect1` function implements a sophisticated clustering algorithm for connecting track segments in a 3D particle detector. It uses geometric and topological properties to determine which clusters should be merged. - -## Key Components - -### 1. Data Structures -- **Live Grouping**: Collection of current clusters -- **Global Point Cloud**: Reference point cloud for spatial comparisons -- **Dead Wire Maps**: Maps containing dead wire regions for U/V/W views -- **Boost Graph**: Used to track cluster connectivity for merging - -### 2. Direction Vectors -The algorithm uses several important direction vectors: -- **drift_dir**: (1,0,0) - Drift direction -- **U_dir**: (0, cos(60°), sin(60°)) - U wire direction -- **V_dir**: (0, cos(60°), -sin(60°)) - V wire direction -- **W_dir**: (0, 1, 0) - W wire direction - -## Algorithm Steps - -### Phase 1: Initialization -1. Sort clusters by length in descending order -2. Create empty global skeleton cloud -3. Initialize variables: - - extending_dis = 50 cm - - angle = 7.5° - - loose_dis_cut = 7.5 cm - -### Phase 2: First Cluster Processing -1. Process the longest cluster first -2. 
Calculate extreme points and directions -3. Add points to global skeleton cloud in both directions - -### Phase 3: Remaining Clusters Processing -For each subsequent cluster: - -1. **Length Filter**: - - Process if length < 100 cm - - Or if special parallel conditions are met - -2. **Direction Analysis**: - ```cpp - Calculate main_dir from extreme points - Calculate dir1 and dir2 using: - - Direct calculation for long segments - - Hough transform for shorter segments - ``` - -3. **Point Analysis**: - - Check each point against global skeleton - - Analyze in U, V, W views separately - - Track unique points and overlapping regions - -4. **Merge Decision Criteria**: - a) Primary Overlap Check: - ```cpp - if (overlap_points > 75% of total points && - unique_points < 24% of total points) - ``` - - b) Secondary Geometric Check: - ```cpp - if (angle_difference > 70° && - distance < specified_threshold) - ``` - - c) PCA-based Analysis: - ```cpp - if (significant_overlap && - PCA_directions_aligned && - distance < threshold) - ``` - -### Phase 4: Graph Processing -1. Build connectivity graph based on merge decisions -2. Process graph to identify connected components -3. Merge clusters within each connected component - -### Phase 5: Secondary Pass -1. Perform additional connectivity analysis -2. Apply stricter geometric criteria -3. Create final cluster groupings - -## Key Algorithms - -### Direction Calculation -```cpp -if (main_dir.magnitude() > 10 cm && - abs(main_dir.angle(drift_dir) - 90°) < 5°) { - // Use direct calculation -} else if (cluster->get_length() > 25 cm) { - // Use cluster-based Hough transform -} else { - // Use global point cloud Hough transform -} -``` - -### Merge Criteria -The algorithm uses multiple layers of criteria: -1. Spatial proximity -2. Directional alignment -3. Point overlap percentage -4. 
PCA-based geometric alignment - -### Distance Calculations -- Point-to-cluster distance -- Cluster-to-cluster minimum distance -- Directional alignment angles - -## Important Parameters - -1. **Length Thresholds**: - - Primary length cut: 100 cm - - Secondary length cut: 25 cm - - Minimum length: 3 cm - -2. **Angular Thresholds**: - - Basic angle: 7.5° - - Parallel threshold: 5° - - Merge angle threshold: 70° - -3. **Distance Thresholds**: - - Extension distance: 50 cm - - Loose distance cut: 7.5 cm - - Merge distance thresholds: 1.2-4.0 cm - -## Error Handling - -The algorithm includes several safeguards: -1. Dead wire region handling -2. Length-based filtering -3. Multiple validation steps before merging -4. Graph-based connectivity validation - -## Performance Considerations - -1. **Optimization Techniques**: - - Early filtering of small clusters - - Use of spatial indexing in point cloud - - Efficient graph-based merging - -2. **Memory Management**: - - Point cloud structure for spatial queries - - Efficient graph representation - - Smart pointers for resource management - - - -This clustering algorithm is designed to connect track segments in a 3D particle detector, specifically focusing on handling complex geometries and noise. The algorithm works in multiple phases, starting with the longest clusters and progressively processing smaller ones while maintaining a global view of the detector space. - -The key innovation is its multi-view approach, analyzing clusters in the U, V, and W wire planes while accounting for dead regions in each view. It uses a combination of geometric properties (angles, distances) and topological features (point overlaps, connectivity) to make merging decisions. - -The algorithm is particularly robust because it: -1. Handles dead wire regions explicitly -2. Uses multiple geometric criteria for validation -3. Employs both local and global analyses -4. 
Implements a two-pass approach for better accuracy - diff --git a/clus/docs/Clustering_deghost.md b/clus/docs/Clustering_deghost.md deleted file mode 100644 index afef6444c..000000000 --- a/clus/docs/Clustering_deghost.md +++ /dev/null @@ -1,123 +0,0 @@ -I'll explain the clustering_deghost function's logic and algorithm in detail. This function appears to be a ghost track removal algorithm for particle tracking detectors. - - - -```mermaid -flowchart TD - A[Start] --> B[Sort clusters by length] - B --> C[Initialize point clouds] - C --> D[Process each cluster] - D --> E{Length < cut?} - E -- Yes --> F[Analyze point matches] - E -- No --> G[Add to point clouds] - - F --> H{Check overlap criteria} - H -- High overlap --> I[Check views match] - H -- Low overlap --> J[Keep cluster] - - I --> K{2+ views match?} - K -- Yes --> L[Check distance] - K -- No --> J - - L --> M{Close enough?} - M -- Yes --> N[Merge clusters] - M -- No --> O[Remove cluster] - - J --> P[Add to point clouds] - N --> Q[Update graph] - O --> R[Add to removal list] - - P --> S[Continue] - Q --> S - R --> S - - S --> T[Process merges & removals] - T --> U[End] - - subgraph "Point Analysis" - F --> F1[Count points per view] - F1 --> F2[Find dead wire regions] - F2 --> F3[Find unique points] - F3 --> H - end - -``` - -Here's a detailed explanation of the algorithm: - -1. Purpose: -- The function aims to identify and remove ghost tracks (false tracks) by analyzing the consistency of track projections across three wire planes (U, V, W views) -- It also handles cases where some wires are dead (non-functioning) - -2. Key Components: -- Two point clouds are maintained: - - global_point_cloud: Stores all points from valid tracks - - global_skeleton_cloud: Stores simplified representations of longer tracks (>30cm) -- A connectivity graph (g) to track which clusters should be merged -- Maps of dead wire regions for each view (dead_u_index, dead_v_index, dead_w_index) - -3. 
Main Algorithm Steps: - -a) Initialization: -```cpp -- Sort clusters by length (descending order) -- Create empty point clouds for tracking points and skeletons -- Initialize graph structures for cluster merging -``` - -b) For each cluster: - -- If cluster length > length_cut: - - Add directly to point clouds - - If length > 30cm, construct skeleton - -- If cluster length ≤ length_cut: - - Analyze points in each view: - - Count total points - - Identify points in dead regions - - Find points matching existing clusters - - Count unique points (no matches) - -c) Decision Criteria: -```cpp -- Primary criteria for ghost identification: - - Low unique point count (≤10% per view) - - High point overlap with existing clusters - - Consistent matches across at least 2 views - - Physical proximity (<20cm) to matched cluster -``` - -4. Key Thresholds: -- Distance cuts: - - Close point matching: 1.2cm/3 - - Skeleton matching: 2.4cm - - Cluster merging distance: 20cm -- Overlap criteria: - - Point matching: 80% in primary views - - Secondary view: 65% match -- Uniqueness thresholds: - - Per view: ≤10% unique points - - Total: ≤5% unique points across views - -5. Actions: -- For identified ghosts: - - If close to matching cluster: Merge clusters - - Otherwise: Remove cluster -- For valid clusters: - - Add to point clouds - - Create skeleton if long enough - -6. 
Final Processing: -- Execute cluster merges based on graph connectivity -- Remove identified ghost clusters -- Update cluster grouping structure - -The algorithm is particularly sophisticated in how it: -- Handles dead wire regions -- Uses multi-view consistency checks -- Combines both point-by-point and skeleton-based matching -- Applies different criteria based on cluster length -- Uses a hierarchical decision process for ghost identification - -This deghosting is crucial for improving track reconstruction quality in particle detectors by removing false tracks that can arise from ambiguous wire crossings or detector noise. - diff --git a/clus/docs/Clustering_examine_x_boundary.md b/clus/docs/Clustering_examine_x_boundary.md deleted file mode 100644 index a6303115b..000000000 --- a/clus/docs/Clustering_examine_x_boundary.md +++ /dev/null @@ -1,222 +0,0 @@ -I'll explain the `clustering_examine_x_boundary` function which applies the x-boundary examination across a group of clusters. - -```mermaid -flowchart TD - Start([Start]) --> GetClusters[Get live clusters
from grouping] - - GetClusters --> FilterLoop{For each cluster} - - FilterLoop --> |Size Filter| SizeCheck{5cm < length < 150cm?} - SizeCheck -->|No| NextCluster[Continue to next cluster] - NextCluster --> FilterLoop - - SizeCheck -->|Yes| ExamineCluster[Examine X Boundary:
1. Count points distribution
2. Check for splitting criteria
3. Create new clusters if needed] - - ExamineCluster --> HandleResults{New clusters
created?} - HandleResults -->|Yes| UpdateGroups[Update cluster groupings] - HandleResults -->|No| FilterLoop - - UpdateGroups --> FilterLoop - - FilterLoop -->|Done| End([End]) - -``` - -Here's the detailed breakdown of the algorithm: - -1. Cluster Selection and Filtering: -```cpp -// Get vector of current clusters -std::vector live_clusters = live_grouping.children(); - -// Only examine clusters within specific length bounds -for (size_t i = 0; i != live_clusters.size(); i++) { - Cluster *cluster = live_clusters.at(i); - if (cluster->get_length() > 5 * units::cm && - cluster->get_length() < 150 * units::cm) { - // Process cluster... - } -} -``` - -2. Key Characteristics: -- Works on a collection of clusters in a `Grouping` object -- Only processes clusters between 5cm and 150cm in length -- Uses the individual `examine_x_boundary` function we discussed earlier for each qualifying cluster - -3. Process Flow: - - Gets all current clusters from the grouping - - For each cluster: - - Checks if it meets the size criteria (5-150cm) - - If it qualifies, examines its x-boundary distribution - - May split the cluster based on x-coordinate boundaries - - Updates the grouping with any new clusters created - -4. 
Implementation Details: -```cpp -void clustering_examine_x_boundary( - Grouping& live_grouping, // Collection of clusters to examine - const double low_limit, // Lower x-coordinate boundary - const double high_limit // Upper x-coordinate boundary -) { - std::vector live_clusters = live_grouping.children(); - - for (size_t i = 0; i != live_clusters.size(); i++) { - Cluster *cluster = live_clusters.at(i); - - // Size filter - if (cluster->get_length() > 5 * units::cm && - cluster->get_length() < 150 * units::cm) { - - // Examine and potentially split cluster - std::unordered_map id2clusters = - cluster->examine_x_boundary(low_limit, high_limit); - - // Further processing of split clusters would happen here - // (though currently commented out in the source) - } - } -} -``` - -5. Important Parameters: -- `live_grouping`: The collection of clusters to examine -- `low_limit`: The lower x-coordinate boundary for analysis -- `high_limit`: The upper x-coordinate boundary for analysis - -6. Key Features: -- Operates at the grouping level rather than individual cluster level -- Provides batch processing of multiple clusters -- Maintains size-based filtering to focus on relevant clusters -- Preserves the group structure while allowing for cluster splits - -7. Usage Context: -This function is typically used in larger clustering systems where: -- You need to analyze and potentially split multiple clusters based on x-coordinate distribution -- The clusters represent physical detector data that might span multiple regions -- You want to maintain clean boundaries between different detector regions -- You need to handle multiple clusters efficiently in a batch process - -8. 
Considerations: -- The function focuses on clusters of moderate size (5-150cm) -- Very small or very large clusters are skipped -- The actual splitting logic is handled by the individual `examine_x_boundary` function -- The function preserves the overall grouping structure while allowing for cluster modifications - -This clustering function serves as a higher-level manager for the x-boundary examination process, applying the individual cluster examination across a collection of clusters while maintaining appropriate size filters and grouping structures. It's particularly useful in particle physics applications where understanding and maintaining proper spatial boundaries is crucial for accurate event reconstruction. - - -Let me explain the `examine_x_boundary` function which appears to be used for analyzing and potentially splitting clusters based on x-coordinate boundaries. - -```mermaid -flowchart TD - Start([Start]) --> Count[Count points in 3 regions:
- below low_limit
- between limits
- above high_limit] - - Count --> Track[Track boundary points:
x_max for points below low_limit
x_min for points above high_limit] - - Track --> CheckRatio{Points outside < 7.5%
of points between?} - - CheckRatio -->|No| ReturnOriginal[Return Original
Single Cluster] - - CheckRatio -->|Yes| CheckBoundaries{Check boundary
conditions} - - CheckBoundaries -->|x_max < low_limit - 1cm| Group1[Create Group 1
for low x region] - CheckBoundaries -->|x_min > high_limit + 1cm| Group3[Create Group 3
for high x region] - - Group1 --> CreateMiddle[Create Group 2
for middle region] - Group3 --> CreateMiddle - - CreateMiddle --> AssignPoints[Assign points to groups:
- x < low_limit: Group 1 or 2
- x > high_limit: Group 3 or 2
- between: Group 2] - - AssignPoints --> Return[Return Map of
Separated Clusters] - - ReturnOriginal --> End([End]) - Return --> End - -``` - -The flowchart now better matches the actual algorithm implementation and makes the decision process more clear to understand. - -The algorithm's main purpose is to examine if a cluster should be split based on its distribution across x-coordinate boundaries. Here's the detailed breakdown: - -1. Point Distribution Analysis: -- The function takes two parameters: `low_limit` and `high_limit` defining x-coordinate boundaries -- It counts points in three regions: - - Below low_limit (num_points[0]) - - Between limits (num_points[1]) - - Above high_limit (num_points[2]) -- Tracks maximum x value for points below low_limit (x_max) -- Tracks minimum x value for points above high_limit (x_min) - -2. Decision Making: -- The main criterion is: `num_points[0] + num_points[2] < num_points[1] * 0.075` - - This checks if the total points outside boundaries are less than 7.5% of points between boundaries - - If true, the cluster is a candidate for splitting - -3. Boundary Analysis: -- If x_max is significantly below low_limit (< low_limit - 1cm), creates Group 1 -- If x_min is significantly above high_limit (> high_limit + 1cm), creates Group 3 -- Always creates Group 2 for the middle region if any splitting occurs - -4. Point Assignment: -- For each measurement cell (blob): - - If x < low_limit: assigns to Group 1 (if exists) or Group 2 - - If x > high_limit: assigns to Group 3 (if exists) or Group 2 - - Otherwise: assigns to Group 2 - -5. 
Result: -- Returns a map of group IDs to new clusters if splitting occurred -- Otherwise, maintains original cluster structure - -The key implementation can be highlighted in this code excerpt: - - - -```cpp -// Checking if points outside boundaries are less than 7.5% of points between boundaries -if (num_points[0] + num_points[2] < num_points[1] * 0.075) { - // Initialize potential groups - std::set groupids; - - // Check if need group 1 (low x boundary) - if (x_max < low_limit - 1.0 * units::cm && x_max > -1e8) { - groupids.insert(1); - } - - // Check if need group 3 (high x boundary) - if (x_min > high_limit + 1.0 * units::cm && x_min < 1e8) { - groupids.insert(3); - } - - // If any boundary groups needed, create middle group and assign points - if (!groupids.empty()) { - groupids.insert(2); // Middle group - - // Assign each blob to appropriate group - for (size_t idx=0; idx < mcells.size(); idx++) { - Blob *mcell = mcells.at(idx); - if (mcell->points()[0].x() < low_limit) { - // Assign to group 1 if it exists, otherwise group 2 - b2groupid[idx] = (groupids.find(1) != groupids.end()) ? 1 : 2; - } - else if (mcell->points()[0].x() > high_limit) { - // Assign to group 3 if it exists, otherwise group 2 - b2groupid[idx] = (groupids.find(3) != groupids.end()) ? 3 : 2; - } - else { - // Points between boundaries go to group 2 - b2groupid[idx] = 2; - } - } - } -} - -``` - -This algorithm is particularly useful for: -1. Identifying and separating clusters that span significant x-coordinate boundaries -2. Handling cases where a small portion of points lie outside main boundaries -3. Maintaining cluster integrity when the distribution doesn't warrant splitting -4. Creating clean separations based on spatial distribution of points - -The function appears to be used in a larger clustering system, likely for particle tracking or detector data analysis, where understanding the spatial distribution of points along the x-axis is crucial for proper event reconstruction. 
\ No newline at end of file diff --git a/clus/docs/Clustering_extend.md b/clus/docs/Clustering_extend.md deleted file mode 100644 index 74e828b2c..000000000 --- a/clus/docs/Clustering_extend.md +++ /dev/null @@ -1,959 +0,0 @@ -# Analysis of clustering_extend Algorithm - -## Overview -`clustering_extend` is a comprehensive clustering function that manages different clustering strategies based on a flag parameter. It builds a graph of cluster connections and merges clusters based on various geometric criteria. - -```mermaid -flowchart TD - A[Start] --> B[Initialize Directions, Graph, and Clusters] - B --> C{Loop through Live Clusters} - C --> D1{Check cluster_1 Length > Length_1_Cut} - D1 -->|Yes| E{Flag Check} - D1 -->|No| C - - %% Prolong Case - E -->|Flag == 1| F1[Calculate Earliest and Latest Points] - F1 --> G1[Calculate Directions for Earliest and Latest Points] - G1 --> H1{Angle < Threshold?} - H1 -->|Yes| I1[Loop through Clusters for Merging] - I1 --> J1{If cluster_2 Valid} - J1 -->|Yes| K1[Call Clustering_4th_prol] - K1 --> L1[Add Edge in Graph g if Merged] - L1 --> C - - %% Parallel Case - E -->|Flag == 2| F2[Calculate Highest and Lowest Points] - F2 --> G2[Calculate Directions for Highest and Lowest Points] - G2 --> H2{Angle < Threshold?} - H2 -->|Yes| I2[Loop through Clusters for Merging] - I2 --> J2{If cluster_2 Valid} - J2 -->|Yes| K2[Call Clustering_4th_para] - K2 --> L2[Add Edge in Graph g if Merged] - L2 --> C - - %% Regular Case - E -->|Flag == 3| F3[Determine Points for Merging] - F3 --> G3[Loop through Clusters for Merging] - G3 --> H3{If cluster_2 Valid} - H3 -->|Yes| I3[Call Clustering_4th_reg] - I3 --> J3[Add Edge in Graph g if Merged] - J3 --> C - - %% Dead Cluster Case - E -->|Flag == 4| F4{Is Cluster in cluster_connected_dead?} - F4 -->|Yes| G4[Loop through Clusters for Merging] - G4 --> H4{If cluster_2 Length > Length_2_Cut} - H4 -->|Yes| I4[Call Clustering_4th_dead] - I4 --> J4[Add Edge in Graph g if Merged] - J4 --> C - - %% End Process - 
C -->|All Clusters Processed| M[Merge Clusters] - M --> N[End] - - -``` - -## Function Signature -```cpp -void clustering_extend( - Grouping& live_grouping, - cluster_set_t& cluster_connected_dead, // in/out - const int flag, // clustering strategy flag - const double length_cut = 150*units::cm, // distance threshold - const int num_try = 0, // number of attempts - const double length_2_cut = 3*units::cm, // secondary length threshold - const int num_dead_try = 3 // attempts for dead region case -) -``` - -## Core Algorithm Components - -### 1. Initial Setup -```cpp -geo_point_t drift_dir(1, 0, 0); // drift direction along X -const auto [angle_u,angle_v,angle_w] = live_grouping.wire_angles(); - -// Direction vectors for wire planes -geo_point_t U_dir(0, cos(angle_u), sin(angle_u)); -geo_point_t V_dir(0, cos(angle_v), sin(angle_v)); -geo_point_t W_dir(0, cos(angle_w), sin(angle_w)); -``` - -### 2. Graph Construction -```cpp -typedef cluster_connectivity_graph_t Graph; -Graph g; -std::unordered_map ilive2desc; // live index to graph descriptor -std::map map_cluster_index; -``` -- Creates a graph to represent cluster connections -- Maps clusters to graph vertices - -### 3. Length Threshold Calculation -```cpp -int length_1_cut = 40*units::cm + num_try * 10*units::cm; -if (flag==1) - length_1_cut = 20*units::cm + num_try*10*units::cm; //prolong case -``` -- Adjusts length threshold based on attempt number -- Special case for prolonged clustering - -### 4. 
Clustering Strategy Selection -The function implements four different clustering strategies based on the flag parameter: - -#### Flag 1: Prolonged Case -```cpp -if (flag==1) { - // Handle prolonged clustering - std::tie(earliest_p, latest_p) = cluster_1->get_earliest_latest_points(); - - // Check angles with wire directions - // Process both earliest and latest points -} -``` -- Uses earliest/latest points -- Checks angles with wire directions -- Calls `Clustering_4th_prol` - -#### Flag 2: Parallel Case -```cpp -else if (flag==2) { - // Handle parallel clustering - std::tie(highest_p, lowest_p) = cluster_1->get_highest_lowest_points(); - - // Check for parallel alignment with drift direction - // Process both highest and lowest points -} -``` -- Uses highest/lowest points -- Checks alignment with drift direction -- Calls `Clustering_4th_para` - -#### Flag 3: Regular Case -```cpp -else if (flag==3) { - // Handle regular clustering - auto hl_ps = cluster_1->get_highest_lowest_points(); - auto el_ps = cluster_1->get_earliest_latest_points(); - - // Choose best points based on separation - // Process both points for clustering -} -``` -- Considers both highest/lowest and earliest/latest points -- Selects points with maximum separation -- Calls `Clustering_4th_reg` - -#### Flag 4: Dead Region Case -```cpp -else if (flag==4) { - // Handle dead region clustering - if (cluster_connected_dead.find(cluster_1)!=cluster_connected_dead.end()) { - // Process clusters connected to dead regions - } -} -``` -- Processes clusters connected to dead regions -- Uses length_2_cut threshold -- Calls `Clustering_4th_dead` - -### 5. 
Cluster Processing Loop -```cpp -for (size_t i=0; i!=live_clusters.size(); i++) { - auto cluster_1 = live_clusters.at(i); - - if (cluster_1->get_length() > length_1_cut) { - // Process cluster based on selected strategy - } -} -``` -- Processes each cluster above length threshold -- Applies selected clustering strategy -- Adds edges to graph for connected clusters - -### 6. Graph-based Merging -```cpp -merge_clusters(g, live_grouping, cluster_connected_dead); -``` -- Uses graph to merge connected clusters -- Updates cluster_connected_dead set - -## Key Features - -1. Multiple Clustering Strategies - - Prolonged clustering for extended tracks - - Parallel clustering for aligned segments - - Regular clustering for general cases - - Dead region handling for detector gaps - -2. Adaptive Thresholds - - Length thresholds adjust with attempts - - Different criteria for each strategy - - Special handling of short clusters - -3. Graph-based Connectivity - - Represents cluster connections as graph - - Enables efficient merging - - Maintains connectivity information - -4. Wire Plane Integration - - Uses wire angles for geometry - - Considers drift direction - - Plane-specific direction vectors - -5. Cluster Protection - - Tracks used clusters - - Prevents multiple use of small clusters - - Length-based filtering - -## Usage Context - -This function serves as a high-level orchestrator for: -1. Track Reconstruction - - Connects broken track segments - - Handles detector effects - - Maintains geometric consistency - -2. Detector Specifics - - Accounts for wire plane geometry - - Handles dead regions - - Drift direction considerations - -3. Multi-pass Processing - - Allows multiple attempts - - Adjusts criteria per attempt - - Progressive clustering strategy - -## Implementation Details - -1. 
Cluster Selection -```cpp -if (used_clusters.find(cluster_2)!=used_clusters.end()) continue; -if (cluster_2==cluster_1) continue; -``` -- Prevents reuse of processed clusters -- Avoids self-clustering - -2. Small Cluster Protection -```cpp -if (cluster_2->get_length()<10*units::cm) - used_clusters.insert(cluster_2); -``` -- Marks small clusters as used after merging -- Prevents over-clustering - -3. Graph Edge Addition -```cpp -boost::add_edge(ilive2desc[map_cluster_index[cluster_1]], - ilive2desc[map_cluster_index[cluster_2]], g); -``` -- Creates connections between related clusters -- Prepares for final merging step - - -# Analysis of Clustering_4th_prol Algorithm - -## Overview -`Clustering_4th_prol` determines if two clusters should be merged in cases where one cluster might be a prolongation (extension) of another. It focuses on directional alignment and spatial continuity. - -## Function Signature -```cpp -bool Clustering_4th_prol( - const Cluster& cluster_1, - const Cluster& cluster_2, - double length_2, - geo_point_t& earliest_p, - geo_point_t& dir_earlp, - double length_cut -) -``` - -## Core Algorithm - -### 1. Initial Distance Check -```cpp -auto temp_results = cluster_2.get_closest_point_blob(earliest_p); -geo_point_t p2 = temp_results.first; -geo_point_t diff = earliest_p - p2; -double dis = diff.magnitude(); -``` -- Finds closest point in cluster_2 to the earliest point -- Calculates distance between clusters - -### 2. Primary Clustering Analysis -```cpp -if (dis < length_cut) { - // Perform detailed analysis -} -``` -Only proceeds if clusters are within the length_cut threshold. - -### 3. 
Direction Analysis -If within distance threshold, analyzes directional alignment: -```cpp -geo_point_t dir_bp(p2.x()-earliest_p.x(), - p2.y()-earliest_p.y(), - p2.z()-earliest_p.z()); -double angle_diff = (3.1415926-dir_bp.angle(dir_earlp))/3.1415926*180.; -``` -Checks for one of two conditions: -```cpp -if (angle_diff < 3 || angle_diff > 177 || - dis * sin(angle_diff/180.*3.1415926) < 6*units::cm) -``` -- Nearly parallel alignment (angle < 3° or > 177°) -- Small perpendicular distance (< 6cm) - -### 4. Secondary Direction Validation -If directional alignment is good, performs additional validation: -```cpp -geo_point_t dir = cluster_2.vhough_transform(p2, 60*units::cm); -``` - -### 5. Final Decision Logic -Two paths for acceptance: - -#### Path 1: Short Cluster Case -```cpp -if (length_2 < 10*units::cm && - fabs(dir.angle(dir_earlp)-3.141926/2.) > 30/180.*3.1415926) { - return true; -} -``` -- For clusters shorter than 10cm -- Direction not perpendicular to reference direction - -#### Path 2: Direction Alignment Case -```cpp -if ((3.14151926-dir.angle(dir_earlp))/3.1415926*180. < 5. || - dir.angle(dir_earlp)/3.1415926*180. < 5.) - return true; -``` -- Directions nearly parallel (within 5 degrees) - -## Detailed Analysis of Key Components - -### 1. Distance Metrics -The algorithm uses two types of distances: - -1. Direct Distance: -```cpp -double dis = diff.magnitude(); -``` -- Straight-line distance between closest points -- Must be less than length_cut - -2. Perpendicular Distance: -```cpp -dis * sin(angle_diff/180.*3.1415926) -``` -- Distance perpendicular to the direction vector -- Used for alignment checking - -### 2. Angle Calculations - -1. Initial Angle Difference: -```cpp -double angle_diff = (3.1415926-dir_bp.angle(dir_earlp))/3.1415926*180.; -``` -- Measures alignment between connection vector and reference direction -- Converted to degrees for threshold comparisons - -2. Secondary Direction Angle: -```cpp -fabs(dir.angle(dir_earlp)-3.141926/2.) 
-``` -- Checks if directions are perpendicular -- Used specifically for short cluster validation - -### 3. Direction Vector Calculation -```cpp -geo_point_t dir = cluster_2.vhough_transform(p2, 60*units::cm); -``` -- Uses Hough transform with 60cm radius -- Determines predominant direction around point p2 - -## Key Features - -1. Multi-level Validation - - Initial distance check - - Direction alignment check - - Perpendicular distance check - - Secondary direction validation - -2. Special Case Handling - - Different criteria for short clusters (<10cm) - - Accommodates both aligned and slightly offset prolongations - -3. Robust Direction Analysis - - Uses multiple direction calculations - - Considers both local and extended directions - - Handles various geometric configurations - -4. Flexible Acceptance Criteria - - Multiple paths to acceptance - - Different thresholds for different cases - - Balances sensitivity and specificity - -## Usage Context - -This function is specialized for: -1. Detecting cluster extensions - - One cluster continuing another - - Broken tracks that should be connected - - Split segments that belong together - -2. Handling Different Cluster Types - - Short fragments (<10cm) - - Longer segments with clear direction - - Offset but aligned segments - -3. Specific Detector Scenarios - - Track splitting due to detector effects - - Gap crossing in detector regions - - Direction-based reconstruction - -## Acceptance Criteria Summary -A cluster pair is accepted as prolonged if: -1. They are within length_cut distance AND -2. Either: - - Their directions are nearly parallel (<5° difference) - - The shorter cluster (<10cm) is not perpendicular - - The connecting vector is well-aligned with minimal perpendicular offset - -The algorithm is particularly useful in reconstructing tracks that may have been artificially split but maintain directional continuity. 
- - -# Analysis of Clustering_4th_para Algorithm - -## Overview -`Clustering_4th_para` determines if two clusters should be merged specifically in cases where they might be parallel to each other. It focuses on analyzing spatial relationships and checking for consistent point spacing along projected paths. - -## Function Signature -```cpp -bool Clustering_4th_para( - const Cluster& cluster_1, - const Cluster& cluster_2, - double length_1, double length_2, - geo_point_t& earliest_p, - geo_point_t& dir_earlp, - double length_cut -) -``` - -## Core Algorithm - -### 1. Initial Distance Check -```cpp -auto temp_results = cluster_2.get_closest_point_blob(earliest_p); -geo_point_t p2 = temp_results.first; -geo_point_t diff = p2 - earliest_p; -double dis = diff.magnitude(); -``` -- Finds the closest point in cluster_2 to the earliest point of cluster_1 -- Calculates initial distance between clusters - -### 2. Main Clustering Check -```cpp -if (dis < length_cut) { - // Perform detailed analysis -} -``` -Only proceeds with detailed analysis if clusters are within the length_cut threshold. - -### 3. Point Projection Analysis -The core of the algorithm involves checking points along a projected path: - -```cpp -for (int i = -5; i != 10; i++) { - // Calculate test point along the early direction - test_point.set( - earliest_p.x() - dir_earlp.x() * (dis + i*2*units::cm), - earliest_p.y() - dir_earlp.y() * (dis + i*2*units::cm), - earliest_p.z() - dir_earlp.z() * (dis + i*2*units::cm) - ); - - // Find closest point in cluster_2 to test point - auto temp_results = cluster_2.get_closest_point_blob(test_point); - geo_point_t test_point1 = temp_results.first; -``` - -### 4. 
Point Distance Analysis -For each projected point: -```cpp -if (sqrt(pow(test_point1.x()-test_point.x(), 2) + - pow(test_point1.y()-test_point.y(), 2) + - pow(test_point1.z()-test_point.z(), 2)) < 1.5*units::cm) { - // Calculate projected distance - double temp_dis = (test_point1.x() - earliest_p.x()) * dir_earlp.x() + - (test_point1.y() - earliest_p.y()) * dir_earlp.y() + - (test_point1.z() - earliest_p.z()) * dir_earlp.z(); - temp_dis = (-1) * temp_dis; - - // Track minimum and maximum distances - if (temp_dis < min_dis) min_dis = temp_dis; - if (temp_dis > max_dis) max_dis = temp_dis; -} -``` - -### 5. Final Decision -```cpp -if ((max_dis - min_dis) > 2.5*units::cm) - return true; -``` -- Returns true if the range of projected distances exceeds 2.5cm -- This indicates consistent parallel structure between clusters - -## Detailed Analysis of Key Components - -### 1. Point Sampling Strategy -- Samples 15 points (-5 to 9) along the projected direction -- Each point is spaced 2cm apart -- This covers a total range of 30cm for analysis - -### 2. Projection Mathematics -The projection calculation uses vector arithmetic to: -1. Project points along the early direction vector -2. Scale the projection by the initial distance -3. Add incremental offsets for sampling - -### 3. Distance Metrics -The algorithm uses two types of distances: -1. Perpendicular Distance: -```cpp -sqrt(pow(test_point1.x()-test_point.x(), 2) + - pow(test_point1.y()-test_point.y(), 2) + - pow(test_point1.z()-test_point.z(), 2)) -``` -- Must be less than 1.5cm to consider points matching - -2. Projected Distance: -```cpp -(test_point1.x() - earliest_p.x()) * dir_earlp.x() + -(test_point1.y() - earliest_p.y()) * dir_earlp.y() + -(test_point1.z() - earliest_p.z()) * dir_earlp.z() -``` -- Measures distance along the projection direction -- Used to determine extent of parallel overlap - -### 4. Parallel Structure Detection -The algorithm identifies parallel structures by: -1. 
Finding points in cluster_2 that closely match projected points -2. Measuring the extent of these matching points along the projection -3. Requiring a minimum extent (2.5cm) to confirm parallel structure - -## Key Features -1. Robust Point Sampling - - Wide sampling range (-5 to 9 points) - - Fine-grained spacing (2cm) - - Bidirectional sampling around reference point - -2. Multi-level Distance Checks - - Initial proximity check (length_cut) - - Point-to-point matching threshold (1.5cm) - - Parallel extent requirement (2.5cm) - -3. Vector-based Projection - - Uses direction vector for consistent projection - - Maintains spatial relationships in 3D - - Accounts for cluster orientation - -## Usage Context -This function is specialized for: -- Detecting parallel cluster segments -- Verifying consistent spatial relationships -- Identifying structures that should be merged -- Handling specific detector geometry cases - -The algorithm is particularly useful in cases where: -- Clusters may be broken into parallel segments -- Detector effects create parallel track artifacts -- Reconstruction requires merging parallel structures - - - -# Analysis of Clustering_4th_reg Algorithm - -## Overview -`Clustering_4th_reg` is a regular clustering function that determines whether two clusters should be merged based on their spatial relationships and geometric properties. It uses a sophisticated set of criteria including distances, angles, and direction vectors. - -## Function Signature -```cpp -bool Clustering_4th_reg( - const Cluster& cluster_1, - const Cluster& cluster_2, - double length_1, double length_2, - geo_point_t p1, double length_cut -) -``` - -## Core Algorithm - -### 1. 
Initial Distance Checks -```cpp -auto temp_results = cluster_2.get_closest_point_blob(p1); -geo_point_t p2 = temp_results.first; -geo_point_t diff = p1 - p2; -double dis1 = diff.magnitude(); - -temp_results = cluster_1.get_closest_point_blob(p2); -p1 = temp_results.first; -diff = p1 - p2; -double dis = diff.magnitude(); -``` -- Finds closest points between clusters -- Calculates two distances: - - `dis1`: Initial distance from p1 to cluster_2 - - `dis`: Refined distance after finding best matching points - -### 2. Special Case Rejection -```cpp -if (dis1 > 15*units::cm && dis < 3*units::cm && - length_2 > 80*units::cm && length_1 > 80*units::cm) - return false; -``` -- Rejects cases where: - - Initial distance is large (>15cm) - - Refined distance is very small (<3cm) - - Both clusters are long (>80cm) -- This prevents merging of likely unrelated long clusters - -### 3. Main Clustering Logic -The function has two main paths based on cluster properties: - -#### Path 1: Long Clusters with Valid Distance -```cpp -if (dis < length_cut && (length_2 >= 40*units::cm || dis < 3*units::cm)) -``` -For longer clusters or very close pairs: - -1. Calculate average positions: -```cpp -geo_point_t cluster1_ave_pos = cluster_1.calc_ave_pos(p1, 5*units::cm); -geo_point_t cluster2_ave_pos = cluster_2.calc_ave_pos(p2, 5*units::cm); -``` - -2. Determine direction vectors using adaptive radius: -```cpp -if (cluster_1.nnearby(cluster1_ave_pos, 30*units::cm) > 50 && length_1 < 120*units::cm) { - dir1 = cluster_1.vhough_transform(cluster1_ave_pos, 30*units::cm); -} else { - dir1 = cluster_1.vhough_transform(cluster1_ave_pos, 80*units::cm); -} -``` -- Uses smaller radius (30cm) for dense, shorter clusters -- Uses larger radius (80cm) for sparse or longer clusters - -3. Check for directional consistency: -- If clusters point away from each other: -```cpp -if (dir2.angle(dir1) > 3.1415926/2.) 
{ - // Check points along dir1 - // Look for consistent spacing -} -``` -- If clusters point toward each other: -```cpp -if (dir2.angle(dir3) < 3.1415926/2.) { - // Check points along dir3 - // Look for consistent spacing -} -``` - -#### Path 2: Short Clusters or Large Distance -```cpp -else if (dis < 2 * length_cut && length_2 < 40*units::cm) -``` -For shorter clusters or larger distances: - -1. Check for parallel alignment: -```cpp -double angle1 = fabs(dir2.angle(drift_dir)-3.1415926/2.)/3.1415926*180.; -if (angle1 < 5 && dis < 2*length_cut || angle1 < 2) - flag_para = true; -``` - -2. Check for prolonged alignment: -```cpp -if (angle2 < 7.5 || angle3 < 7.5) - flag_prol = true; -``` - -3. Apply specific criteria based on flags: -- For parallel cases: -```cpp -if (flag_para && fabs(dir3.angle(drift_dir)-3.141592/2.) < 10/180.*3.1415926) { - if (angle4 < 30 && (length_2 < 12*units::cm && fabs(angle5-90.) > 30 || angle5 < 45)) - return true; -} -``` -- For prolonged cases: -```cpp -if (flag_prol) { - if (angle4 < 25 && (length_2 < 15*units::cm && fabs(angle5-90.) > 30 || angle5 < 25)) - return true; -} -``` - -### 4. Non-Parallel Case Analysis -```cpp -if (fabs(dir2.angle(drift_dir)-3.1415926/2.)/3.1415926*180. > 7.5) { - if (is_angle_consistent(dir1, dir2, false, 10, angle_u, angle_v, angle_w, 2)) { - // Additional checks for short clusters and angle consistency - } -} -``` -- Performs special checks for clusters not parallel to drift direction -- Uses wire angles for additional geometric validation - -## Key Features -1. Adaptive radius selection based on cluster density -2. Multiple geometric criteria for different cluster configurations -3. Special handling of parallel and prolonged cases -4. Direction consistency checks using drift direction -5. 
Wire angle validation for non-parallel cases - -## Usage Context -This function is part of a complex clustering system used in particle detector reconstruction, specifically designed to: -- Handle regular clustering cases (not dead regions) -- Merge clusters that show geometric consistency -- Avoid false mergers through multiple validation criteria -- Account for detector geometry (wire angles and drift direction) - -The algorithm uses different strategies for long vs. short clusters and includes special cases for parallel and prolonged configurations, making it highly adaptable to various cluster geometries. - - -# Analysis of Find_Closest_Points Algorithm - -## Overview -`Find_Closest_Points` finds the closest points between two clusters using an iterative approach. It tries two different starting points to ensure it finds the globally closest points between the clusters. - -## Function Signature -```cpp -double Find_Closest_Points( - const Cluster& cluster1ref, - const Cluster& cluster2ref, - double length_1, - double length_2, - double length_cut, - geo_point_t& p1_save, // Output parameter - geo_point_t& p2_save, // Output parameter - bool flag_print // Debug printing -) -``` - -## Core Algorithm - -### 1. Initial Setup and Cluster Ordering -```cpp -bool swapped = false; -if (length_1 >= length_2) { - swapped = true; - std::swap(cluster1, cluster2); - std::swap(length_1, length_2); -} -``` -- The algorithm swaps clusters if needed to ensure cluster1 is shorter than cluster2 -- This standardizes the process regardless of input order -- The final points are swapped back at the end if a swap occurred - -### 2. Input Validation -```cpp -if (!cluster1->nchildren() || !cluster2->nchildren()) { - raise("Find_Closest_Points: given empty cluster"); -} -``` -- Checks that neither cluster is empty - -### 3. 
Two-Pass Search -The algorithm makes two passes, each using a different starting point: - -#### First Pass (Starting from First Blob) -```cpp -mcell1 = cluster1->get_first_blob(); -p1 = mcell1->center_pos(); -``` -1. Starts from the first blob of cluster1 -2. Iteratively finds closest points until convergence: -```cpp -while (mcell1 != prev_mcell1 || mcell2 != prev_mcell2) { - prev_mcell1 = mcell1; - prev_mcell2 = mcell2; - - // Find closest point in cluster2 to p1 - auto temp_results = cluster2->get_closest_point_blob(p1); - p2 = temp_results.first; - mcell2 = temp_results.second; - - // Find closest point in cluster1 to p2 - temp_results = cluster1->get_closest_point_blob(p2); - p1 = temp_results.first; - mcell1 = temp_results.second; -} -``` - -#### Second Pass (Starting from Last Blob) -```cpp -mcell1 = cluster1->get_last_blob(); -p1 = mcell1->center_pos(); -``` -1. Starts from the last blob of cluster1 -2. Uses the same iterative process as the first pass -3. Updates the saved points if a closer pair is found - -### 4. Convergence Process -- For each pass: - 1. Find closest point in cluster2 to current point in cluster1 - 2. Find closest point in cluster1 to found point in cluster2 - 3. Repeat until the points stop moving (convergence) - 4. Keep track of closest points found so far - -### 5. Distance Calculation and Result -```cpp -geo_point_t diff = p1 - p2; -dis = diff.magnitude(); - -if (dis < dis_save) { - dis_save = dis; - p1_save = p1; - p2_save = p2; -} -``` -- Calculates the distance between each pair of points -- Keeps track of the minimum distance found -- Updates the saved points when a closer pair is found - -### 6. Final Output -```cpp -if (swapped) { - std::swap(p1_save, p2_save); -} -return dis_save; -``` -- If clusters were swapped initially, swaps the points back -- Returns the minimum distance found - -## Key Features -1. Two-pass approach to avoid local minima - - One pass starting from first blob - - One pass starting from last blob - -2. 
Iterative convergence - - Alternates between clusters until finding stable points - - Handles complex cluster geometries - -3. Robust handling of cluster ordering - - Standardizes processing by ensuring consistent order - - Preserves original order in output - -4. Distance optimization - - Keeps track of global minimum distance - - Updates points only when better ones are found - -## Usage Context -This function is fundamental to many clustering operations because it: -- Provides a reliable measure of cluster proximity -- Identifies the actual points where clusters are closest -- Supports higher-level clustering decisions -- Handles complex cluster geometries through its iterative approach - -The algorithm is particularly useful in particle physics detector data processing where accurate spatial relationships between clusters need to be determined for reconstruction purposes. - - - -# Analysis of Clustering_4th_dead Algorithm - -## Overview -The `Clustering_4th_dead` function determines whether two clusters should be merged based on their geometric properties and spatial relationships. It's specifically designed to handle "dead" regions in the detector. - -## Key Parameters -- `cluster_1`, `cluster_2`: The two clusters being compared -- `length_1`, `length_2`: Lengths of the respective clusters -- `length_cut`: Maximum allowed distance for clustering -- `num_dead_try`: Number of attempts to find valid clustering points (default: 3) - -## Core Algorithm Flow - -### 1. Initial Distance Check -```cpp -double dis = Find_Closest_Points(cluster_1, cluster_2, length_1, length_2, length_cut, p1, p2); -``` -The function first finds the closest points between the two clusters. The clusters are considered for merging if either: -- The distance is less than `length_cut` -- The second cluster is longer than 50cm and distance is less than 80cm - -### 2. 
Multiple Attempt Analysis -The algorithm makes up to `num_dead_try` attempts (default 3) to validate the clustering: - -#### Attempt 1 (i==0): -- Calculates average positions around the closest points using 5cm radius -- Computes direction vectors using either: - - 20cm radius if `num_dead_try==1` - - 80cm radius otherwise -- Stores these initial calculations for later attempts - -#### Attempt 2 (i==1): -- Only proceeds if length_2 ≥ 15cm and not a special case (length_2 > 150cm with dis < 15cm) -- Uses the initial cluster_1 position/direction -- Tries to find a matching point in cluster_2 along the opposite direction - -#### Attempt 3 (i==2): -- Similar to attempt 2 but starts from cluster_2 and looks for matches in cluster_1 - -### 3. Geometric Analysis -For each attempt, the algorithm performs several geometric checks: - -1. Non-parallel Case Analysis: -```cpp -if (fabs(ave_dir.angle(drift_dir)-3.1415926/2.)/3.1415926*180.>7.5) { - // Checks angle consistency between direction vectors -} -``` - -2. Angle Analysis: -```cpp -double angle1 = (3.1415926-dir1.angle(dir2))/3.1415926*180.; // Angle between dir1 and connection vector -double angle2 = dir3.angle(dir2)/3.1415926*180.; // Angle between dir3 and connection vector -double angle3 = (3.1415926-dir1.angle(dir3))/3.1415926*180.; // Angle between cluster directions -``` - -### 4. Clustering Conditions - -#### For Short Clusters (≤10cm): -```cpp -if (length_2 <= 10*units::cm) { - if (angle1 < 15 && (angle2 < 60 || length_2 < 5*units::cm)) - return true; -} -``` - -#### For Longer Clusters: -```cpp -if (angle1 < 15 && angle2 < 15 && angle3 < 25 || - angle3 < 10 && (angle1+angle2) < 45 && dis < 5*units::cm) - return true; -``` - -#### Additional Proximity Test -For distances under 30cm, performs an additional test by sampling points along the cluster directions to verify consistent spacing. - -## Key Features -1. Multiple validation attempts to ensure robust clustering -2. 
Different criteria for short vs long clusters -3. Consideration of both spatial proximity and directional alignment -4. Special handling of non-parallel cases -5. Protection against false positives through multiple geometric constraints - -## Usage Context -This function is part of a larger clustering system used in particle physics detector data processing, specifically designed to: -- Handle dead regions in the detector -- Connect cluster fragments that belong together -- Maintain geometric consistency in the reconstruction -- Avoid false mergers through multiple validation steps - -The algorithm is particularly conservative with short clusters (<10cm) and becomes more permissive with longer clusters when there's strong directional agreement. - -The key insight is that this algorithm uses a multi-stage approach to determine if clusters should be merged, with different criteria based on cluster lengths and geometric relationships. It's particularly focused on handling dead regions in the detector while being careful to avoid false mergers through multiple validation steps and geometric constraints. - diff --git a/clus/docs/Clustering_live_dead.md b/clus/docs/Clustering_live_dead.md deleted file mode 100644 index 552331650..000000000 --- a/clus/docs/Clustering_live_dead.md +++ /dev/null @@ -1,114 +0,0 @@ -This document provides a detailed explanation of the `clustering_live_dead` function's algorithm and logic. 
- - - -```mermaid -flowchart TD - A[Start] --> B[Sort live and dead clusters] - B --> C[Map dead clusters to live clusters] - - subgraph MapCreation["Dead to Live Mapping"] - C --> D[For each live cluster] - D --> E[For each dead cluster] - E --> F{Are clusters connected?} - F -->|Yes| G[Add to mapping] - F -->|No| H[Skip] - G --> I[Store connecting blobs] - H --> E - E --> D - end - - subgraph GraphConstruction["Graph Construction"] - J[Create connectivity graph] --> K[For each dead cluster with multiple lives] - K --> L[For each pair of live clusters] - L --> M{Is pair already tested?} - M -->|No| N[Calculate closest points] - N --> O{Should clusters merge?} - O -->|Yes| P[Add edge to graph] - O -->|No| Q[Skip pair] - M -->|Yes| Q - Q --> L - end - - MapCreation --> GraphConstruction - GraphConstruction --> R[Merge clusters based on graph] - R --> S[End] - - subgraph MergeCriteria["Merge Decision Logic"] - direction LR - MC1[Distance < 60cm] --> MC2{Check lengths} - MC2 --> MC3[Both short <= 12cm] - MC2 --> MC4[One short <= 12cm] - MC2 --> MC5[Both long > 12cm] - MC3 --> MC6[Apply short-short criteria] - MC4 --> MC7[Apply long-short criteria] - MC5 --> MC8[Apply long-long criteria] - end - -``` - -The `clustering_live_dead` function implements a sophisticated clustering algorithm that connects "live" clusters through their relationships with "dead" clusters. Here's a detailed breakdown of how it works: - -1. **Initial Setup and Sorting**: - - Takes live clusters, dead clusters, and parameters as input - - Sorts both live and dead clusters for consistent processing - - Creates data structures to map relationships between clusters - -2. **Mapping Dead to Live Clusters**: - - For each live-dead cluster pair: - - Checks if they are connected using `is_connected()` method - - If connected, stores the relationship and the connecting blobs - - Creates a mapping from dead clusters to their connected live clusters - -3. 
**Graph Construction for Merging**: - - Creates an undirected graph where: - - Vertices represent live clusters - - Edges represent mergeable relationships - - For each dead cluster with multiple connected live clusters: - - Examines each pair of connected live clusters - - Determines if they should be merged based on complex criteria - -4. **Merge Criteria** (Key geometric checks): - - Distance between clusters must be < 60cm - - Analyzes cluster directions using: - - Local direction (vhough_transform) - - Connection direction between clusters - - Drift direction considerations - -5. **Length-Based Merge Rules**: - - Different criteria based on cluster lengths: - - Short clusters (≤ 12cm) - - Long clusters (> 12cm) - - Mixed length pairs - - More stringent angle requirements for longer clusters - -6. **Specific Merge Conditions**: - ```cpp - For short-short pairs (both ≤ 12cm): - - Very close (≤ 3cm): Relaxed angle requirements - - Medium distance (≤ 15cm): Stricter angle alignment - - Far distance (≤ 60cm): Very strict angle alignment - - For long-short pairs (one > 12cm): - - Stricter criteria for the longer cluster - - More emphasis on direction consistency - - For long-long pairs (both > 12cm): - - Most stringent angle and alignment requirements - - Must show strong directional consistency - ``` - -7. **Final Merging**: - - Uses the constructed graph to determine final cluster groupings - - Calls `merge_clusters()` to combine the connected clusters - - Updates the `cluster_connected_dead` set with affected clusters - -The algorithm is particularly sophisticated in how it handles: -- Parallel track detection and merging -- Different criteria based on cluster lengths -- Multiple geometric checks (distance, angle, direction) -- Protection against incorrect merging through strict validation - -The code also includes extensive debug capabilities (commented out in the source) for analyzing merge decisions and cluster relationships. 
- -Further detail on specific aspects of the algorithm, or additional visualizations of particular components, can be added as needed. \ No newline at end of file diff --git a/clus/docs/Clustering_neutrino.md b/clus/docs/Clustering_neutrino.md deleted file mode 100644 index 355d06b6d..000000000 --- a/clus/docs/Clustering_neutrino.md +++ /dev/null @@ -1,159 +0,0 @@ -This document analyzes the `clustering_neutrino` function and explains its logic and algorithm in detail. - - -```mermaid -flowchart TD - A[Start] --> B[Sort clusters by length] - B --> C[Filter clusters] - C --> D{For each cluster pair} - D --> E[Calculate closest distances] - E --> F[Create extended point clouds] - F --> G{Check merge conditions} - G -->|Yes| H[Add to merge pairs] - G -->|No| D - H --> I[Build connectivity graph] - I --> J[Merge connected clusters] - J --> K[End] - - subgraph Filtering - C1[Check x boundaries] - C2[Check length > 6cm] - C3[Check box boundaries] - C1 --> C2 --> C3 - end - - subgraph Merge Conditions - M1[Check distance thresholds] - M2[Check angles] - M3[Check PCA values] - M4[Check cluster properties] - M1 --> M2 --> M3 --> M4 - end - -``` - -```python -def clustering_neutrino(live_grouping, num_try, low_limit, high_limit): - """ - Algorithm to cluster neutrino-related point clouds with the following steps: - 1. Initial filtering and sorting - 2. Extended point cloud creation - 3. Merge condition checking - 4. Graph-based cluster merging - """ - # 1. 
Sort and filter clusters - clusters = sort_clusters_by_length(live_grouping.children()) - contained_clusters = [] - candidate_clusters = [] - - for cluster in clusters: - # Filter clusters based on boundaries - if not (low_limit < cluster.earliest_point.x < high_limit and - cluster.length > 6.0 cm): - continue - - # Check box boundaries (y, z coordinates) - if check_boundary_points(cluster): - if len(saved_boundary_points) <= 1: - candidate_clusters.append(cluster) - contained_clusters.append(cluster) - else: - contained_clusters.append(cluster) - - # 2. Find closest clusters - cluster_distances = {} - for cluster1, cluster2 in combinations(clusters, 2): - distance = calculate_closest_distance(cluster1, cluster2) - update_cluster_distance_map(cluster_distances, cluster1, cluster2, distance) - - # 3. Create extended point clouds and check merge conditions - to_be_merged_pairs = set() - used_clusters = set() - - for cluster1 in candidate_clusters: - for cluster2 in contained_clusters: - if cluster1 == cluster2: - continue - - # Create extended point clouds if not exists - cloud1 = get_or_create_extended_cloud(cluster1) - cloud2 = get_or_create_extended_cloud(cluster2) - - # Check various merge conditions: - # - Distance thresholds - # - Angle between directions - # - PCA values - # - Cluster properties (length, points) - if should_merge(cluster1, cluster2, cloud1, cloud2): - if validate_merge_type(cluster1, cluster2, used_clusters): - to_be_merged_pairs.add((cluster1, cluster2)) - update_used_clusters(used_clusters, cluster1, cluster2) - - # 4. Build connectivity graph and merge clusters - graph = build_connectivity_graph(to_be_merged_pairs) - merge_clusters(graph, live_grouping) - -``` - -The `clustering_neutrino` function implements a sophisticated clustering algorithm specifically designed for neutrino detector data. Here's a detailed explanation of its key components: - -1. 
**Initial Sorting and Filtering**: - - Clusters are first sorted by length in descending order - - Each cluster must pass basic criteria: - - X coordinates within specified limits (low_limit to high_limit) - - Minimum length of 6 cm - - Specific boundary conditions for y and z coordinates - -2. **Extended Point Cloud Creation**: - - For each cluster pair being considered, the algorithm creates "extended" point clouds - - These extended clouds include: - - Original cluster points - - Projected points along principal directions - - Points based on PCA (Principal Component Analysis) axes - - The extension helps in better understanding potential connections between clusters - -3. **Merge Conditions**: - The algorithm uses several sophisticated criteria to decide if clusters should be merged: - - a) **Distance-based conditions**: - - Close proximity between clusters (typically < 80 cm) - - Special handling for clusters > 80 cm in length - - Consideration of closest point distances - - b) **Directional conditions**: - - Angle checks between cluster directions - - Vertical direction alignment checks - - Beam direction considerations - - c) **PCA-based conditions**: - - Checks on PCA eigenvalues ratios - - Special handling for clusters with specific PCA characteristics - -4. **Merge Process**: - - Uses a graph-based approach for merging - - Builds an undirected graph where: - - Vertices represent clusters - - Edges represent merge relationships - - Final merging is done using connected components of the graph - -5. **Special Considerations**: - - Handles very long tracks (> 150 cm) differently - - Special cases for clusters near detector boundaries - - Protection against over-clustering - - Consideration of time slice width for temporal coherence - -The algorithm is particularly sophisticated in how it handles different cluster geometries and relationships, making it suitable for neutrino detector data where tracks can have complex topologies. 
- -Key parameters that affect the clustering: -- `num_try`: Controls number of clustering attempts -- `low_limit` and `high_limit`: Define spatial boundaries -- Various distance thresholds (e.g., 15 cm, 30 cm, etc.) -- Angular thresholds for direction checks -- PCA value ratios for shape analysis - -The algorithm is designed to be robust against: -- Noise in the detector -- Track splitting -- Complex event topologies -- Detector boundary effects - diff --git a/clus/docs/Clustering_parallel_prolong.md b/clus/docs/Clustering_parallel_prolong.md deleted file mode 100644 index 10c872c24..000000000 --- a/clus/docs/Clustering_parallel_prolong.md +++ /dev/null @@ -1,379 +0,0 @@ -This document explains the `clustering_parallel_prolong` function, a higher-level function that manages the parallel and prolonged track clustering process. - - - -```mermaid -flowchart TD - A[Start] --> B[Create Boost Graph] - B --> C[Map Cluster Indices] - C --> D[Initialize Vertex Map] - D --> E["Loop Through All Cluster Pairs"] - - E --> F{"Check Each Pair (i,j)"} - F --> G{Call Clustering_2nd_round} - - G -->|True| H[Add Edge to Graph] - G -->|False| I[Skip Pair] - - H --> J[Next Pair] - I --> J - - J -->|More Pairs| F - J -->|Done| K[Merge Connected Components] - - K --> L[End] - -``` - -```svg - - - Cluster Graph Structure - - - - - - C1 - C2 - C3 - - - - - - - - Connected Component - - - - Cluster Vertex - - Mergeable Connection - - Final Merged Group - - -``` - -```cpp -// Main clustering function for parallel and prolonged tracks -void clustering_parallel_prolong( - Grouping& live_grouping, // Contains all live clusters - cluster_set_t& cluster_connected_dead, // Tracks connected dead clusters - const double length_cut // Distance threshold for merging -) { - // Step 1: Initialize Boost Graph - typedef cluster_connectivity_graph_t Graph; // Undirected graph type - Graph g; // Graph to track cluster connectivity - - // Step 2: Create mappings - std::unordered_map ilive2desc; // 
Maps cluster index to graph vertex - std::map map_cluster_index; // Maps cluster pointer to index - - // Step 3: Get live clusters and create vertices - const auto& live_clusters = live_grouping.children(); - for (size_t ilive = 0; ilive < live_clusters.size(); ++ilive) { - const auto& live = live_clusters[ilive]; - // Create mapping from cluster to index - map_cluster_index[live] = ilive; - // Add vertex to graph with cluster index - ilive2desc[ilive] = boost::add_vertex(ilive, g); - } - - // Step 4: Build graph edges - for (size_t i = 0; i != live_clusters.size(); i++) { - auto cluster_1 = live_clusters.at(i); - // Compare with all clusters after current one - for (size_t j = i + 1; j < live_clusters.size(); j++) { - auto cluster_2 = live_clusters.at(j); - - // Check if clusters should be merged using Clustering_2nd_round - if (Clustering_2nd_round(*cluster_1, *cluster_2, - cluster_1->get_length(), - cluster_2->get_length(), - length_cut)) { - // Add edge between mergeable clusters - boost::add_edge( - ilive2desc[map_cluster_index[cluster_1]], - ilive2desc[map_cluster_index[cluster_2]], - g - ); - } - } - } - - // Step 5: Merge connected components - merge_clusters(g, live_grouping, cluster_connected_dead); -} - -``` - -The `clustering_parallel_prolong` function implements a graph-based clustering algorithm: - -1. **Graph Initialization** - - Creates a Boost undirected graph to represent cluster connectivity - - Each vertex represents a cluster - - Edges will represent mergeable cluster pairs - -2. **Mapping Setup** - - Creates two mappings: - - `ilive2desc`: Maps cluster indices to graph vertices - - `map_cluster_index`: Maps cluster pointers to indices - - These mappings help maintain relationships between clusters and graph vertices - -3. **Vertex Creation** - - Iterates through all live clusters - - Creates a vertex for each cluster in the graph - - Establishes mappings for each cluster - -4. 
**Edge Creation** - - Uses nested loops to compare each pair of clusters - - For each pair, calls `Clustering_2nd_round` to determine if they should be merged - - If clusters should be merged, adds an edge between their vertices - - Key criteria checked by `Clustering_2nd_round`: - - Parallel track detection - - Prolonged track analysis - - Distance thresholds - - Angular relationships - -5. **Final Merging** - - Calls `merge_clusters` to combine connected components - - Connected components in the graph represent groups of clusters that should be merged - - Updates both live clusters and connected dead clusters - -The algorithm is efficient because: -- It only compares each pair of clusters once -- Uses graph structure to handle complex connectivity -- Maintains relationships between original clusters and graph representation - -The graph-based approach allows for: -- Natural representation of cluster relationships -- Efficient handling of transitive relationships -- Easy identification of connected components for merging -- Clean separation of connectivity detection and actual merging - - - - -I'll create an organized explanation using artifacts to better visualize and explain this complex algorithm of `Clustering_2nd_round` - - - -```mermaid -flowchart TD - A[Start] --> B{Length Check} - B -->|Both < 10cm| C[Return False] - B -->|At least one >= 10cm| D[Find Closest Points] - - D --> E{Distance Check} - E -->|dis < length_cut| F[Parallel Analysis] - E -->|dis < 80cm & sum > 50cm & both > 15cm| F - E -->|else| C - - F --> G{Perpendicular to Drift?} - G -->|No| H[Prolonged Analysis] - G -->|Yes| I[Check U/V Wire Angles] - - I --> J{U Wire Parallel?} - J -->|Yes| K[Check U Criteria] - J -->|No| L{V Wire Parallel?} - - L -->|Yes| M[Check V Criteria] - L -->|No| H - - K --> N{Meets U Conditions?} - M --> O{Meets V Conditions?} - - H --> P{Meets Prolonged Criteria?} - - N -->|Yes| Q[Return True] - N -->|No| C - - O -->|Yes| Q - O -->|No| C - - P -->|Yes| Q - P -->|No| 
C - -``` - -```svg - - - - - X (Drift) - Y - - - - U Wire - - - - V Wire - - - - Cluster 1 - - - Cluster 2 - - - - distance - - - - θ - - -``` - -```cpp -// Main clustering decision function -bool Clustering_2nd_round(const Cluster& cluster1, const Cluster& cluster2, - double length_1, double length_2, double length_cut) { - // Step 1: Basic length validation - if (length_1 < 10*units::cm && length_2 < 10*units::cm) { - return false; // Both clusters too short to consider - } - - // Step 2: Find closest points between clusters - geo_point_t p1, p2; - double distance = Find_Closest_Points(cluster1, cluster2, length_1, length_2, - length_cut, p1, p2); - - // Step 3: Basic distance criteria - bool basic_distance_check = ( - distance < length_cut || // Close enough by default threshold - (distance < 80*units::cm && // OR moderately distant but long tracks - length_1 + length_2 > 50*units::cm && - length_1 > 15*units::cm && - length_2 > 15*units::cm) - ); - - if (!basic_distance_check) { - return false; - } - - // Step 4: Parallel track analysis - if (length_1 > 10*units::cm && length_2 > 10*units::cm) { - // Calculate vectors and angles - geo_point_t connection_vector(p2.x() - p1.x(), - p2.y() - p1.y(), - p2.z() - p1.z()); - - // Check if perpendicular to drift direction - double drift_angle = connection_vector.angle(drift_dir); - if (is_perpendicular_to_drift(drift_angle)) { - // Analyze U wire alignment - if (is_aligned_with_U_wire(connection_vector)) { - return check_U_wire_criteria(cluster1, cluster2, p1, p2); - } - - // Analyze V wire alignment - if (is_aligned_with_V_wire(connection_vector)) { - return check_V_wire_criteria(cluster1, cluster2, p1, p2); - } - } - } - - // Step 5: Prolonged track analysis - return check_prolonged_criteria(cluster1, cluster2, p1, p2); -} - -// Helper function for U-wire parallel case -bool check_U_wire_criteria(const Cluster& c1, const Cluster& c2, - const geo_point_t& p1, const geo_point_t& p2) { - // Get cluster directions using 
Hough transform - geo_point_t dir1 = c1.vhough_transform(p1, 60*units::cm); - geo_point_t dir2 = c2.vhough_transform(p2, 60*units::cm); - - // Multiple criteria for different cases - if (is_short_distance_case(p1, p2)) { - return check_short_distance_criteria(dir1, dir2); - } - - if (is_medium_length_case(c1, c2)) { - return check_medium_length_criteria(dir1, dir2); - } - - // Long track specific criteria - return check_long_track_criteria(dir1, dir2, p1, p2); -} - -``` - -This algorithm is complex and operates in several steps: - -1. **Initial Filtering** (shown in flow diagram): - - Rejects pairs where both clusters are shorter than 10cm - - Finds closest points between clusters - -2. **Geometric Analysis** (shown in SVG): - - Analyzes angles relative to three wire directions (U, V, W) - - Considers drift direction (X-axis) - - Measures distances between cluster endpoints - -3. **Decision Logic** (shown in code): - - Multiple criteria for different scenarios - - Special handling for parallel tracks - - Different thresholds based on track lengths - -Let me break down the `Clustering_2nd_round` function, which appears to be part of a particle physics detector clustering algorithm that determines whether two clusters should be merged based on their geometric properties. - -Key Logic: -1. Initial Length Filter: -```cpp -if (length_1 < 10*units::cm && length_2 < 10*units::cm) return false; -``` -- Rejects pairs where both clusters are shorter than 10cm - -2. Main Distance Check: -```cpp -double dis = Find_Closest_Points(cluster1, cluster2, length_1, length_2, length_cut, p1, p2); -``` -- Finds closest points between clusters and their distance - -3. 
First Merge Criteria: -```cpp -if ((dis < length_cut || (dis < 80*units::cm && length_1 + length_2 > 50*units::cm && length_1>15*units::cm && length_2 > 15*units::cm))) -``` -- Merges if either: - - Distance is less than length_cut (default 35cm) - - OR if clusters are long (>15cm each, sum >50cm) and relatively close (<80cm) - -4. Parallel Track Analysis: -The function then does detailed analysis for parallel tracks by: -- Checking if tracks are perpendicular to drift direction (X-axis) -- Analyzing angles relative to U and V wire directions -- Using Hough transforms to get cluster directions - -5. Key Parallel Cases: -- U-wire parallel case: -```cpp -if ((fabs(angle2-3.1415926/2.)<7.5/180.*3.1415926 || (fabs(angle2-3.1415926/2.)<15/180.*3.1415926)&&dis <6*units::cm)) -``` -- V-wire parallel case: -```cpp -if ((fabs(angle3-3.1415926/2.)<7.5/180.*3.1415926 || (fabs(angle3-3.1415926/2.)<15/180.*3.1415926)&&dis <6*units::cm)) -``` - -6. Prolonged Track Analysis: -- Checks for tracks that might be prolongations of each other -- Analyzes angles with respect to U, V, and W wire directions -- Special handling for very close tracks (<5cm) - -The algorithm appears designed to handle several specific cases in particle tracking: -1. Parallel tracks that might be segments of the same particle -2. Extended tracks that should be joined -3. Tracks with small gaps that should be connected -4. Special cases for different wire orientations (U, V, W) - -The function uses multiple geometric criteria and angle checks to make decisions, with different thresholds for different scenarios. It's particularly careful about long tracks (>60cm) and has special handling for shorter tracks with different distance thresholds. - -The overall goal seems to be to join track segments that are likely from the same particle while avoiding false merges of truly separate tracks. This is achieved through careful geometric analysis and multiple validation steps for different track configurations. 
\ No newline at end of file diff --git a/clus/docs/Clustering_protect_overclustering.md b/clus/docs/Clustering_protect_overclustering.md deleted file mode 100644 index bd5eab4ff..000000000 --- a/clus/docs/Clustering_protect_overclustering.md +++ /dev/null @@ -1,238 +0,0 @@ -Let me explain the logic and algorithm of `Examine_overclustering` using a diagram and detailed breakdown. - - - -```mermaid -stateDiagram-v2 - [*] --> InitializeGraph: Create MCUGraph - - InitializeGraph --> BuildPointMaps: Create maps for each plane - note right of BuildPointMaps - Maps blob->wire->points - for each plane (u,v,w) - end note - - BuildPointMaps --> CreateIntraBlobEdges: Process points within blobs - note right of CreateIntraBlobEdges - Connect points inside - same blob based on - wire intervals - end note - - CreateIntraBlobEdges --> CreateInterBlobEdges: Process connected blobs - note right of CreateInterBlobEdges - Connect points between - adjacent time slices - and overlapping blobs - end note - - CreateInterBlobEdges --> FindComponents: Run connected components - - FindComponents --> SingleComponent: Only 1 component - FindComponents --> MultipleComponents: Multiple components - - SingleComponent --> [*]: Return empty map - - MultipleComponents --> CreatePointClouds: Create point cloud per component - - CreatePointClouds --> AnalyzeConnectivity: Analyze component connectivity - note right of AnalyzeConnectivity - - Find closest points - - Check MST - - Verify paths - - Add new edges - end note - - AnalyzeConnectivity --> RecomputeComponents: Recompute connected components - - RecomputeComponents --> NoSplit: Still single component - RecomputeComponents --> Split: Multiple components remain - - NoSplit --> [*]: Return empty map - Split --> [*]: Return separated clusters - -``` - -Let me explain the key steps of this algorithm: - -1. 
**Graph Initialization**: - - Creates a boost graph with N vertices, where N is the number of points in the cluster - - Each vertex represents a 3D point in the cluster - -2. **Point Mapping**: - - Creates maps that associate blobs (detector regions) with wire indices and point indices for each plane (u,v,w) - - This allows efficient lookup of points that share wire intersections - -3. **Intra-Blob Edge Creation**: - - For each blob, connects points within it based on wire intervals - - Uses maximum and minimum wire intervals as constraints - - Calculates distances between points and adds edges to the graph with distance weights - -4. **Inter-Blob Edge Creation**: - - Identifies connected blobs: - - Blobs in the same time slice that overlap - - Blobs in adjacent time slices that overlap - - Creates edges between points in connected blobs - - Uses closest point pairs between blobs - - Stores distances for edge weights - -5. **Component Analysis**: - - Runs connected components algorithm on the graph - - If only one component is found, returns empty (no overclustering) - - If multiple components are found, proceeds with detailed analysis - -6. **Detailed Component Analysis** (for multiple components): - - Creates point clouds for each component - - Analyzes connectivity between components: - - Finds closest points between components - - Creates Minimum Spanning Tree (MST) - - Checks path validity through detector space - - Adds new edges based on directionality analysis - - Recomputes connected components after adding new edges - -7. 
**Final Split Decision**: - - If components remain disconnected after detailed analysis: - - Creates new clusters for each component - - Maps points to their new clusters - - If components become connected, returns empty (no overclustering) - -The key innovation in this algorithm is its multi-stage approach: -- First pass with basic connectivity -- Second pass with detailed analysis for potential splits -- Path validation to ensure physically meaningful splits -- Use of MST to find optimal connection points between components - -This algorithm helps prevent overclustering by identifying genuinely separate particle trajectories that may have been incorrectly grouped together, while avoiding splitting truly connected trajectories that may appear disconnected due to detector effects or noise. - -The output is either: -- An empty map (if no valid splits are found) -- A map of new clusters (if valid splits are identified) - -Each split decision is validated using multiple criteria including physical space constraints, wire plane geometry, and temporal relationships between detector hits. 
- - -```cpp -// Key data structures -struct ClusteringParams { - // Graph creation - using MCUGraph = boost::adjacency_list< - boost::vecS, // OutEdgeList: vector for edges - boost::vecS, // VertexList: vector for vertices - boost::undirectedS, // Directionality: undirected - VertexProperties, // Vertex properties: index - EdgeProperties // Edge properties: distance - >; - - // Point mapping structures - using wire_point_map_t = std::map<int, std::set<int>>; // wire_index -> set<point_index> - using blob_wire_points_map_t = std::map<const Blob*, wire_point_map_t>; // blob -> (wire -> points) - - // Distance thresholds (in cm) - static constexpr double CLOSE_DISTANCE_THRESHOLD = 3.0; // Direct connection threshold - static constexpr double PATH_STEP_SIZE = 1.0; // Path validation step size - static constexpr double MAX_CONNECTION_DISTANCE = 80.0; // Maximum allowed connection - static constexpr double STANDARD_DISTANCE = 5.0; // Standard connection threshold - - // Path validation parameters - static constexpr int MAX_BAD_POINTS = 7; // Maximum allowed bad points in path - static constexpr double MAX_BAD_RATIO = 0.75; // Maximum ratio of bad points - - // Time slice parameters - static constexpr int MAX_TIME_GAP = 2; // Maximum allowed time slice gap - static constexpr int MIN_POINTS_LARGE_CLUSTER = 500; // Min points for detailed analysis -}; - -// Key algorithmic steps -class ClusteringAlgorithm { - // Component analysis thresholds - bool shouldPerformDetailedAnalysis(const PointCloud& cloud1, const PointCloud& cloud2) { - return (cloud1.size() > 100 && cloud2.size() > 100 && - (cloud1.size() + cloud2.size()) > 400) || - (cloud1.size() > 500 && cloud2.size() > 500); - } - - // Path validation - bool isValidPath(const Point& p1, const Point& p2, double step_size) { - double distance = p1.distance(p2); - int num_steps = distance / step_size + 1; - int bad_points = 0; - - for (int i = 0; i < num_steps; i++) { - Point test_p = p1.interpolate(p2, (i + 1.0) / num_steps); - if (!grouping->is_good_point(test_p, 0)) { - bad_points++; - 
if (bad_points > MAX_BAD_POINTS || - (bad_points > 2 && bad_points >= 0.75 * num_steps)) { - return false; - } - } - } - return true; - } - - // Directional analysis parameters - struct DirectionalParams { - static constexpr double SEARCH_RADIUS = 30.0; // cm, for Hough transform - static constexpr double MAX_ANGLE = 7.5; // degrees, for direction matching - static constexpr double TOLERANCE = 3.0; // cm, for point matching - static constexpr double MAX_EXTENSION = 80.0; // cm, maximum extension length - }; -}; - -``` - -Here are the key technical details added to the algorithm explanation: - -1. **Graph Data Structure Details**: - - Uses Boost's adjacency_list with vector storage for both vertices and edges - - Undirected graph with vertex properties (index) and edge properties (distance) - - Efficient lookup through vertex descriptors - -2. **Point Mapping Refinements**: - - Three-plane mapping (u,v,w) with separate wire indices - - Hierarchical maps: blob → wire_index → set - - Optimized for quick lookup of points sharing wire intersections - -3. **Connectivity Analysis Parameters**: - - Close distance threshold: 3cm for direct connections - - Standard distance: 5cm for regular connections - - Maximum connection distance: 80cm for extended searches - - Path validation step size: 1cm for fine-grained checking - -4. **Time Slice Processing**: - - Handles gaps of up to 2 time slices - - Uses nticks_live_slice for time slice spacing - - Processes adjacent slices in groups of t, t+1, t+2 - - Maintains temporal ordering in connectivity analysis - -5. **Component Analysis Thresholds**: - - Detailed analysis triggered for clusters with: - - Both components > 100 points AND combined > 400 points - - OR both components > 500 points - - Uses point cloud KD-trees for efficient nearest neighbor searches - -6. 
**Path Validation Criteria**: - - Maximum 7 bad points allowed in path - - Bad point ratio threshold: 75% - - Step size: 1.0cm for path checking - - Validates points using grouping->is_good_point() - -7. **Directional Analysis**: - - Search radius: 30cm for Hough transform - - Maximum angle deviation: 7.5 degrees - - Point matching tolerance: 3cm - - Maximum extension length: 80cm - -8. **MST (Minimum Spanning Tree) Analysis**: - - Creates MSTs for both standard and directional connections - - Uses Prim's algorithm with distance weights - - Handles multiple connected components separately - - Validates MST edges with path checking - -This enhanced detail shows how the algorithm balances multiple factors: -- Physical constraints of the detector -- Computational efficiency through appropriate data structures -- Robust validation through multiple criteria -- Flexible handling of different cluster sizes and configurations - -The final output maintains consistency with the detector's physical characteristics while properly separating genuinely distinct clusters that may have been incorrectly merged. \ No newline at end of file diff --git a/clus/docs/Clustering_regular.md b/clus/docs/Clustering_regular.md deleted file mode 100644 index 1b0ebe58c..000000000 --- a/clus/docs/Clustering_regular.md +++ /dev/null @@ -1,331 +0,0 @@ -I'll explain the `clustering_regular` function which handles the high-level clustering logic for groups of clusters. 
- -```mermaid -stateDiagram-v2 - [*] --> Initialize: Create connectivity graph - - Initialize --> ClusterPairs: Loop through cluster pairs - note right of Initialize - Create graph to track - cluster connectivity - end note - - ClusterPairs --> LengthCheck: For each pair i,j - note right of ClusterPairs - Process all cluster pairs - where j > i - end note - - LengthCheck --> Clustering1st: Both clusters > length_cut - LengthCheck --> SkipPair: Either cluster too short - note right of LengthCheck - Check if clusters meet - minimum length threshold - (10cm or 15cm) - end note - - Clustering1st --> AddEdge: Merge condition met - Clustering1st --> SkipPair: Merge condition not met - note right of Clustering1st - Run detailed geometric - analysis via - Clustering_1st_round() - end note - - AddEdge --> ClusterPairs: Process next pair - note right of AddEdge - Add edge between - clusters in graph - end note - - SkipPair --> ClusterPairs: Process next pair - - ClusterPairs --> MergeClusters: All pairs processed - note right of MergeClusters - Call merge_clusters() - to combine connected - components - end note - - MergeClusters --> [*] - -``` - -Let's analyze the code and logic in detail: - -1. **Function Parameters and Initialization**: -```cpp -void WireCell::PointCloud::Facade::clustering_regular( - Grouping& live_grouping, - cluster_set_t& cluster_connected_dead, // in/out - const double length_cut, // default 45cm - bool flag_enable_extend // enable extended analysis -) -{ - // Internal length threshold depends on extend mode - double internal_length_cut = flag_enable_extend ? 15*units::cm : 10*units::cm; -``` -Key points: -- Takes a group of "live" clusters and already connected "dead" clusters -- `length_cut` determines maximum merge distance (default 45cm) -- `flag_enable_extend` enables more aggressive merging checks -- Sets internal minimum cluster length threshold - -2. 
**Graph Setup**: -```cpp -typedef cluster_connectivity_graph_t Graph; -Graph g; -std::unordered_map<int, int> ilive2desc; // live index to graph descriptor -std::map<const Cluster*, size_t> map_cluster_index; -const auto& live_clusters = live_grouping.children(); - -// Create vertices for each cluster -for (size_t ilive = 0; ilive < live_clusters.size(); ++ilive) { - const auto& live = live_clusters.at(ilive); - map_cluster_index[live] = ilive; - ilive2desc[ilive] = boost::add_vertex(ilive, g); -} -``` -Key points: -- Uses Boost Graph Library to track cluster connectivity -- Creates a vertex for each cluster in the graph -- Maintains mappings between: - - Cluster pointers and their indices - - Indices and graph vertex descriptors - -3. **Main Clustering Loop**: -```cpp -for (size_t i=0; i!=live_clusters.size(); i++) { - auto cluster_1 = live_clusters.at(i); - if (cluster_1->get_length() < internal_length_cut) continue; - - for (size_t j=i+1; j<live_clusters.size(); j++) { - auto cluster_2 = live_clusters.at(j); - if (cluster_2->get_length() < internal_length_cut) continue; - - if (Clustering_1st_round(*cluster_1, *cluster_2, - cluster_1->get_length(), - cluster_2->get_length(), - length_cut, - flag_enable_extend)) { - boost::add_edge(ilive2desc[map_cluster_index[cluster_1]], - ilive2desc[map_cluster_index[cluster_2]], g); - } - } -} -``` -Key points: -- Double loop processes each unique pair of clusters once -- Skips clusters shorter than internal_length_cut -- Calls `Clustering_1st_round` to check if clusters should merge -- Adds graph edge when merge condition is met - -4. **Final Merging**: -```cpp -merge_clusters(g, live_grouping, cluster_connected_dead); -``` -Key points: -- Takes connectivity graph and applies merges -- Updates both live clusters and connected dead clusters - -The algorithm works by: - -1. **Preprocessing**: - - Sets up a graph structure to track which clusters should be merged - - Filters out clusters that are too short to consider - - Creates necessary index mappings - -2. 
**Pair Analysis**: - - Systematically examines each unique pair of clusters - - Uses detailed geometric analysis via Clustering_1st_round - - Records merge decisions in graph structure - -3. **Merge Decisions** are based on: - - Cluster lengths (must exceed minimum threshold) - - Geometric relationships (via Clustering_1st_round) - - Distance between clusters (vs length_cut) - - Extended analysis if enabled - -4. **Graph Building**: - - Each cluster is a vertex - - Edges represent merge decisions - - Connected components in final graph become merged clusters - -5. **Final Processing**: - - Graph is passed to merge_clusters - - Connected components are combined - - Results update both live and dead cluster sets - -This approach has several advantages: -- Efficient handling of multiple clusters -- Consistent merge decisions -- Ability to handle complex merge patterns -- Clean separation between decision logic and merge implementation - -The function acts as a high-level coordinator for the clustering process, delegating detailed geometric analysis to `Clustering_1st_round` while managing the overall merge structure through graph theory concepts. - - - - - - - -I'll break down the detailed logic of `Clustering_1st_round` function step by step. - - -```mermaid -flowchart TB - Start([Start]) --> Init[Get wire angles & find closest points] - Init --> DistCheck{Distance < length_cut?} - DistCheck -->|No| End([Return False]) - DistCheck -->|Yes| FlagCheck[Calculate flags for clustering modes] - - FlagCheck --> ParaCheck{Is Parallel Case?
flag_para} - ParaCheck -->|Yes| ParaProcess[Process Parallel Case:
1. Check U/V wire alignment
2. Check drift direction angles
3. Check distance criteria] - - FlagCheck --> ProlCheck{Is Prolonged Case?
flag_prolong_U/V/W} - ProlCheck -->|Yes| ProlProcess[Process Prolonged Case:
1. Check wire direction alignment
2. Calculate projection angles
3. Verify continuity] - - FlagCheck --> RegCheck{Is Regular Case?
flag_regular} - RegCheck -->|Yes| RegProcess[Process Regular Case:
1. Check distance ≤ 15cm
2. Special case for long clusters] - - ParaProcess --> AngleCheck{Angle Criteria Met?} - ProlProcess --> AngleCheck - RegProcess --> AngleCheck - - AngleCheck -->|No| End - AngleCheck -->|Yes| ExtendCheck{Enable Extended Analysis?} - - ExtendCheck -->|No| Success([Return True]) - ExtendCheck -->|Yes| ExtendProcess[Extended Analysis:
1. Test multiple points
2. Check cluster continuity
3. Verify alignment] - - ExtendProcess --> ExtendResult{Extended Criteria Met?} - ExtendResult -->|Yes| Success - ExtendResult -->|No| End - -``` - -Let's analyze the detailed implementation: - -1. **Initialization and Basic Distance Check**: -```cpp -const auto [angle_u,angle_v,angle_w] = cluster1.grouping()->wire_angles(); -geo_point_t p1, p2; -double dis = Find_Closest_Points(cluster1, cluster2, length_1, length_2, length_cut, p1, p2); -if (dis >= length_cut) return false; -``` - -2. **Flag Calculation and Direction Analysis**: -```cpp -// Calculate average positions and directions -geo_point_t cluster1_ave_pos = cluster1.calc_ave_pos(p1, 5*units::cm); -geo_point_t cluster2_ave_pos = cluster2.calc_ave_pos(p2, 5*units::cm); - -// Calculate direction vectors -geo_point_t dir2_1(p2.x() - p1.x()+1e-9, p2.y() - p1.y()+1e-9, p2.z() - p1.z()+1e-9); -geo_point_t dir2(cluster2_ave_pos.x() - cluster1_ave_pos.x()+1e-9, - cluster2_ave_pos.y() - cluster1_ave_pos.y()+1e-9, - cluster2_ave_pos.z() - cluster1_ave_pos.z()+1e-9); -``` - -3. **Parallel Case Analysis**: -```cpp -// Check if clusters are perpendicular to drift direction -double angle1 = dir2_1.angle(drift_dir); -double angle2 = dir2.angle(drift_dir); - -if (fabs(angle1-3.1415926/2.) < 7.5/180.*3.1415926 || - fabs(angle2-3.1415926/2.) < 7.5/180.*3.1415926) { - flag_para = true; - - // Check alignment with U/V wires - angle3 = dir2_1.angle(U_dir); - angle4 = dir2_1.angle(V_dir); - - if (fabs(angle3-3.1415926/2.) < 7.5/180.*3.1415926) flag_para_U = true; - if (fabs(angle4-3.1415926/2.) < 7.5/180.*3.1415926) flag_para_V = true; -} -``` - -4. 
**Prolonged Case Analysis**: -```cpp -if (!flag_para) { - // Calculate projections onto wire planes - geo_point_t tempV3(0, p2.y() - p1.y(), p2.z() - p1.z()); - - // Check alignment with wire directions - double angle6 = tempV3.angle(U_dir); - double angle7 = tempV3.angle(V_dir); - double angle8 = tempV3.angle(W_dir); - - if (angle6 < 15/180.*3.1415926) flag_prolong_U = true; - if (angle7 < 15/180.*3.1415926) flag_prolong_V = true; - if (angle8 < 15/180.*3.1415926) flag_prolong_W = true; -} -``` - -5. **Regular Case Analysis**: -```cpp -if (dis <= 15*units::cm) { - flag_regular = true; -} else if (length_1 > 30*units::cm && length_2 > 30*units::cm) { - if (dis <= 25*units::cm) flag_regular = true; -} -``` - -6. **Extended Analysis**: -```cpp -if (flag_extend && flag_enable_extend || flag_force_extend) { - // Get cluster directions using Hough transform - geo_point_t dir1 = cluster1.vhough_transform(cluster1_ave_pos, 30*units::cm); - geo_point_t dir3 = cluster2.vhough_transform(cluster2_ave_pos, 30*units::cm); - - // Test multiple points along clusters - for (int i=-5; i!=6; i++) { - test_point.set(cluster1_ave_pos.x() - dir1.x() * (ave_dis + i*2*units::cm), - cluster1_ave_pos.y() - dir1.y() * (ave_dis + i*2*units::cm), - cluster1_ave_pos.z() - dir1.z() * (ave_dis + i*2*units::cm)); - - // Check continuity and alignment - auto temp_results = cluster2.get_closest_point_blob(test_point); - // ... distance calculations and checks - } -} -``` - -7. **Angle Criteria Adjustment**: -```cpp -double angle_cut = 2.5; -if (dis < 5*units::cm) { - angle_cut = 12; -} else if (dis < 15*units::cm) { - angle_cut = 7.5; -} else { - angle_cut = 5; -} -``` - -The algorithm uses several key parameters: -1. **Distance Thresholds**: - - Basic cutoff: 45cm (default length_cut) - - Regular case: 15cm - - Extended analysis: various multiples of cluster length - -2. 
**Angle Thresholds**: - - Parallel case: 7.5° from perpendicular - - Wire alignment: 15° from wire directions - - Distance-dependent angle cuts: 5° to 12° - -3. **Length Requirements**: - - Minimum length: 10cm (15cm in extend mode) - - Long cluster threshold: 30cm - -The function balances between: -- Geometric constraints (angles, distances) -- Physical considerations (wire directions, drift direction) -- Cluster properties (length, continuity) -- Multiple validation methods (direct measurement, Hough transform) - -This creates a robust clustering algorithm that can handle various track configurations while minimizing false merges. - - diff --git a/clus/docs/Clustering_separate.md b/clus/docs/Clustering_separate.md deleted file mode 100644 index 41f2d9084..000000000 --- a/clus/docs/Clustering_separate.md +++ /dev/null @@ -1,380 +0,0 @@ -# clustering_separate Function Analysis - -## Function Signature -```cpp -void clustering_separate( - Grouping& live_grouping, - std::map>& dead_u_index, - std::map>& dead_v_index, - std::map>& dead_w_index, - const bool use_ctpc -) -``` - -## Purpose -Primary function for analyzing and separating potentially merged or crossed particle tracks based on geometric and topological criteria. - -## Core Algorithm Flow - -### 1. Initial Setup -```cpp -// Get and sort clusters -std::vector live_clusters = live_grouping.children(); -std::sort(live_clusters.begin(), live_clusters.end(), - [](const Cluster* cluster1, const Cluster* cluster2) { - return cluster1->get_length() > cluster2->get_length(); - }); - -// Define reference directions -geo_point_t drift_dir(1, 0, 0); -geo_point_t beam_dir(0, 0, 1); -geo_point_t vertical_dir(0, 1, 0); - -// Get time slice parameters -const auto& mp = live_grouping.get_params(); -double live_time_slice_width = mp.nticks_live_slice * mp.tick_drift; - -// Storage for processing results -std::vector new_clusters; -std::vector del_clusters; -``` - -### 2. 
Main Processing Loop -```cpp -for (size_t i = 0; i != live_clusters.size(); i++) { - Cluster* cluster = live_clusters.at(i); - - // Only process large clusters - if (cluster->get_length() > 100 * units::cm) { - // Processing logic here - } -} -``` - -### 3. Cluster Analysis Decision Tree -```cpp -if (cluster->get_length() > 100 * units::cm) { - std::vector boundary_points; - std::vector independent_points; - - // First decision criterion - bool flag_proceed = JudgeSeparateDec_2(cluster, drift_dir, - boundary_points, - independent_points, - cluster->get_length()); - - // Secondary analysis if first fails - if (!flag_proceed && - cluster->get_length() > 100 * units::cm && - JudgeSeparateDec_1(cluster, drift_dir, cluster->get_length(), live_time_slice_width) && - independent_points.size() > 0) { - // Additional analysis - } -} -``` - -### 4. Topology Analysis -```cpp -bool flag_top = false; -for (size_t j = 0; j != independent_points.size(); j++) { - if (independent_points.at(j).y() > 101.5 * units::cm) { - flag_top = true; - break; - } -} - -// Get main direction from PCA -geo_point_t main_dir(cluster->get_pca_axis(0).x(), - cluster->get_pca_axis(0).y(), - cluster->get_pca_axis(0).z()); -``` - -### 5. Position-Based Analysis -For top region clusters (flag_top true): -```cpp -if (flag_top) { - if (fabs(main_dir.angle(beam_dir) - 3.1415926/2.)/3.1415926*180. < 16 || - fabs(main_dir.angle(beam_dir) - 3.1415926/2.)/3.1415926*180. < 33 && - cluster->get_length() > 160*units::cm || - fabs(main_dir.angle(beam_dir) - 3.1415926/2.)/3.1415926*180. < 40 && - cluster->get_length() > 260*units::cm || - // Additional angle/length conditions... - ) { - flag_proceed = true; - } - else { - // Secondary analysis for specific angles - if (fabs(main_dir.angle(beam_dir) - 3.1415926/2.)/3.1415926*180. 
< 40 && - cluster->get_pca_value(1) > 0.2 * cluster->get_pca_value(0)) { - // Try separation with larger distance cut - std::vector temp_sep_clusters = Separate_2(cluster, 10*units::cm); - // Check results - } - } -} -``` - -For non-top region clusters: -```cpp -else { - if (fabs(main_dir.angle(beam_dir) - 3.1415926/2.)/3.1415926*180. < 4 && - cluster->get_length() > 170*units::cm || - fabs(main_dir.angle(beam_dir) - 3.1415926/2.)/3.1415926*180. < 25 && - cluster->get_length() > 210*units::cm || - // Additional criteria... - ) { - flag_proceed = true; - } -} -``` - - -# Separation Execution Analysis - -## Entry Point -```cpp -if (flag_proceed) { - if (JudgeSeparateDec_1(cluster, drift_dir, cluster->get_length(), live_time_slice_width)) { - // Main separation path - } - else if (cluster->get_length() < 6*units::m) { - // Alternative separation path for shorter clusters - } -} -``` - -## 1. Main Separation Path - -### Initial Separation -```cpp -// Primary separation attempt -std::vector sep_clusters = Separate_1(use_ctpc, cluster, - boundary_points, independent_points, - dead_u_index, dead_v_index, dead_w_index, - cluster->get_length()); - -// Process first separated cluster -Cluster* cluster1 = sep_clusters.at(0); -new_clusters.push_back(cluster1); -del_clusters.push_back(cluster); -``` - -### Multiple Cluster Handling -```cpp -if (sep_clusters.size() >= 2) { // If more than one cluster produced - // Add additional clusters (beyond first two) - for (size_t k = 2; k < sep_clusters.size(); k++) { - new_clusters.push_back(sep_clusters.at(k)); - } - - // Process second cluster - std::vector temp_del_clusters; - Cluster* cluster2 = sep_clusters.at(1); - double length_1 = cluster2->get_length(); - Cluster* final_sep_cluster = cluster2; -``` - -### Recursive Separation Level 1 -```cpp -if (length_1 > 100 * units::cm) { - boundary_points.clear(); - independent_points.clear(); - - if (JudgeSeparateDec_1(cluster2, drift_dir, length_1, live_time_slice_width) && - 
JudgeSeparateDec_2(cluster2, drift_dir, boundary_points, independent_points, length_1)) { - - // Second level separation - std::vector sep_clusters = Separate_1(use_ctpc, cluster2, - boundary_points, independent_points, - dead_u_index, dead_v_index, dead_w_index, length_1); - - Cluster* cluster3 = sep_clusters.at(0); - new_clusters.push_back(cluster3); - temp_del_clusters.push_back(cluster2); -``` - -### Recursive Separation Level 2 -```cpp - if (sep_clusters.size() >= 2) { - // Add additional clusters from second separation - for (size_t k = 2; k < sep_clusters.size(); k++) { - new_clusters.push_back(sep_clusters.at(k)); - } - - Cluster* cluster4 = sep_clusters.at(1); - final_sep_cluster = cluster4; - length_1 = cluster4->get_length(); - - // Third level check - if (length_1 > 100 * units::cm) { - boundary_points.clear(); - independent_points.clear(); -``` - -### Recursive Separation Level 3 -```cpp - if (JudgeSeparateDec_1(cluster4, drift_dir, length_1, live_time_slice_width) && - JudgeSeparateDec_2(cluster4, drift_dir, boundary_points, independent_points, length_1)) { - - std::vector sep_clusters = Separate_1(use_ctpc, cluster4, - boundary_points, independent_points, - dead_u_index, dead_v_index, dead_w_index, length_1); - - Cluster* cluster5 = sep_clusters.at(0); - new_clusters.push_back(cluster5); - temp_del_clusters.push_back(cluster4); - - if (sep_clusters.size() >= 2) { - for (size_t k = 2; k < sep_clusters.size(); k++) { - new_clusters.push_back(sep_clusters.at(k)); - } - Cluster* cluster6 = sep_clusters.at(1); - final_sep_cluster = cluster6; - } - else { - final_sep_cluster = 0; - } - } - } - } -``` - -### Final Cluster Processing -```cpp -if (final_sep_cluster != 0) { - length_1 = final_sep_cluster->get_length(); - - if (length_1 > 60 * units::cm) { - boundary_points.clear(); - independent_points.clear(); - - // Final separation attempt for long clusters - if (JudgeSeparateDec_1(final_sep_cluster, drift_dir, length_1, live_time_slice_width) && - 
JudgeSeparateDec_2(final_sep_cluster, drift_dir, boundary_points, independent_points, length_1) && - independent_points.size() > 0) { - - std::vector sep_clusters = Separate_1(use_ctpc, final_sep_cluster, - boundary_points, independent_points, - dead_u_index, dead_v_index, dead_w_index, length_1); - - Cluster* cluster5 = sep_clusters.at(0); - new_clusters.push_back(cluster5); - temp_del_clusters.push_back(final_sep_cluster); - - if (sep_clusters.size() >= 2) { - // Process additional clusters - for (size_t k = 2; k < sep_clusters.size(); k++) { - new_clusters.push_back(sep_clusters.at(k)); - } - final_sep_cluster = sep_clusters.at(1); - } - else { - final_sep_cluster = 0; - } - } - } -``` - -### Final Separation Using Separate_2 -```cpp - if (final_sep_cluster != 0) { - // Use simpler separation algorithm for final pass - std::vector final_sep_clusters = Separate_2(final_sep_cluster); - for (auto it = final_sep_clusters.begin(); it != final_sep_clusters.end(); it++) { - new_clusters.push_back(*it); - } - temp_del_clusters.push_back(final_sep_cluster); - } -} -``` - -## 2. Alternative Path for Shorter Clusters -```cpp -else if (cluster->get_length() < 6*units::m) { - std::vector sep_clusters = Separate_1(use_ctpc, cluster, - boundary_points, independent_points, - dead_u_index, dead_v_index, dead_w_index, - cluster->get_length()); - - Cluster* cluster1 = sep_clusters.at(0); - new_clusters.push_back(cluster1); - del_clusters.push_back(cluster); - - if (sep_clusters.size() >= 2) { - // Process additional clusters similar to main path - // but with simplified logic - } -} -``` - -## Key Features of Separation Execution - -1. **Recursive Structure** - - Up to 3 levels of recursive separation - - Each level handles progressively smaller clusters - - Different criteria at each level - -2. **Length-Based Processing** - - Primary threshold: 100 cm - - Secondary threshold: 60 cm - - Maximum length: 6 meters - -3. 
**Memory Management** - - Tracks clusters to be deleted - - Manages temporary clusters - - Maintains cluster hierarchy - -4. **Protection Mechanisms** - - Multiple validation checks - - Size-based restrictions - - Geometric criteria at each level - -5. **Alternative Processing** - - Special handling for shorter clusters - - Simplified logic for certain cases - - Final cleanup using Separate_2 - -This detailed separation execution shows how the function handles complex cluster configurations through multiple levels of recursive separation while maintaining cluster integrity and proper memory management. - - - - -### 7. Result Management -```cpp -// Update cluster collections -for (auto it = new_clusters.begin(); it != new_clusters.end(); it++) { - Cluster* ncluster = (*it); - live_clusters.push_back(ncluster); -} - -// Remove processed clusters -for (auto it = del_clusters.begin(); it != del_clusters.end(); it++) { - Cluster* ocluster = (*it); - live_clusters.erase(find(live_clusters.begin(), live_clusters.end(), ocluster)); -} -``` - -## Key Decision Points - -1. **Initial Size Filter**: Only processes clusters > 100 cm -2. **Top/Bottom Region**: Different criteria based on vertical position -3. **Angle Analysis**: Multiple angle thresholds relative to beam direction -4. **Length Considerations**: Various length thresholds for different conditions -5. **PCA-based Decisions**: Uses cluster shape characteristics - -## Processing Thresholds - -- **Minimum Cluster Size**: 100 cm -- **Top Region Boundary**: 101.5 cm -- **Length Thresholds**: Various (160 cm, 210 cm, 260 cm, etc.) -- **Angle Thresholds**: Multiple values (16°, 33°, 40°, etc.) -- **Maximum Length**: 6 meters - -## Function Outcomes - -1. **Cluster Separation**: Creates new separated clusters -2. **Cluster Cleanup**: Removes processed clusters -3. **Collection Updates**: Maintains live cluster collection -4. 
**Dead Region Handling**: Considers dead wire regions in separation - -This function serves as the main coordinator for cluster separation, making decisions based on geometric and topological characteristics while maintaining the integrity of the detector's physical constraints. \ No newline at end of file diff --git a/clus/docs/Clustering_separate_JudgeSeparateDec_1.md b/clus/docs/Clustering_separate_JudgeSeparateDec_1.md deleted file mode 100644 index 960dc5af8..000000000 --- a/clus/docs/Clustering_separate_JudgeSeparateDec_1.md +++ /dev/null @@ -1,149 +0,0 @@ -# JudgeSeparateDec_1 Function Analysis - -## Function Signature -```cpp -bool JudgeSeparateDec_1( - const Cluster* cluster, // The cluster to analyze - const geo_point_t& drift_dir, // Drift direction vector - const double length, // Cluster length - const double time_slice_length // Length of a time slice -) -``` - -## Core Purpose -Determines if a cluster should be separated based on Principal Component Analysis (PCA) of the cluster's shape, its alignment with the drift direction, and time-based characteristics. - -## Algorithm Flow - -### 1. PCA Direction Analysis -```cpp -// Get the principal component axes -geo_point_t dir1(cluster->get_pca_axis(0).x(), - cluster->get_pca_axis(0).y(), - cluster->get_pca_axis(0).z()); // Primary axis -geo_point_t dir2(cluster->get_pca_axis(1).x(), - cluster->get_pca_axis(1).y(), - cluster->get_pca_axis(1).z()); // Secondary axis -geo_point_t dir3(cluster->get_pca_axis(2).x(), - cluster->get_pca_axis(2).y(), - cluster->get_pca_axis(2).z()); // Tertiary axis -``` - -### 2. Angular Analysis -```cpp -// Calculate angles relative to drift direction -double angle1 = fabs(dir2.angle(drift_dir) - 3.1415926/2.) / 3.1415926 * 180.; -double angle2 = fabs(dir3.angle(drift_dir) - 3.1415926/2.) / 3.1415926 * 180.; -``` -- Calculates angles between secondary/tertiary axes and drift direction -- Converts angles to degrees -- Normalizes relative to perpendicular (90 degrees) - -### 3. 
Time-Based Analysis -```cpp -// Calculate angle based on time slice characteristics -double temp_angle1 = asin(cluster->get_num_time_slices() * - time_slice_length / length) / 3.1415926 * 180.; -``` -- Uses cluster span in time slices -- Considers physical length and time slice width -- Converts to comparable angular measure - -### 4. PCA Value Analysis -```cpp -// Calculate ratios of PCA eigenvalues -double ratio1 = cluster->get_pca_value(1) / cluster->get_pca_value(0); // Secondary/Primary -double ratio2 = cluster->get_pca_value(2) / cluster->get_pca_value(0); // Tertiary/Primary -``` -- Compares relative strengths of principal components -- Indicates cluster shape characteristics -- Higher ratios suggest less linear structure - -### 5. Decision Formula -```cpp -if (ratio1 > pow(10, exp(1.38115 - 1.19312 * pow(angle1, 1./3.)) - 2.2) || - ratio1 > pow(10, exp(1.38115 - 1.19312 * pow(temp_angle1, 1./3.)) - 2.2) || - ratio2 > pow(10, exp(1.38115 - 1.19312 * pow(angle2, 1./3.)) - 2.2) || - ratio1 > 0.75) -{ - return true; -} -return false; -``` - -## Decision Criteria Breakdown - -### 1. Primary Angular Criterion -```cpp -ratio1 > pow(10, exp(1.38115 - 1.19312 * pow(angle1, 1./3.)) - 2.2) -``` -- Evaluates secondary/primary ratio against angle-dependent threshold -- Uses empirically derived formula -- More stringent for larger angles - -### 2. Time-Based Criterion -```cpp -ratio1 > pow(10, exp(1.38115 - 1.19312 * pow(temp_angle1, 1./3.)) - 2.2) -``` -- Similar to angular criterion but uses time-based angle -- Accounts for temporal distribution of cluster - -### 3. Tertiary Direction Criterion -```cpp -ratio2 > pow(10, exp(1.38115 - 1.19312 * pow(angle2, 1./3.)) - 2.2) -``` -- Evaluates tertiary/primary ratio -- Uses same formula structure -- Checks for significant tertiary component - -### 4. 
Simple Ratio Threshold -```cpp -ratio1 > 0.75 -``` -- Direct threshold on secondary/primary ratio -- Catches cases where cluster is notably non-linear -- Serves as a catch-all criterion - -## Mathematical Components - -### 1. Angle Normalization -- Normalizes angles relative to perpendicular -- Converts to degrees for formula application -- Handles both real space and time-slice space - -### 2. PCA Ratio Analysis -- Primary ratio: Secondary/Primary eigenvalues -- Secondary ratio: Tertiary/Primary eigenvalues -- Indicates deviation from linear shape - -### 3. Threshold Formula -``` -Threshold = 10^(e^(1.38115 - 1.19312 * angle^(1/3)) - 2.2) -``` -- Exponential relationship with angle -- Cube root smoothing of angle -- Offset and scaling factors from empirical tuning - -## Key Features - -1. **Multiple Criteria** - - Angular relationships - - Time-based characteristics - - PCA shape analysis - - Simple ratio threshold - -2. **Complementary Checks** - - Spatial configuration (PCA) - - Temporal distribution - - Overall shape characteristics - -3. **Tuned Parameters** - - Empirically derived constants - - Angle-dependent thresholds - - Fixed ratio threshold - -## Return Value Meaning -- `true`: Cluster exhibits characteristics suggesting it should be separated -- `false`: Cluster appears to be a single, coherent track - -The function provides a sophisticated analysis of cluster shape and orientation, using both geometric and temporal characteristics to make separation decisions. 
\ No newline at end of file diff --git a/clus/docs/Clustering_separate_JudgeSeparateDec_2.md b/clus/docs/Clustering_separate_JudgeSeparateDec_2.md deleted file mode 100644 index 661e054cb..000000000 --- a/clus/docs/Clustering_separate_JudgeSeparateDec_2.md +++ /dev/null @@ -1,182 +0,0 @@ -# JudgeSeparateDec_2 Function Analysis - -## Function Signature -```cpp -bool JudgeSeparateDec_2( - const Cluster* cluster, - const geo_point_t& drift_dir, - std::vector& boundary_points, - std::vector& independent_points, - const double cluster_length -) -``` - -## Core Purpose -Analyzes cluster geometry and boundary points to determine if a cluster should be separated, particularly focusing on detector boundary interactions and spatial distributions. - -## Algorithm Flow - -### 1. Initial Boundary Points Collection -```cpp -boundary_points = cluster->get_hull(); -``` -- Gets the convex hull points of the cluster - -### 2. Point Classification Setup -```cpp -std::vector hy_points, ly_points; // High/Low Y coordinates -std::vector hz_points, lz_points; // High/Low Z coordinates -std::vector hx_points, lx_points; // High/Low X coordinates -std::set independent_surfaces; // Tracks which detector surfaces are involved -``` - -### 3. Initial Point Classification -```cpp -for (size_t j = 0; j != boundary_points.size(); j++) { - if (j == 0) { - // Initialize all vectors with first point - hy_points.push_back(boundary_points.at(j)); - ly_points.push_back(boundary_points.at(j)); - // ... same for other directions - } - else { - // Check density criterion - if (cluster->nnearby(test_p, 15 * units::cm) > 75) { - // Update extreme points if current point is more extreme - if (boundary_points.at(j).y() > hy_points.at(0).y()) - hy_points.at(0) = boundary_points.at(j); - // ... similar for other directions - } - } -} -``` - -### 4. 
Out-of-X-Bounds Check -```cpp -bool flag_outx = false; -if (hx_points.at(0).x() > 257 * units::cm || - lx_points.at(0).x() < -1 * units::cm) { - flag_outx = true; -} -``` - -### 5. Detailed Boundary Analysis -For each direction (Y, Z, X), analyze points near boundaries: - -```cpp -// Example for high Y boundary -if (hy_points.at(0).y() > 101.5 * units::cm) { - for (size_t j = 0; j != boundary_points.size(); j++) { - if (boundary_points.at(j).y() > 101.5 * units::cm) { - bool flag_save = true; - // Check for nearby existing high-Y points - for (size_t k = 0; k != hy_points.size(); k++) { - double dis = sqrt(pow(...)); // Distance calculation - if (dis < 25 * units::cm) { - if (boundary_points.at(j).y() > hy_points.at(k).y()) - hy_points.at(k) = boundary_points.at(j); - flag_save = false; - } - } - if (flag_save) - hy_points.push_back(boundary_points.at(j)); - } - } -} -``` - -### 6. Independent Point Collection -```cpp -for (auto extreme_points : {hy_points, ly_points, hz_points, lz_points, hx_points, lx_points}) { - for (const auto& point : extreme_points) { - if (IsWithinDetectorBounds(point) && !flag_outx) - continue; - - bool flag_save = true; - // Check distance to existing independent points - for (const auto& indep_point : independent_points) { - if (Distance(point, indep_point) < 15 * units::cm) { - flag_save = false; - break; - } - } - - if (flag_save) { - independent_points.push_back(point); - // Identify which surface this point belongs to - UpdateIndependentSurfaces(point, independent_surfaces); - } - } -} -``` - -### 7. Final Decision Logic -```cpp -// Count points outside boundaries -int num_outside_points = 0; -int num_outx_points = 0; - -// Decision criteria -if ((num_outside_points > 1 && independent_surfaces.size() > 1) || - (num_outside_points > 2 && cluster_length > 250 * units::cm) || - num_outx_points > 0) && - (independent_points.size() > 2 || - (independent_points.size() == 2 && num_far_points > 0))) -{ - return true; -} -``` - -### 8. 
Additional Protection Checks -If not returning true, perform additional analysis: -```cpp -double max_x = -1e9, min_x = 1e9; // And similar for Y, Z -// Calculate extent in each direction - -// Additional geometric checks -if (max_x - min_x < 2.5 * units::cm && - sqrt(pow(max_y - min_y, 2) + pow(max_z - min_z, 2) + - pow(max_x - min_x, 2)) > 150 * units::cm) { - independent_points.clear(); - return false; -} -``` - -## Key Decision Criteria - -1. **Boundary Interactions** - - Number of surfaces interacted with (independent_surfaces) - - Number of points outside detector bounds - - Points in X-direction out of bounds - -2. **Spatial Distribution** - - Distance between extreme points - - Cluster density near boundaries - - Minimum separation between independent points - -3. **Geometric Properties** - - Cluster extent in each direction - - Total cluster length - - Point density requirements - -## Protection Mechanisms - -1. **Density Requirements** - - Minimum point density (75 points within 15cm) - - Maximum separation between related points (25cm) - -2. **False Positive Prevention** - - Minimum cluster size requirements - - Multiple surface interaction requirements - - Point separation validation - -3. **Edge Cases** - - Special handling for X-direction outliers - - Protection against thin, long clusters - - Validation of point distributions - -## Return Value Meaning -- `true`: Cluster should be separated -- `false`: Cluster should remain intact - -The function makes its decision based on a complex interplay of geometric properties, detector boundary interactions, and point distributions, with multiple layers of validation to ensure reliable separation decisions. 
\ No newline at end of file diff --git a/clus/docs/Clustering_separate_Separate_1.md b/clus/docs/Clustering_separate_Separate_1.md deleted file mode 100644 index b33e4ac79..000000000 --- a/clus/docs/Clustering_separate_Separate_1.md +++ /dev/null @@ -1,420 +0,0 @@ -# Separate_1 Function Analysis - -## Function Signature -```cpp -std::vector Separate_1( - const bool use_ctpc, // Control flag for processing method - Cluster* cluster, // Input cluster to separate - std::vector& boundary_points, - std::vector& independent_points, - std::map>& dead_u_index, - std::map>& dead_v_index, - std::map>& dead_w_index, - double length // Cluster length -) -``` - -## Core Purpose -Performs detailed separation of clusters based on path finding, point classification, and geometric analysis, particularly focusing on track-like structures. - -## Algorithm Flow - -### 1. Initial Setup and Direction Analysis -```cpp -auto temp_cloud = std::make_shared(tp.angle_u, tp.angle_v, tp.angle_w); -geo_point_t dir_drift(1, 0, 0); -geo_point_t dir_cosmic(0, 1, 0); -geo_point_t dir_beam(0, 0, 1); -geo_point_t cluster_center = cluster->get_center(); -``` - -### 2. Principal Direction Analysis -```cpp -geo_point_t main_dir = cluster->get_pca_axis(0); -geo_point_t second_dir = cluster->get_pca_axis(1); - -// Special case handling for cosmic rays near beam direction -if (cluster->get_pca_value(1) > 0.08 * cluster->get_pca_value(0) && - fabs(main_dir.angle(dir_beam) - 3.1415926/2.) > 75/180.*3.1415926 && - fabs(second_dir.angle(dir_cosmic) - 3.1415926/2.) > 60/180.*3.1415926) { - main_dir = second_dir; -} - -main_dir = main_dir.norm(); -if (main_dir.y() > 0) - main_dir = main_dir * -1; // Point downward -``` - -### 3. 
Start/End Point Identification -```cpp -// Find extremal points along main direction -double min_dis = 1e9, max_dis = -1e9; -int min_index = 0, max_index = 0; - -for (size_t j = 0; j != independent_points.size(); j++) { - geo_point_t dir(independent_points.at(j).x() - cluster_center.x(), - independent_points.at(j).y() - cluster_center.y(), - independent_points.at(j).z() - cluster_center.z()); - double dis = dir.dot(main_dir); - - // Check point density for connectivity - bool flag_connect = false; - int num_points = cluster->nnearby(temp_p, 15 * units::cm); - if (num_points > 100) { - flag_connect = true; - } - else if (num_points > 75) { - num_points = cluster->nnearby(temp_p, 30 * units::cm); - if (num_points > 160) flag_connect = true; - } - - // Update extremal points - if (dis < min_dis && flag_connect) { - min_dis = dis; - min_index = j; - } - if (dis > max_dis && flag_connect) { - max_dis = dis; - max_index = j; - } -} -``` - -### 4. Path Finding -```cpp -// Initial path establishment -geo_point_t start_wcpoint = independent_points.at(min_index); -geo_point_t end_wcpoint; - -// Direction determination using Hough transform -dir = cluster->vhough_transform(start_point, 100 * units::cm); -geo_point_t dir1 = cluster->vhough_transform(start_point, 30 * units::cm); - -// Adjust direction based on angles -if (dir.angle(dir1) > 20 * 3.1415926/180.) { - if (fabs(dir.angle(drift_dir) - 3.1415926/2.) < 5 * 3.1415926/180. || - fabs(dir1.angle(drift_dir) - 3.1415926/2.) < 5 * 3.1415926/180.) { - dir = cluster->vhough_transform(start_point, 200 * units::cm); - } - else { - dir = dir1; - } -} -``` - -### 5. Path Extension and Refinement -```cpp -// Find path endpoints -dir = dir.norm(); -geo_point_t inv_dir = dir * (-1); -start_wcpoint = cluster->get_furthest_wcpoint(start_wcpoint, inv_dir, 1*units::cm, 0); -end_wcpoint = cluster->get_furthest_wcpoint(start_wcpoint, dir); - -// Parallel direction adjustment -if (fabs(test_dir.angle(drift_dir) - 3.1415926/2.) 
< 2.5 * 3.1415926/180.) { - cluster->adjust_wcpoints_parallel(start_wcpoint_idx, end_wcpoint_idx); -} -``` - -### 6. Path Following and Point Classification -```cpp -// Dijkstra path finding -cluster->dijkstra_shortest_paths(start_wcpoint_idx, use_ctpc); -cluster->cal_shortest_path(end_wcpoint_idx); - -// Create point sequence along path -const auto& path_wcps = cluster->get_path_wcps(); -std::vector pts; - -// Interpolate points along path -for (auto it = path_wcps.begin(); it != path_wcps.end(); it++) { - // Point interpolation logic - // Distance-based point addition -} -``` - -### 7. Point Classification -```cpp -// Initialize classification flags for each wire plane -std::vector flag_u_pts(cluster->npoints(), false); -std::vector flag_v_pts(cluster->npoints(), false); -std::vector flag_w_pts(cluster->npoints(), false); - -// Classify points based on distance to path -for (size_t j = 0; j != flag_u_pts.size(); j++) { - geo_point_t test_p = cluster->point3d(j); - - // Check distances in each wire plane view - std::pair temp_results = temp_cloud->get_closest_2d_dis(test_p, 0); - // Similar checks for v and w planes - - // Consider dead regions - if (dead_u_index.find(winds[0][j]) != dead_u_index.end()) { - // Special handling for dead regions - } -} -``` - -# Cluster Formation Details in Separate_1 - -## 1. 
Initial Classification of Blobs -```cpp -const auto& mcells = cluster->children(); -std::map mcell_np_map, mcell_np_map1; - -// Initialize maps -for (auto it = mcells.begin(); it != mcells.end(); it++) { - mcell_np_map[*it] = 0; - mcell_np_map1[*it] = 0; -} - -// Count points satisfying different criteria for each blob -for (size_t j = 0; j != flag_u_pts.size(); j++) { - const Blob* mcell = cluster->blob_with_point(j); - - // Primary classification criterion - if (flag_u_pts.at(j) && flag_v_pts.at(j) && flag1_w_pts.at(j) || - flag_u_pts.at(j) && flag_w_pts.at(j) && flag1_v_pts.at(j) || - flag_w_pts.at(j) && flag_v_pts.at(j) && flag1_u_pts.at(j)) { - mcell_np_map[mcell]++; - } - - // Secondary classification criterion - if (flag_u_pts.at(j) && flag_v_pts.at(j) && (flag2_w_pts.at(j) || flag1_w_pts.at(j)) || - flag_u_pts.at(j) && flag_w_pts.at(j) && (flag2_v_pts.at(j) || flag1_v_pts.at(j)) || - flag_w_pts.at(j) && flag_v_pts.at(j) && (flag2_u_pts.at(j) || flag1_u_pts.at(j))) { - mcell_np_map1[mcell]++; - } -} -``` - -## 2. Initial Blob Assignment -```cpp -// blob (index) -> cluster_id mapping -std::vector b2groupid(cluster->nchildren(), 0); -std::set groupids; - -for (size_t idx=0; idx < mcells.size(); idx++) { - Blob* mcell = mcells.at(idx); - - // Calculate total wire coverage - const size_t total_wires = mcell->u_wire_index_max() - mcell->u_wire_index_min() + - mcell->v_wire_index_max() - mcell->v_wire_index_min() + - mcell->w_wire_index_max() - mcell->w_wire_index_min(); - - // Assign blobs to groups based on point counts and wire coverage - if (mcell_np_map[mcell] > 0.5 * mcell->nbpoints() || - (mcell_np_map[mcell] > 0.25 * mcell->nbpoints() && total_wires < 25)) { - b2groupid[idx] = 0; // Main cluster - groupids.insert(0); - } - else if (mcell_np_map1[mcell] >= 0.95 * mcell->nbpoints()) { - b2groupid[idx] = -1; // To be deleted (ghost cell) - groupids.insert(-1); - } - else { - b2groupid[idx] = 1; // Secondary cluster - groupids.insert(1); - } -} -``` - -## 3. 
Initial Cluster Separation -```cpp -// Perform initial separation based on group IDs -auto clusters_step0 = cluster->separate(b2groupid); -``` - -## 4. Secondary Separation and Processing -```cpp -std::vector other_clusters; -if (clusters_step0.find(1) != clusters_step0.end()) { - // Apply Separate_2 to secondary clusters - other_clusters = Separate_2(clusters_step0[1], 5 * units::cm); -} - -// Process main cluster if it exists -if (clusters_step0.find(0) != clusters_step0.end()) { -``` - -## 5. Cluster Merging Logic -```cpp -// Check for clusters that should be merged with main cluster -std::vector temp_merge_clusters; -for (size_t i = 0; i != other_clusters.size(); i++) { - std::tuple temp_dis = - other_clusters.at(i)->get_closest_points(*clusters_step0[0]); - - if (std::get<2>(temp_dis) < 0.5 * units::cm) { - double length_1 = other_clusters.at(i)->get_length(); - geo_point_t p1(end_wcpoint.x(), end_wcpoint.y(), end_wcpoint.z()); - double close_dis = other_clusters.at(i)->get_closest_dis(p1); - - // Check merging criteria - if (close_dis < 10 * units::cm && length_1 < 50 * units::cm) { - geo_point_t temp_dir1 = clusters_step0[0]->vhough_transform(p1, 15 * units::cm); - geo_point_t temp_dir2 = other_clusters.at(i)->vhough_transform(p1, 15 * units::cm); - - // Angle-based merging decisions - if (temp_dir1.angle(temp_dir2) / 3.1415926 * 180. > 145 && - length_1 < 30 * units::cm && close_dis < 3 * units::cm || - fabs(temp_dir1.angle(drift_dir) - 3.1415926 / 2.) / 3.1415926 * 180. < 3 && - fabs(temp_dir2.angle(drift_dir) - 3.1415926 / 2.) / 3.1415926 * 180. < 3) { - temp_merge_clusters.push_back(other_clusters.at(i)); - } - } - } -} -``` - -## 6. Executing Mergers -```cpp -// Perform merging operations -for (auto temp_cluster : temp_merge_clusters) { - clusters_step0[0]->take_children(*temp_cluster, true); - grouping->remove_child(*temp_cluster); -} - -final_clusters.push_back(clusters_step0[0]); -``` - -## 7. 
Additional Cluster Analysis -```cpp -// Further analysis of remaining clusters -std::vector saved_clusters; -std::vector to_be_merged_clusters; - -for (size_t i = 0; i != other_clusters.size(); i++) { - bool flag_save = false; - double length_1 = other_clusters.at(i)->get_length(); - - // Analysis for short clusters - if (length_1 < 30 * units::cm && std::get<2>(temp_dis) < 5 * units::cm) { - int temp_total_points = other_clusters.at(i)->npoints(); - int temp_close_points = 0; - - // Count close points - for (size_t j = 0; j != other_clusters.at(i)->npoints(); j++) { - geo_point_t test_point = other_clusters.at(i)->point3d(j); - if (clusters_step0[0]->get_closest_dis(test_point) < 10 * units::cm) { - temp_close_points++; - } - } - - // Decision based on point proximity - if (temp_close_points > 0.7 * temp_total_points) { - saved_clusters.push_back(other_clusters.at(i)); - flag_save = true; - } - } - - // Analysis for longer clusters - else if (std::get<2>(temp_dis) < 2.5 * units::cm && length_1 >= 30 * units::cm) { - // Similar point proximity analysis with different thresholds - } - - if (!flag_save) - to_be_merged_clusters.push_back(other_clusters.at(i)); -} -``` - -## 8. Final Protection and Cleanup -```cpp -// Additional protection checks -std::vector temp_save_clusters; -for (size_t i = 0; i != saved_clusters.size(); i++) { - Cluster* cluster1 = saved_clusters.at(i); - if (cluster1->get_length() < 5 * units::cm) - continue; - - // Check against to-be-merged clusters - for (size_t j = 0; j != to_be_merged_clusters.size(); j++) { - Cluster* cluster2 = to_be_merged_clusters.at(j); - if (cluster2->get_length() < 10 * units::cm) - continue; - - // Additional geometric checks - std::tuple temp_dis = - cluster1->get_closest_points(*cluster2); - if (std::get<2>(temp_dis) < 15 * units::cm && - fabs(dir1.angle(dir2) - 3.1415926/2.)/3.1415926*180 > 75) { - temp_save_clusters.push_back(cluster1); - break; - } - } -} -``` - -## 9. 
Final Cluster Organization -```cpp -// Create final cluster for merged segments -Cluster& cluster2 = grouping->make_child(); -for (size_t i = 0; i != to_be_merged_clusters.size(); i++) { - cluster2.take_children(*to_be_merged_clusters[i], true); - grouping->remove_child(*to_be_merged_clusters[i]); -} - -// Add clusters to final result -final_clusters.push_back(&cluster2); -for (size_t i = 0; i != saved_clusters.size(); i++) { - final_clusters.push_back(saved_clusters.at(i)); -} -``` - -This detailed breakdown shows how the function handles the complex task of organizing and merging clusters based on various geometric and proximity criteria, with multiple levels of protection against incorrect merging decisions. - - - - - -## Key Features - -1. **Path Finding** - - Uses Dijkstra's algorithm - - Considers wire plane geometry - - Handles dead regions - -2. **Point Classification** - - Multi-plane analysis - - Dead region consideration - - Proximity-based grouping - -3. **Cluster Formation** - - Initial separation - - Secondary refinement - - Merging of related segments - -## Algorithm Parameters - -1. **Distance Thresholds** - - 15 cm for near point density - - 30 cm for extended point density - - Various angular thresholds - -2. **Density Requirements** - - >100 points in 15 cm radius - - >160 points in 30 cm radius - -## Return Value -- Vector of separated Cluster pointers -- Maintains physical and geometric relationships -- Preserves wire plane hit patterns - -## Special Considerations - -1. **Dead Regions** - - Special handling for dead wire regions - - Modified proximity criteria - - Adjusted connectivity rules - -2. **Geometric Constraints** - - Wire plane angles - - Drift direction alignment - - Beam direction relationships - -3. 
**Track Continuity** - - Path coherence - - Point density requirements - - Multi-view consistency - -This function provides a sophisticated approach to cluster separation, particularly suited for track-like structures in wire chamber detectors. It combines geometric analysis, path finding, and point classification to achieve reliable separation of merged or crossed tracks. \ No newline at end of file diff --git a/clus/docs/Clustering_separate_Separate_2.md b/clus/docs/Clustering_separate_Separate_2.md deleted file mode 100644 index 6a99b681a..000000000 --- a/clus/docs/Clustering_separate_Separate_2.md +++ /dev/null @@ -1,209 +0,0 @@ -# Separate_2 Function Analysis - -## Function Signature -```cpp -std::vector Separate_2( - Cluster* cluster, // Input cluster to separate - const double dis_cut = 5*units::cm, // Distance threshold for connectivity - const size_t ticks_per_slice = 4 // Time ticks per slice -) -``` - -## Core Purpose -Separates clusters based on time slice connectivity and spatial proximity, using graph theory to identify disconnected components. - -## Algorithm Components - -### 1. Time-Based Organization -```cpp -const auto& time_cells_set_map = cluster->time_blob_map(); -std::vector& mcells = cluster->children(); - -std::vector time_slices; -for (auto it1 = time_cells_set_map.begin(); it1 != time_cells_set_map.end(); it1++) { - time_slices.push_back((*it1).first); -} -``` -- Gets map of blobs organized by time slices -- Extracts time slice indices -- Prepares for time-ordered processing - -### 2. Connectivity Analysis - -#### a. 
Same Time Slice Connectivity -```cpp -std::vector> connected_mcells; -for (size_t i = 0; i != time_slices.size(); i++) { - const BlobSet& mcells_set = time_cells_set_map.at(time_slices.at(i)); - - if (mcells_set.size() >= 2) { - for (auto it2 = mcells_set.begin(); it2 != mcells_set.end(); it2++) { - const Blob* mcell1 = *it2; - auto it2p = it2; - it2p++; - for (auto it3 = it2p; it3 != mcells_set.end(); it3++) { - const Blob* mcell2 = *(it3); - if (mcell1->overlap_fast(*mcell2, 5)) { - connected_mcells.push_back(std::make_pair(mcell1, mcell2)); - } - } - } - } -} -``` -- Checks blob connectivity within same time slice -- Uses fast overlap check with threshold of 5 -- Stores connected blob pairs - -#### b. Adjacent Time Slice Connectivity -```cpp -std::vector vec_mcells_set; -if (i + 1 < time_slices.size()) { - if (time_slices.at(i + 1) - time_slices.at(i) == 1*ticks_per_slice) { - vec_mcells_set.push_back(time_cells_set_map.at(time_slices.at(i + 1))); - // Check next slice if gap is 2 ticks - if (i + 2 < time_slices.size() && - time_slices.at(i + 2) - time_slices.at(i) == 2*ticks_per_slice) { - vec_mcells_set.push_back(time_cells_set_map.at(time_slices.at(i + 2))); - } - } - else if (time_slices.at(i + 1) - time_slices.at(i) == 2*ticks_per_slice) { - vec_mcells_set.push_back(time_cells_set_map.at(time_slices.at(i + 1))); - } -} -``` -- Checks connectivity across adjacent time slices -- Handles both single and double time slice gaps -- Maintains temporal continuity - -### 3. Graph Construction -```cpp -const int N = mcells.size(); -MCUGraph graph(N); - -std::map mcell_index_map; -for (size_t i = 0; i != mcells.size(); i++) { - Blob* curr_mcell = mcells.at(i); - mcell_index_map[curr_mcell] = i; - - auto v = vertex(i, graph); - (graph)[v].index = i; -} -``` -- Creates undirected graph -- Maps blobs to graph vertices -- Prepares for connectivity analysis - -### 4. 
Edge Addition -```cpp -for (auto it = connected_mcells.begin(); it != connected_mcells.end(); it++) { - int index1 = mcell_index_map[it->first]; - int index2 = mcell_index_map[it->second]; - auto edge = add_edge(index1, index2, graph); - if (edge.second) { - (graph)[edge.first].dist = 1; - } -} -``` -- Adds edges for connected blobs -- Sets distance weight for edges -- Builds connectivity structure - -### 5. Component Analysis and Refinement -```cpp -std::vector component(num_vertices(graph)); -const int num = connected_components(graph, &component[0]); - -if (num > 1) { - // Additional spatial proximity check - std::vector> pt_clouds; - std::vector> vec_vec(num); - - // Create point clouds for each component - for (int j = 0; j != num; j++) { - pt_clouds.push_back(std::make_shared()); - } - - // Fill point clouds - for (size_t i = 0; i != component.size(); ++i) { - vec_vec.at(component[i]).push_back(i); - Blob* mcell = mcells.at(i); - for (const auto& pt : mcell->points()) { - pt_clouds.at(component[i])->add({pt.x(), pt.y(), pt.z()}); - } - } -``` - -### 6. Component Distance Check -```cpp -// Check distances between components -for (int j = 0; j != num; j++) { - for (int k = j + 1; k != num; k++) { - std::tuple temp_results = - pt_clouds.at(j)->get_closest_points(*(pt_clouds.at(k))); - if (std::get<2>(temp_results) < dis_cut) { - // Add edge to reconnect close components - int index1 = vec_vec[j].front(); - int index2 = vec_vec[k].front(); - auto edge = add_edge(index1, index2, graph); - if (edge.second) { - (graph)[edge.first].dist = 1; - } - } - } -} -``` -- Checks spatial proximity between components -- Reconnects components within distance threshold -- Uses provided dis_cut parameter - -### 7. 
Final Separation -```cpp -std::vector component(num_vertices(graph)); -const int num = connected_components(graph, &component[0]); -auto id2cluster = cluster->separate(component); -std::vector ret; -for (auto [id, cluster] : id2cluster) { - ret.push_back(cluster); -} -return ret; -``` -- Performs final component analysis -- Separates cluster based on components -- Returns vector of separated clusters - -## Key Features - -1. **Multi-Level Connectivity** - - Same time slice connectivity - - Adjacent time slice connectivity - - Spatial proximity checks - -2. **Temporal Flexibility** - - Handles single time slice gaps - - Accommodates double time slice gaps - - Maintains temporal continuity - -3. **Graph-Based Processing** - - Uses undirected graph structure - - Connected components analysis - - Distance-based refinement - -## Algorithm Parameters - -1. **dis_cut** - - Default: 5 cm - - Controls spatial proximity threshold - - Used for component merging - -2. **ticks_per_slice** - - Default: 4 - - Defines time slice granularity - - Used for temporal connectivity - -## Return Value -- Vector of separated Cluster pointers -- Each cluster represents a connected component -- Maintains both spatial and temporal relationships - -The function provides a sophisticated approach to cluster separation using both temporal and spatial information, with graph theory providing the underlying structure for connectivity analysis. \ No newline at end of file diff --git a/clus/docs/Facade_Cluster.md b/clus/docs/Facade_Cluster.md deleted file mode 100644 index cdef648ec..000000000 --- a/clus/docs/Facade_Cluster.md +++ /dev/null @@ -1,243 +0,0 @@ -I'll create a detailed explanation of the Facade_Cluster class structure, logic, and algorithms. - - - - -# Facade_Cluster Class Architecture - -## 1. 
Class Hierarchy and Design Pattern - -### Inheritance Structure -``` -NaryTree::FacadeParent - ↑ - Facade_Cluster -``` - -The Facade_Cluster class inherits from NaryTree::FacadeParent and implements the Facade design pattern to: -- Provide a simplified interface to a complex point cloud data structure -- Manage relationships between blobs (collections of points) -- Handle spatial and temporal organization of detector data - -### Core Components -1. **Point Cloud Management** - - 3D point storage and indexing - - 2D projections for wire planes - - K-d trees for efficient spatial queries - -2. **Blob Organization** - - Hierarchical structure of points grouped into blobs - - Time-based organization - - Spatial relationships between blobs - -3. **Graph Structure** - - Represents connectivity between points - - Supports path finding and trajectory analysis - - Handles cluster connectivity - -## 2. Data Structures - -### Core Data Members -```cpp -// Cached values -mutable double m_length{0}; // Cluster length -mutable int m_npoints{0}; // Total point count -mutable time_blob_map_t m_time_blob_map; // Time-ordered blob mapping - -// PCA related -mutable bool m_pca_calculated{false}; -mutable geo_point_t m_center; -mutable geo_vector_t m_pca_axis[3]; -mutable double m_pca_values[3]; - -// Graph related -mutable std::unique_ptr m_graph; -mutable std::vector m_parents; -mutable std::vector m_distances; -mutable int m_source_pt_index{-1}; -mutable std::list m_path_wcps; -mutable std::list m_path_mcells; -``` - -### Key Scopes -```cpp -const Tree::Scope scope = {"3d", {"x", "y", "z"}}; -const Tree::Scope scope_wire_index = {"3d", {"uwire_index", "vwire_index", "wwire_index"}}; -Tree::Scope scope2ds[3] = { - {"2dp0", {"x", "y"}}, - {"2dp1", {"x", "y"}}, - {"2dp2", {"x", "y"}} -}; -``` - -## 3. 
Core Algorithms - -### 3.1 Spatial Organization - -#### K-d Tree Construction and Usage -```mermaid -graph TD - A[Point Cloud Data] --> B[Build K-d Tree] - B --> C[3D Spatial Index] - B --> D[2D Projections] - C --> E[Nearest Neighbor Queries] - C --> F[Range Queries] - D --> G[Wire Plane Analysis] -``` - -#### PCA Analysis Flow -```mermaid -graph TD - A[Point Cloud] --> B[Calculate Center] - B --> C[Build Covariance Matrix] - C --> D[Eigenvalue Decomposition] - D --> E[Sort Principal Components] - E --> F[Cache Results] -``` - -### 3.2 Graph Construction Algorithm - -```cpp -1. Initialize graph structure -2. Establish_close_connected_graph: - - Create edges within blobs - - Connect overlapping blobs - - Apply distance-based cuts -3. Connect_graph: - - Find connected components - - Create additional edges between components - - Validate connections using CTPC (optional) -``` - -### 3.3 Path Finding Process - -```mermaid -graph TD - A[Start Point] --> B[Create Graph] - B --> C[Dijkstra's Algorithm] - C --> D[Calculate Shortest Path] - D --> E[Store Path Points] - E --> F[Path Refinement] -``` - -## 4. Key Workflows - -### 4.1 Cluster Analysis Workflow -```mermaid -graph TD - A[Raw Data] --> B[Create Blobs] - B --> C[Organize in Time] - C --> D[Build Spatial Index] - D --> E[Compute Properties] - E --> F[Analysis Results] -``` - -### 4.2 Track Finding Process -1. **Initial Setup** - ```cpp - - Create point cloud structure - - Build k-d trees - - Establish blob relationships - ``` - -2. **Graph Construction** - ```cpp - - Create_graph() - - Establish_close_connected_graph() - - Connect_graph() - ``` - -3. **Path Finding** - ```cpp - - dijkstra_shortest_paths() - - cal_shortest_path() - - get_path_wcps() - ``` - -## 5. 
Implementation Details - -### 5.1 Point Cloud Management -- Uses lazy initialization for k-d trees -- Maintains separate 2D projections for wire planes -- Implements efficient spatial queries - -### 5.2 Blob Organization -- Hierarchical structure of points -- Time-based mapping -- Spatial relationship tracking - -### 5.3 Graph Management -- Boost Graph Library integration -- Custom edge and vertex properties -- Path finding algorithms - -## 6. Performance Considerations - -### 6.1 Caching Strategy -```cpp -// Example of lazy initialization and caching -const time_blob_map_t& time_blob_map() const { - if (m_time_blob_map.empty()) { - // Build map only when needed - for (const Blob* blob : children()) { - m_time_blob_map[blob->slice_index_min()].insert(blob); - } - } - return m_time_blob_map; -} -``` - -### 6.2 Memory Management -- Lazy initialization of expensive structures -- Caching of computed values -- Smart pointer usage for graph management - -### 6.3 Computational Efficiency -- K-d tree for spatial queries: O(log n) -- Dijkstra's algorithm: O(E log V) -- PCA computation: O(n) - -## 7. Usage Examples - -### 7.1 Basic Point Cloud Operations -```cpp -// Create cluster and add points -Cluster cluster; -// ... add points and blobs ... - -// Perform spatial query -geo_point_t query_point(0, 0, 0); -auto nearby = cluster.nnearby(query_point, 5.0 * units::cm); -``` - -### 7.2 Track Finding -```cpp -// Setup cluster -cluster.Create_graph(true); -cluster.dijkstra_shortest_paths(start_index); -cluster.cal_shortest_path(end_index); -auto path = cluster.get_path_wcps(); -``` - -## 8. Extension Points - -The class is designed for extension through: -1. Template specialization for different point types -2. Custom blob implementations -3. Additional graph algorithms -4. New spatial analysis methods - -## 9. 
Error Handling - -The class uses: -- Exception throwing for invalid operations -- Boundary checking for indices -- Validation of geometric operations -- Sanity checks for graph operations - - - -This documentation provides a comprehensive overview of the Facade_Cluster class architecture. Would you like me to: -1. Add more detailed algorithm explanations? -2. Provide more specific code examples? -3. Explain any particular aspect in more detail? \ No newline at end of file diff --git a/clus/docs/Facade_Cluster_API.md b/clus/docs/Facade_Cluster_API.md deleted file mode 100644 index defeb9419..000000000 --- a/clus/docs/Facade_Cluster_API.md +++ /dev/null @@ -1,639 +0,0 @@ -# Facade_Cluster Class Documentation - -## Overview -The `Facade_Cluster` class is part of the WireCell Point Cloud namespace and provides a high-level interface for working with clusters of 3D points and blobs in particle detector data analysis. It acts as a facade over a PC (Point Cloud) tree, giving semantics to what would otherwise be simple nodes. 
- -## Key Features - -### Point Cloud Management -- Supports both 2D and 3D point clouds -- Provides k-d tree functionality for efficient spatial queries -- Manages collections of blobs (groups of points) within the cluster - -### Spatial Operations -- Distance calculations between points and clusters -- Finding nearest neighbors and points within a radius -- Calculation of geometric properties (center, PCA axes) -- Convex hull computation -- Hough transformations for direction finding - -### Graph Operations -- Supports creation and manipulation of cluster graphs -- Implements Dijkstra's shortest path algorithm -- Provides connectivity analysis between blobs - -### Time-Based Analysis -- Manages time-slice based organization of blobs -- Supports time-ordered operations and queries -- Tracks temporal relationships between points and blobs - -## Complete API Reference - -### Core Class Management - -#### Constructor & Destructor -```cpp -Cluster(); -virtual ~Cluster(); -``` -- Default constructor initializes an empty cluster -- Virtual destructor ensures proper cleanup of derived classes - -#### Grouping Access -```cpp -Grouping* grouping(); -const Grouping* grouping() const; -``` -- Returns pointer to the parent grouping that contains this cluster -- Used to access parameters and higher-level organization -- Const version available for read-only access - -### Point Cloud Operations - -#### 3D Point Cloud Management -```cpp -const sv3d_t& sv3d() const; -``` -- Returns the scoped view for the "3d" point cloud (x,y,z coordinates) -- Used internally for point cloud management -- Provides access to underlying data structure - -```cpp -const kd3d_t& kd3d() const; -``` -- Returns the k-d tree for "3d" point cloud -- May trigger k-d tree building if not already constructed -- Essential for efficient spatial queries - -```cpp -const points_type& points() const; -``` -- Returns full array of point coordinates -- Recommended for bulk point access -- More efficient than 
accessing individual points repeatedly - -#### 2D Point Cloud Management -```cpp -const sv2d_t& sv2d(const size_t plane) const; -``` -- Returns 2D scoped view for specified plane (0=U, 1=V, 2=W) -- Provides access to 2D projections for wire plane analysis -- Essential for wire-plane specific operations - -```cpp -const kd2d_t& kd2d(const size_t plane) const; -``` -- Returns 2D k-d tree for specified plane -- Enables efficient spatial queries in 2D projections -- Used for wire-plane specific distance calculations - -### Point Access and Queries - -#### Point Information -```cpp -geo_point_t point3d(size_t point_index) const; -geo_point_t point(size_t point_index) const; -``` -- Returns 3D point at given k-d tree index -- point() is alias for point3d() to match Simple3DPointCloud interface -- Use with caution in tight loops - prefer bulk access through points() - -```cpp -int npoints() const; -``` -- Returns total number of points in cluster -- Uses cached value when available -- Thread-safe for read access - -```cpp -size_t nbpoints() const; -``` -- Returns total number of points according to sum of Blob::nbpoints() -- Equivalent to WCP's get_num_points() -- May differ from npoints() due to different counting methods - -#### Spatial Queries -```cpp -kd_results_t kd_radius(double radius, const geo_point_t& query_point) const; -``` -- Performs radius search in k-d tree -- Returns points within specified radius of query point -- Note: radius parameter is linear distance, not squared - -```cpp -kd_results_t kd_knn(int nnearest, const geo_point_t& query_point) const; -``` -- Performs k-nearest neighbor search -- Returns the specified number of closest points to query point -- Efficient for finding local point neighborhoods - -```cpp -std::vector kd_points(const kd_results_t& res); -std::vector kd_points(const kd_results_t& res) const; -``` -- Converts k-d tree query results to vector of points -- Both mutable and const versions available -- Useful for processing query 
results - -#### Closest Point Finding -```cpp -std::pair get_closest_point_blob(const geo_point_t& point) const; -``` -- Returns closest point and its containing blob to given point -- Useful for associating external points with cluster structure -- Returns {point, nullptr} if no points found - -```cpp -std::pair get_closest_wcpoint(const geo_point_t& p) const; -``` -- Returns index and coordinates of closest point to given point -- Equivalent to WCP's get_closest_wcpoint functionality -- Returns {-1, nullptr} if no points found - -```cpp -size_t get_closest_point_index(const geo_point_t& point) const; -``` -- Returns k-d tree index of closest point to given point -- Throws ValueError if cluster is empty -- More efficient than get_closest_wcpoint when only index is needed - -```cpp -double get_closest_dis(const geo_point_t& point) const; -``` -- Returns distance to closest point from given point -- Throws ValueError if cluster is empty -- Efficient when only distance is needed - -```cpp -std::vector get_closest_2d_index( - const geo_point_t& p, - const double search_radius, - const int plane) const; -``` -- Finds indices of points within search radius in 2D projection -- Operates on specified wire plane (0=U, 1=V, 2=W) -- Essential for wire-plane specific analysis - -```cpp -template -std::tuple get_closest_points(const PCType& two) const; -``` -- Finds closest points between this cluster and another point cloud -- Returns tuple containing: - - Index of closest point in this cluster - - Index of closest point in the other point cloud - - Distance between these points -- Uses iterative search to find global minimum distance -- Checks multiple starting positions to avoid local minima -- Template allows comparison with any point cloud type implementing required interface - -```cpp -std::pair get_closest_point_along_vec( - geo_point_t& p_test, // Starting point - geo_point_t dir, // Direction vector - double test_dis, // Maximum search distance - double dis_step, // 
Step size for search - double angle_cut, // Maximum allowed angle deviation - double dis_cut // Maximum allowed distance from line -) const; -``` -- Searches for closest point along a specified direction vector -- Parameters: - - p_test: Starting point for the search - - dir: Direction to search along - - test_dis: Maximum distance to search - - dis_step: Distance between test points along direction - - angle_cut: Maximum allowed angular deviation (in degrees) - - dis_cut: Maximum allowed perpendicular distance from search line -- Returns: - - Closest point found meeting criteria - - Distance from starting point -- Used for tracking trajectory analysis -- Important for finding continuation points in tracks -- Includes angular constraints to ensure consistent direction - -#### Point Counting and Analysis -```cpp -int nnearby(const geo_point_t& point, double radius) const; -``` -- Counts points within radius of given point -- Uses linear distance measure (not squared) -- Efficient for density estimation - -```cpp -std::pair ndipole( - const geo_point_t& point, - const geo_point_t& dir, - const double dis=-1) const; -``` -- Returns count of points in forward/backward direction from point -- Optional distance cutoff (disabled if negative) -- Useful for analyzing point cloud directionality - -### Blob Management - -#### Blob Access -```cpp -std::vector kd_blobs(); -std::vector kd_blobs() const; -``` -- Returns all blobs in k-d tree order -- Order differs from children() order and sort_blobs() order -- Both mutable and const versions available - -```cpp -Blob* blob_with_point(size_t point_index); -const Blob* blob_with_point(size_t point_index) const; -``` -- Returns blob containing the point at given k-d tree index -- Essential for mapping between points and their containing blobs -- Both mutable and const versions available - -```cpp -std::vector blobs_with_points(const kd_results_t& res); -std::vector blobs_with_points(const kd_results_t& res) const; -``` -- Returns 
blobs containing points from k-d tree query results -- Maintains order of input results -- Useful for processing spatial query results - -```cpp -std::vector get_blob_indices(const Blob*) const; -``` -- Returns vector of point indices belonging to specified blob -- Uses lazy initialization for efficiency -- Important for blob-point relationship mapping - -#### Blob Information -```cpp -void print_blobs_info() const; -``` -- Prints detailed information about all blobs in cluster -- Outputs wire index ranges (U, V, W) and time slice indices -- Useful for debugging and analysis - -```cpp -const Blob* get_first_blob() const; -``` -- Returns blob at earliest time -- Throws ValueError if cluster is empty -- Based on time_blob_map ordering - -```cpp -const Blob* get_last_blob() const; -``` -- Returns blob at latest time -- Throws ValueError if cluster is empty -- Based on time_blob_map ordering - -```cpp -size_t get_num_time_slices() const; -``` -- Returns number of unique time slices in cluster -- Based on time_blob_map size -- Important for temporal analysis - -#### Blob Relationships -```cpp -std::vector is_connected(const Cluster& c, const int offset) const; -``` -- Determines connectivity between this cluster and another -- Offset parameter controls connection tolerance -- Returns vector of connecting blobs - -```cpp -const_blob_point_map_t get_closest_blob( - const geo_point_t& point, - double radius) const; -``` -- Returns map of blobs and their closest points within radius -- Each returned blob has at least one point within radius -- Useful for analyzing local blob structure - -### Geometric Analysis - -#### Position and Direction -```cpp -geo_point_t calc_ave_pos(const geo_point_t& origin, const double dis) const; -``` -- Calculates charge-weighted average position of nearby points -- Uses points within specified distance of origin -- Important for smoothing and local averaging - -```cpp -std::pair get_two_extreme_points() const; -``` -- Finds most distant pair 
of points in cluster -- Uses local averaging for stability -- Important for determining cluster extent - -```cpp -std::pair get_highest_lowest_points(size_t axis = 1) const; -``` -- Returns points at extremes of given Cartesian axis -- Default is Y-axis (axis=1) -- Returns points in descending order - -```cpp -std::pair get_earliest_latest_points() const; -``` -- Returns points at extremes of X-axis (drift direction) -- Returns points in ascending order -- Important for drift time analysis - -```cpp -std::pair get_front_back_points() const; -``` -- Returns points at extremes of Z-axis -- Used for longitudinal extent analysis -- Important for track reconstruction - -#### Shape Analysis -```cpp -std::vector get_hull() const; -``` -- Computes convex hull of cluster points -- Uses QuickHull algorithm -- Important for shape analysis and visualization - -```cpp -double get_length() const; -``` -- Returns geometric size of cluster -- Based on transverse extents and time -- Uses cached value for efficiency - -```cpp -std::tuple get_uvwt_range() const; -std::tuple get_uvwt_min() const; -std::tuple get_uvwt_max() const; -``` -- Return wire indices and time ranges -- Important for detector coordinate analysis -- Provide min/max values for U, V, W coordinates and time - -```cpp -geo_point_t get_center() const; -``` -- Returns the geometric center (centroid) of the cluster -- Calculated as average position of all points -- Uses charge weighting if available -- Cached for efficiency after first calculation -- Important for PCA and other geometric calculations -- Triggers PCA calculation if not already performed - -```cpp -geo_vector_t get_pca_axis(int axis) const; -``` -- Returns principal component axis vector for specified component -- Parameters: - - axis: Index of principal component (0, 1, or 2) - - 0: Primary (longest) axis - - 1: Secondary axis - - 2: Tertiary (shortest) axis -- Returns normalized direction vector -- Triggers PCA calculation if not already performed -- 
Important for determining cluster orientation -- Throws IndexError if axis is invalid - -```cpp -double get_pca_value(int axis) const; -``` -- Returns eigenvalue for specified principal component -- Parameters: - - axis: Index of principal component (0, 1, or 2) - - 0: Largest eigenvalue (most variance) - - 1: Middle eigenvalue - - 2: Smallest eigenvalue -- Eigenvalues indicate spread of points along each axis -- Useful for shape analysis: - - Large ratio between values indicates elongated structure - - Similar values indicate spherical structure -- Triggers PCA calculation if not already performed -- Throws IndexError if axis is invalid - -These functions are particularly important for: -- Track finding and reconstruction -- Cluster shape analysis -- Trajectory determination -- Pattern recognition -- Quality assessment of clustering - -Note on PCA Implementation: -- PCA calculation is performed lazily (only when needed) -- Results are cached for efficiency -- Uses Eigen library for eigenvalue decomposition -- Considers charge weighting when available -- Thread-safe for const access -- Invalidated when cluster structure changes - -#### Direction Finding -```cpp -std::pair hough_transform( - const geo_point_t& point, - const double radius, - HoughParamSpace param_space = HoughParamSpace::theta_phi, - std::shared_ptr s3dpc = nullptr, - const std::vector& global_indices = {}) const; -``` -- Performs Hough transform for direction finding -- Supports different parameter spaces (theta-phi or costheta-phi) -- Essential for track direction determination -- Optional external point cloud support - -```cpp -geo_vector_t vhough_transform(...); // Same parameters as hough_transform -``` -- Converts Hough transform results to directional vector -- More convenient than raw Hough parameters -- Returns normalized direction vector - -### Graph Operations - -#### Graph Construction and Management -```cpp -void Create_graph(const bool use_ctpc = true) const; -``` -- Creates graph 
representation of cluster -- Optional CTPC (Continuous Track Point Cloud) usage -- Foundation for path finding operations - -```cpp -void Establish_close_connected_graph() const; -``` -- Creates edges between points within blobs and overlapping blobs -- Uses distance-based cuts for edge creation -- Important for initial graph structure - -```cpp -void Connect_graph(const bool use_ctpc = false) const; -``` -- Connects graph components with additional edges -- Optional CTPC usage for validation -- Completes graph connectivity - -#### Path Finding -```cpp -void dijkstra_shortest_paths(const size_t pt_idx, const bool use_ctpc = true) const; -``` -- Computes shortest paths from given start point -- Uses Dijkstra's algorithm -- Essential for track path finding - -```cpp -void cal_shortest_path(const size_t dest_wcp_index) const; -``` -- Calculates specific shortest path to destination -- Uses results from dijkstra_shortest_paths -- Updates internal path storage - -```cpp -const std::list& get_path_wcps() const; -``` -- Returns current path point indices -- Available after path finding operations -- Represents ordered sequence of points - -#### Path Analysis -```cpp -geo_point_t get_furthest_wcpoint( - geo_point_t old_wcp, - geo_point_t dir, - const double step = 5*units::cm, - const int allowed_nstep = 12) const; -``` -- Finds furthest point along specified direction -- Uses step size and maximum steps for search -- Important for track extension - -```cpp -void adjust_wcpoints_parallel(size_t& start_idx, size_t& end_idx) const; -``` -- Adjusts endpoint positions to align with point cloud -- Updates indices in place -- Important for track endpoint refinement - -```cpp -bool construct_skeleton(const bool use_ctpc); -``` -- Builds skeletal representation of cluster -- Returns false if skeleton already exists -- Important for structural analysis - -### Time-Based Operations -```cpp -const time_blob_map_t& time_blob_map() const; -``` -- Returns mapping of time slices to blob 
sets -- Uses lazy initialization -- Essential for temporal analysis - -```cpp -std::unordered_map examine_x_boundary( - const double low_limit = -1*units::cm, - const double high_limit = 257*units::cm); -``` -- Analyzes cluster for boundary crossing -- Can split cluster at boundaries -- Returns map of resulting clusters - -### Quality Assessment -```cpp -bool judge_vertex( - geo_point_t& p_test, - const double asy_cut = 1./3., - const double occupied_cut = 0.85); -``` -- Evaluates if point is a vertex -- Uses asymmetry and occupancy criteria -- Updates point position during evaluation - -```cpp -bool sanity(Log::logptr_t log = nullptr) const; -``` -- Verifies internal consistency of cluster -- Optional logging of issues -- Important for debugging and validation - -### Utility Functions -```cpp -size_t hash() const; -``` -- Generates hash value representing cluster content -- Based on length and blob hashes -- Useful for comparison and caching - -```cpp -static bool cluster_less(const Cluster* a, const Cluster* b); -static void sort_clusters(std::vector& clusters); -static void sort_clusters(std::vector& clusters); -``` -- Comparison and sorting functions for clusters -- Based on multiple criteria (length, points, coordinates) -- Both const and non-const versions available - -## Important Notes - -1. **Performance Considerations** - - Many operations use lazy evaluation and caching - - K-d trees are built on-demand - - Complex operations should be used judiciously in tight loops - -2. **Thread Safety** - - Most const methods are thread-safe - - Caching operations may not be thread-safe - - Graph operations should be synchronized if used in multi-threaded context - -3. 
**Memory Management** - - The class manages various internal caches and data structures - - Users should be aware of potential memory usage with large point clouds - -## Dependencies -- Boost Graph Library -- WireCell utilities and interfaces -- Point Cloud data structures -- K-d tree implementations - - -## Best Practices - -1. **Efficient Point Access** - - Use bulk point access methods when possible - - Avoid repeated single point queries in loops - - Leverage k-d tree functionality for spatial queries - -2. **Graph Operations** - - Create graphs only when needed - - Reuse path finding results when possible - - Consider using cached results for repeated operations - -3. **Memory Optimization** - - Clear caches if memory becomes a concern - - Use appropriate container types for point storage - - Monitor memory usage with large datasets - -## Common Pitfalls - -1. **Performance Issues** - - Avoid repeated construction of k-d trees - - Don't perform point-by-point operations when bulk operations are available - - Be careful with large-scale graph operations - -2. **Accuracy Considerations** - - Be aware of floating-point precision in spatial calculations - - Consider distance metrics carefully in spatial queries - - Validate results when working with edge cases - -3. **Resource Management** - - Don't assume caches are always valid - - Clear unnecessary data when working with memory constraints - - Be mindful of graph construction costs - -## Contributing -When extending or modifying the Facade_Cluster class: -- Maintain const correctness -- Update caching mechanisms appropriately -- Document performance implications -- Add appropriate test cases -- Follow existing coding style and conventions - diff --git a/clus/docs/Facade_Cluster_alg.md b/clus/docs/Facade_Cluster_alg.md deleted file mode 100644 index 577d5f84c..000000000 --- a/clus/docs/Facade_Cluster_alg.md +++ /dev/null @@ -1,298 +0,0 @@ - - -# Detailed Algorithm Explanations for Facade_Cluster - -## 1. 
Graph Construction Algorithms - -### 1.1 Establish_close_connected_graph -This algorithm creates the initial graph structure connecting points within and between blobs. - -```python -Algorithm: Establish_close_connected_graph - -Input: Collection of blobs and their points -Output: Connected graph representing close points - -1. Initialize data structures: - - Create maps for wire indices (U, V, W planes) - - Initialize empty graph - -2. For each blob: - - Map points to wire indices - - Create wire-index to point mappings for each plane - -3. Create in-blob connections: - For each point p1 in blob: - For each point p2 in same blob: - if are_connected(p1, p2): - distance = calculate_distance(p1, p2) - add_edge(p1, p2, distance) - -4. Create between-blob connections: - For each time slice: - For each blob pair in time slice: - if blobs_overlap(blob1, blob2): - connect_overlapping_blobs(blob1, blob2) - -5. Apply connection criteria: - - Maximum wire interval check - - Distance threshold check - - Angular separation check -``` - -Key Features: -- Wire plane-based connectivity check -- Distance-based edge creation -- Time slice consideration -- Angular constraints - -### 1.2 Connect_graph -This algorithm enhances connectivity between graph components. - -```python -Algorithm: Connect_graph - -Input: Initial graph from Establish_close_connected_graph -Output: Fully connected graph with additional edges - -1. Find connected components: - components = find_connected_components(graph) - -2. For each component pair: - 2.1 Create point clouds for each component - 2.2 Find closest points between components: - - Use k-d tree search - - Consider multiple starting points - - Apply Hough transform for direction - -3. For each potential connection: - 3.1 Validate path: - - Check point density - - Verify trajectory smoothness - - Consider detector geometry - - 3.2 If path valid: - - Add new edges - - Update connectivity - -4. 
Optional CTPC validation: - If use_ctpc: - - Validate paths through detector - - Remove invalid connections -``` - -## 2. Path Finding Algorithms - -### 2.1 Dijkstra Implementation -Customized Dijkstra's algorithm for track finding. - -```python -Algorithm: dijkstra_shortest_paths - -Input: -- Starting point index -- Graph structure -- Optional CTPC validation - -Output: -- Distance to all points -- Parent pointers for path reconstruction - -1. Initialize: - - Set all distances to infinity - - Set source distance to 0 - - Create priority queue Q - -2. Custom distance metric: - distance_metric(v1, v2): - base_distance = euclidean_distance(v1, v2) - if use_ctpc: - quality = evaluate_path_quality(v1, v2) - return base_distance * quality_factor(quality) - return base_distance - -3. Main loop: - While Q not empty: - u = Q.extract_min() - For each neighbor v of u: - alt = distance[u] + distance_metric(u, v) - if alt < distance[v]: - distance[v] = alt - parent[v] = u - Q.decrease_key(v, alt) - -4. Path reconstruction: - reconstruct_path(parent, target): - path = empty_list - while target ≠ source: - path.prepend(target) - target = parent[target] - path.prepend(source) - return path -``` - -## 3. Spatial Analysis Algorithms - -### 3.1 PCA Implementation -Principal Component Analysis for cluster orientation. - -```python -Algorithm: Calculate_PCA - -Input: Point cloud with optional charge weights -Output: Principal axes and eigenvalues - -1. Calculate center: - center = weighted_mean(points, charges) - -2. Build covariance matrix: - For each point p: - p_centered = p - center - For i in [0,1,2]: - For j in [i,2]: - cov[i,j] += weight * p_centered[i] * p_centered[j] - if i != j: cov[j,i] = cov[i,j] - -3. Eigendecomposition: - - Use Eigen library for decomposition - - Sort eigenvalues in descending order - - Normalize eigenvectors - -4. Cache results: - - Store center point - - Store principal axes - - Store eigenvalues - -5. 
Quality checks: - - Verify orthogonality - - Check eigenvalue ratios - - Validate axis directions -``` - -### 3.2 Hough Transform for Direction Finding - -```python -Algorithm: vhough_transform - -Input: -- Reference point -- Search radius -- Parameter space type (theta-phi or costheta-phi) - -Output: Direction vector - -1. Initialize parameter space: - - Create 2D histogram (180×360 bins) - - Define parameter ranges based on space type - -2. Collect points: - points = find_points_in_radius(reference, radius) - -3. For each point: - 3.1 Calculate direction vector to reference - 3.2 Convert to chosen parameter space: - If theta-phi: - theta = acos(dir.z) - phi = atan2(dir.y, dir.x) - If costheta-phi: - costheta = dir.z - phi = atan2(dir.y, dir.x) - - 3.3 Weight contribution: - weight = calculate_weight(point) - histogram[theta_bin][phi_bin] += weight - -4. Find maximum: - - Locate histogram maximum - - Convert parameters back to vector - - Normalize result - -5. Optional refinement: - - Local maximum fitting - - Multiple peak detection - - Angular uncertainty estimation -``` - -## 4. Connectivity Analysis - -### 4.1 Blob Connection Algorithm - -```python -Algorithm: is_connected - -Input: -- Two clusters -- Offset parameter for tolerance - -Output: Vector of connecting blobs - -1. Time slice mapping: - - Create time-ordered blob maps for both clusters - - Find overlapping time ranges - -2. For each time overlap: - 2.1 Check blob pairs: - For blob1 in cluster1[time]: - For blob2 in cluster2[time]: - if blobs_overlap(blob1, blob2, offset): - add_to_connections(blob1) - -3. Overlap check: - blob_overlap(blob1, blob2, offset): - - Check wire index ranges - - Apply offset tolerance - - Verify spatial proximity - - Consider charge distributions - -4. Return connecting blobs: - - Sort by connection strength - - Filter weak connections - - Validate consistency -``` - -## 5. 
Point Cloud Operations - -### 5.1 Closest Point Search Algorithm - -```python -Algorithm: get_closest_points - -Input: Two clusters -Output: Closest point indices and distance - -1. Initialize search: - min_distance = infinity - best_points = null - -2. Multi-start search: - 2.1 Try endpoints: - - Start-Start - - Start-End - - End-Start - - End-End - - 2.2 For each start pair: - current = start_pair - while not converged: - p1 = find_closest(cluster1, p2) - p2 = find_closest(cluster2, p1) - if no_improvement: - break - update_best_if_better(p1, p2) - -3. Local refinement: - - Use k-d tree for efficient nearby point search - - Consider charge weights - - Apply geometric constraints - -4. Validation: - - Check distance consistency - - Verify path feasibility - - Consider detector constraints -``` - -Would you like me to: -1. Add more specific implementation details for any algorithm? -2. Explain the mathematical foundations of certain algorithms? -3. Provide complexity analysis for the algorithms? -4. Show more example use cases? \ No newline at end of file diff --git a/clus/docs/Facade_Grouping.md b/clus/docs/Facade_Grouping.md deleted file mode 100644 index 454519c9d..000000000 --- a/clus/docs/Facade_Grouping.md +++ /dev/null @@ -1,166 +0,0 @@ -I'll analyze the Facade::Grouping class structure and logic in detail. This is the original CTPointCloud in Wire-Cell Prototype. - - - - - -# Facade::Grouping Class Analysis - -## Class Overview -The Facade::Grouping class is part of the WireCell::PointCloud::Facade namespace and represents a hierarchical structure for grouping clusters of points in a wire chamber detector system. It inherits from NaryTree::FacadeParent, implementing a tree-like structure where each Grouping node can have multiple Cluster children. - -## Key Components - -### 1. 
Core Data Members -- `TPCParams m_tp`: Holds TPC (Time Projection Chamber) parameters including: - - Wire pitches (pitch_u, pitch_v, pitch_w) - - Wire angles (angle_u, angle_v, angle_w) - - Drift timing parameters -- `IAnodePlane::pointer m_anode`: Pointer to anode plane interface -- `mapfp_t m_proj_centers`: Cached projected centers for each face/plane -- `mapfp_t m_pitch_mags`: Cached pitch magnitudes for each face/plane -- `mapfp_t>> m_dead_winds`: Maps dead wire regions - -### 2. Point Cloud Data Structure -The class manages 2D point cloud data through: -- KD-trees for efficient spatial searching -- Scoped views of the point cloud data organized by face and plane -- Support for both active and dead regions in the detector - -## Major Functionality - -### 1. Initialization and Configuration -```cpp -void set_params(const TPCParams& tp) -void set_anode(const IAnodePlane::pointer anode) -void on_construct(node_type* node) -``` -- Configures TPC parameters -- Sets up anode plane interface -- Initializes dead wire regions during construction - -### 2. Spatial Operations -The class provides several methods for spatial analysis: - -a) Point Validation: -```cpp -bool is_good_point(const geo_point_t& point, const int face, double radius, - int ch_range, int allowed_bad) -``` -- Validates points based on: - - Proximity to existing points - - Presence of dead channels - - Number of matching planes - -b) Point Search: -```cpp -kd_results_t get_closest_points(const geo_point_t& point, const double radius, - const int face, int pind) -``` -- Uses KD-tree for efficient radius-based point searches -- Transforms 3D points to 2D coordinates for plane-specific searches - -### 3. Dead Region Management -```cpp -bool get_closest_dead_chs(const geo_point_t& point, const int ch_range, - const int face, int pind) -``` -- Manages dead wire regions -- Checks if points fall within dead channel ranges -- Supports channel-based range queries - -### 4. 
Coordinate Conversions -```cpp -std::tuple convert_3Dpoint_time_ch(const geo_point_t& point, - const int face, const int pind) -``` -- Converts 3D spatial points to: - - Time indices - - Channel numbers -- Accounts for: - - Wire angles - - Pitch sizes - - Drift parameters - -### 5. Data Access and Caching -```cpp -const mapfp_t& proj_centers() -const mapfp_t& pitch_mags() -``` -- Lazy initialization of cached values -- Maintains geometric parameters for each face/plane combination - -## Implementation Details - -### 1. Data Organization -- Uses face/plane indexing system -- Maintains separate KD-trees for each face/plane combination -- Implements lazy initialization for computed values - -### 2. Search Optimization -- Uses KD-trees for efficient spatial searches -- Maintains separate structures for active and dead regions -- Implements radius-based searching with coordinate transformations - -### 3. State Management -- Implements hashing for state comparison -- Maintains cached geometric parameters -- Supports runtime configuration of TPC parameters - -## Key Algorithms - -### 1. Point Validation -```plaintext -1. For each plane in face: - - Check for nearby points within radius - - Check for dead channels within range - - Count matching planes -2. Compare matches against allowed_bad threshold -3. Return validation result -``` - -### 2. Dead Channel Checking -```plaintext -1. Convert 3D point to time/channel coordinates -2. Check channel range [wind-range, wind+range] -3. For each channel in range: - - Check if channel is in dead region - - Verify point's x-coordinate falls within dead region bounds -4. Return true if point matches any dead region -``` - -### 3. Coordinate Transformation -```plaintext -1. Calculate projected coordinates using wire angles -2. Apply pitch and center corrections -3. Convert spatial coordinates to time/channel indices -4. Account for drift time and TPC parameters -``` - -## Usage Considerations - -1. 
Performance Optimizations: - - Uses KD-trees for efficient spatial searches - - Implements caching for frequently used values - - Lazy initialization of computed parameters - -2. Configuration Requirements: - - Must set TPCParams before use - - Requires valid IAnodePlane pointer - - Needs proper initialization of dead regions - -3. Error Handling: - - Checks for null pointers - - Validates geometric parameters - - Ensures proper initialization of required components - - - -This class is a sophisticated implementation of a spatial data structure designed for wire chamber detector systems. It manages both the geometric relationships between detector components and the efficient searching and validation of points within the detector space. - -The key strengths of this implementation are: -1. Efficient spatial searching using KD-trees -2. Comprehensive handling of dead regions -3. Flexible coordinate transformation system -4. Robust caching mechanism for frequently used values - diff --git a/clus/docs/Facade_Util.md b/clus/docs/Facade_Util.md deleted file mode 100644 index cc801e300..000000000 --- a/clus/docs/Facade_Util.md +++ /dev/null @@ -1,573 +0,0 @@ -# Simple3DPointCloud Class Analysis - -## Core Purpose -The Simple3DPointCloud class is designed to manage and perform operations on 3D point cloud data with efficient spatial queries using a k-d tree data structure. It's part of the WireCell::PointCloud::Facade namespace and appears to be used in particle physics detector data processing. - -## Class Structure - -### Key Member Variables -1. `m_points` (points_type) - - A 3-dimensional vector structure storing point coordinates - - Organized as three separate arrays for x, y, z coordinates - - Uses a columnar data layout for better memory efficiency - -2. 
`m_kd` (std::unique_ptr) - - A lazy-initialized k-d tree for spatial queries - - Uses NFKDVec::Tree with dynamic indexing - - Mutable to allow lazy initialization in const methods - -### Important Type Definitions -```cpp -using nfkd_t = NFKDVec::Tree; -using points_type = nfkd_t::points_type; -using results_type = nfkd_t::results_type; -using point_type = std::vector; -``` - -## Key Algorithms and Operations - -### 1. Point Addition (add method) -```cpp -void add(const point_type& new_pt) { - // Validate 3D point - if (new_pt.size() != 3) { - raise("points must be 3D"); - } - // Add coordinates to respective arrays - for (size_t ind=0; ind<3; ++ind) { - points()[ind].push_back(new_pt[ind]); - } - // Update k-d tree - kd().append({{new_pt[0]}, {new_pt[1]}, {new_pt[2]}}); -} -``` - -### 2. K-D Tree Management -- Lazy initialization pattern used for the k-d tree -- Two versions (const and non-const) of the kd() method -- Tree can be rebuilt on demand using the rebuild parameter - -### 3. Nearest Neighbor Search Operations - -#### A. Closest Point Index Query -```cpp -results_type get_closest_index(const geo_point_t& p, const size_t N) const { - return kd().knn(N, p); // K-nearest neighbors search -} -``` - -#### B. Closest Point Search -```cpp -std::pair get_closest_wcpoint(const geo_point_t& p) const { - const auto knn_res = kd().knn(1, p); - // Returns index and actual point coordinates -} -``` - -#### C. Directional Search (get_closest_point_along_vec) -Searches for points along a specified direction with constraints: -- Starting point (p_test1) -- Direction vector (dir) -- Search distance (test_dis) -- Step size (dis_step) -- Angular cutoff (angle_cut) -- Distance cutoff (dis_cut) - -## Key Features - -1. **Efficiency** - - Columnar data storage for better memory access patterns - - Lazy initialization of k-d tree - - Efficient spatial queries using k-d tree structure - -2. 
**Flexibility** - - Supports both exact and approximate nearest neighbor searches - - Allows directional searches with constraints - - Supports multiple point query methods - -3. **Safety** - - Input validation for 3D points - - Error handling for invalid operations - - Const-correctness for thread safety - -4. **Memory Management** - - Smart pointer usage for k-d tree - - Automatic cleanup through RAII - - Efficient memory usage through columnar storage - -## Common Use Patterns - -1. **Building Point Cloud** -```cpp -Simple3DPointCloud cloud; -cloud.add({x, y, z}); // Add points one at a time -``` - -2. **Spatial Queries** -```cpp -// Find N nearest neighbors -auto neighbors = cloud.get_closest_index(point, N); - -// Find single nearest point -auto [index, point] = cloud.get_closest_wcpoint(query_point); -``` - -3. **Directional Searches** -```cpp -auto [index, distance] = cloud.get_closest_point_along_vec( - start_point, direction, - search_distance, step_size, - angle_cutoff, distance_cutoff -); -``` - -## Implementation Notes - -1. The class uses a columnar data structure instead of an array of structs, which can provide better cache performance for certain operations. - -2. The k-d tree is lazily initialized and can be rebuilt on demand, allowing for efficient updates when the point cloud changes. - -3. The class provides both exact (knn) and constrained (directional) search capabilities. - -4. Error handling is implemented for invalid inputs and operations. - -5. The class supports integration with other components through its template-based closest points comparison functionality. - -# Multi2DPointCloud Class Analysis - -## Core Purpose -The Multi2DPointCloud class is designed to manage and perform operations on 2D projections of 3D point clouds in three different planes (u, v, w), commonly used in particle physics wire chamber detectors. It maintains separate 2D point clouds for each plane and provides efficient spatial queries using k-d trees. 
- -## Class Structure - -### Key Member Variables -1. `m_points[3]` (points_type[3]) - - Array of 3 two-dimensional vector structures storing point coordinates - - Each points_type stores x and y coordinates for a plane - - Uses columnar data layout for efficiency - -2. `m_kd[3]` (std::unique_ptr[3]) - - Array of 3 lazy-initialized k-d trees for spatial queries - - One tree per plane (u, v, w) - - Mutable to allow lazy initialization in const methods - -3. `angle_uvw[3]` (double[3]) - - Array storing the angles for each plane (u, v, w) - - Used for projecting 3D points onto 2D planes - -### Important Type Definitions -```cpp -using nfkd_t = NFKDVec::Tree; -using coordinates_type = nfkd_t::coordinates_type; -using points_type = nfkd_t::points_type; -using results_type = nfkd_t::results_type; -using point_type = std::vector; -``` - -## Key Algorithms and Operations - -### 1. Initialization -```cpp -Multi2DPointCloud(double angle_u, double angle_v, double angle_w) - : angle_uvw{angle_u, angle_v, angle_w} { - for (size_t plane = 0; plane < 3; ++plane) { - points(plane).resize(2); // 2D points need only x,y coordinates - } -} -``` - -### 2. Point Addition and Projection -```cpp -void add(const geo_point_t& new_pt) { - for (size_t plane = 0; plane < 3; ++plane) { - // Project 3D point onto 2D plane using rotation matrix - double x = new_pt[0]; // x coordinate remains unchanged - // Calculate y coordinate using rotation matrix - double y = cos(angle_uvw[plane]) * new_pt[2] - - sin(angle_uvw[plane]) * new_pt[1]; - - // Store projected coordinates - points(plane)[0].push_back(x); - points(plane)[1].push_back(y); - // Update k-d tree for the plane - kd(plane).append({{x}, {y}}); - } -} -``` - -### 3. 
K-D Tree Management -Maintains separate k-d trees for each plane with lazy initialization: -```cpp -const nfkd_t& kd(const size_t plane, const bool rebuild=false) const { - if (rebuild) m_kd[plane] = nullptr; - if (m_kd[plane]) return *m_kd[plane]; - m_kd[plane] = std::make_unique(2); - return *m_kd[plane]; -} -``` - -### 4. Spatial Query Operations - -#### A. Closest Point Distance Query -```cpp -std::pair get_closest_2d_dis(const geo_point_t& p, size_t plane) const { - // Project 3D point to 2D - double x = p[0]; - double y = cos(angle_uvw[plane]) * p.z() - sin(angle_uvw[plane]) * p.y(); - - // Perform k-nearest neighbor search - const auto& res = kd(plane).knn(1, {x, y}); - - // Return index and distance - if (res.size() == 1) - return std::make_pair(res[0].first, sqrt(res[0].second)); - else - return std::make_pair(-1, 1e9); -} -``` - -#### B. Radius Search -```cpp -std::vector> get_closest_2d_index_radius( - const geo_point_t& p, const double radius, size_t plane) const { - // Project point to 2D - double x = p[0]; - double y = cos(angle_uvw[plane]) * p.z() - sin(angle_uvw[plane]) * p.y(); - - // Search within radius - const auto& res = kd(plane).radius(radius * radius, {x, y}); - - // Convert results - std::vector> ret; - for (const auto& r : res) { - ret.push_back(std::make_pair(r.first, sqrt(r.second))); - } - return ret; -} -``` - -#### C. K-Nearest Neighbors Search -```cpp -std::vector> get_closest_2d_index_knn( - const geo_point_t& p, const int N, size_t plane) const { - // Project point to 2D - double x = p[0]; - double y = cos(angle_uvw[plane]) * p.z() - sin(angle_uvw[plane]) * p.y(); - - // Find N nearest neighbors - const auto& res = kd(plane).knn(N, {x, y}); - - // Convert results - std::vector> ret; - for (const auto& r : res) { - ret.push_back(std::make_pair(r.first, r.second)); - } - return ret; -} -``` - -## Key Features - -1. 
**Multi-Plane Management** - - Maintains separate 2D projections for each plane - - Independent k-d trees for efficient spatial queries in each plane - - Consistent projection transformations - -2. **Efficient Projection** - - Uses rotation matrices for 3D to 2D projection - - Preserves x-coordinate while transforming y-z plane - - Optimized for wire chamber geometry - -3. **Flexible Querying** - - Supports nearest neighbor searches - - Implements radius-based searches - - Provides k-nearest neighbors queries - - Returns both indices and distances - -4. **Memory Efficiency** - - Columnar data storage - - Lazy k-d tree initialization - - Smart pointer management - -## Common Use Patterns - -1. **Initialization and Point Addition** -```cpp -Multi2DPointCloud cloud(angle_u, angle_v, angle_w); -cloud.add(point3d); // Automatically projects to all planes -``` - -2. **Spatial Queries Per Plane** -```cpp -// Find closest point in a plane -auto [index, distance] = cloud.get_closest_2d_dis(point3d, plane); - -// Find points within radius -auto neighbors = cloud.get_closest_2d_index_radius(point3d, radius, plane); - -// Find k nearest neighbors -auto knn = cloud.get_closest_2d_index_knn(point3d, k, plane); -``` - -## Implementation Notes - -1. The class uses a projection scheme specific to wire chamber detector geometry where each plane represents a different wire orientation. - -2. The projection preserves the x-coordinate and transforms the y-z coordinates based on the plane angle. - -3. Each plane maintains its own k-d tree for efficient spatial queries in the projected 2D space. - -4. The class provides comprehensive error handling and boundary checking. - -5. The implementation is optimized for the specific needs of particle physics detector data processing. 
- - -# DynamicPointCloud Class Analysis - -## Core Purpose -The DynamicPointCloud class is a sophisticated data structure that combines 3D point cloud management with 2D projections, specifically designed for particle physics detector track analysis. It maintains both 3D and 2D representations of points while associating them with clusters and blobs, making it particularly useful for particle track reconstruction. - -## Class Structure - -### Key Member Variables -1. `m_pc2d` (Multi2DPointCloud) - - Handles 2D projections in three planes (u, v, w) - - Manages wire plane geometry and projections - -2. `m_pc3d` (Simple3DPointCloud) - - Stores and manages the full 3D point cloud data - - Provides 3D spatial queries - -3. `m_winds[3]` (std::vector[3]) - - Stores wire indices for each plane (u, v, w) - - Used for detector readout mapping - -4. `m_clusters` (std::vector) - - Stores pointers to associated cluster objects - - Maps points to their parent clusters - -5. `m_blobs` (std::vector) - - Stores pointers to associated blob objects - - Contains charge deposit information - -### Important Type Definitions -```cpp -using points3d_type = Simple3DPointCloud::points_type; -using points2d_type = Multi2DPointCloud::points_type; -using point_type = std::vector; -``` - -## Key Algorithms and Operations - -### 1. Point Addition Methods - -#### A. 
Standard Point Addition -```cpp -void add_points(const Cluster* cluster, const int flag=0, - const double step = 0.6*units::cm) { - size_t current_size = get_num_points(); - const auto& winds = cluster->wire_indices(); - - if (flag == 0) { - // Add actual points from cluster - for (size_t i = 0; i != cluster->npoints(); i++) { - // Store cluster reference - m_clusters.push_back(cluster); - - // Add 3D point - m_pc3d.add({cluster->point3d(i).x(), - cluster->point3d(i).y(), - cluster->point3d(i).z()}); - - // Add 2D projections - m_pc2d.add(cluster->point3d(i)); - - // Store wire indices - for (size_t plane = 0; plane < 3; ++plane) { - m_winds[plane].push_back(winds[plane][i]); - } - - // Store blob reference - m_blobs.push_back(cluster->blob_with_point(i)); - } - } - else { - // Add skeleton points with interpolation - const std::list& path_wcps = cluster->get_path_wcps(); - - // Interpolate points along path - geo_point_t prev_wcp = cluster->point3d(path_wcps.front()); - for (auto it = path_wcps.begin(); it != path_wcps.end(); it++) { - geo_point_t test_point = cluster->point3d(*it); - double dis = (test_point - prev_wcp).magnitude(); - - if (dis <= step) { - // Add point directly if close enough - points.push_back(test_point); - } - else { - // Interpolate points along segment - int num_points = int(dis / step) + 1; - for (int k = 0; k != num_points; k++) { - double t = (k + 1.0) / num_points; - geo_point_t current_pt = prev_wcp + - (test_point - prev_wcp) * t; - points.push_back(current_pt); - } - } - prev_wcp = test_point; - } - } -} -``` - -#### B. 
Directional Point Addition -```cpp -void add_points(const Cluster* cluster, const geo_point_t& p_test, - const geo_point_t& dir_unmorm, const double range, - const double step, const double angle) { - geo_point_t dir = dir_unmorm.norm(); - int num_points = int(range / step) + 1; - - for (int k = 0; k != num_points; k++) { - // Calculate distance cut based on angle - double dis_cut = std::min( - std::max(2.4 * units::cm, - k * step * sin(angle / 180. * 3.1415926)), - 13 * units::cm); - - // Add point with calculated position - m_clusters.push_back(cluster); - m_blobs.push_back(nullptr); - - geo_point_t new_point = p_test + dir * k * step; - m_pc3d.add(new_point); - m_pc2d.add(new_point); - - // Store distance cut as wire index - for (int plane = 0; plane < 3; ++plane) { - m_winds[plane].push_back(int(dis_cut)); - } - } -} -``` - -### 2. Spatial Query Operations - -#### A. 2D Point Information Retrieval -```cpp -std::vector> -get_2d_points_info(const geo_point_t& p, const double radius, - const int plane) { - // Get points within radius in specified plane - auto results = m_pc2d.get_closest_2d_index_radius(p, radius, plane); - - // Build result tuples with distance, cluster, and index - std::vector> return_results; - for (const auto& [index, distance] : results) { - return_results.push_back(std::make_tuple( - distance, - m_clusters.at(index), - index - )); - } - return return_results; -} -``` - -### 3. Advanced Analysis Methods - -#### A. 
Hough Transform -```cpp -std::pair hough_transform(const geo_point_t& origin, - const double dis) const { - // Collect points within distance - std::vector pts; - std::vector blobs; - auto results = m_pc3d.kd().radius(dis * dis, origin); - - // Build histogram in parameter space - auto hist = make_histogram(...); - - for (const auto& [point_index, _] : results) { - const auto* blob = m_blobs[point_index]; - auto charge = blob->charge(); - if (charge <= 0) continue; - - const auto& pt = m_pc3d.point(point_index); - const Vector dir = (pt - origin).norm(); - const double r = (pt - origin).magnitude(); - - // Calculate parameters and fill histogram - const double p1 = theta_param(dir); - const double p2 = phi_param(dir); - - // Weight based on distance and charge - double weight = charge / blob->npoints(); - if (r >= 10 * units::cm) { - weight *= pow(10 * units::cm / r, 2); - } - - hist(p1, p2, weight); - } - - // Find maximum bin - auto max_bin = find_maximum_bin(hist); - return {max_bin.center(0), max_bin.center(1)}; -} -``` - -## Key Features - -1. **Dual Representation** - - Maintains both 3D and 2D point clouds - - Efficiently handles projections and transformations - - Preserves relationships between different views - -2. **Cluster Association** - - Associates points with physics clusters - - Maintains blob information for charge analysis - - Supports track reconstruction - -3. **Flexible Point Addition** - - Supports direct point addition - - Provides path interpolation - - Allows directional point generation - -4. **Advanced Analysis** - - Implements Hough transform for track finding - - Supports various spatial queries - - Handles charge-weighted calculations - -## Common Use Patterns - -1. **Cluster Processing** -```cpp -DynamicPointCloud cloud(angle_u, angle_v, angle_w); -cloud.add_points(cluster); // Add all points from cluster -``` - -2. 
**Track Finding** -```cpp -// Find track direction using Hough transform -auto [theta, phi] = cloud.hough_transform(origin, search_radius); -``` - -3. **Spatial Analysis** -```cpp -// Find nearby points in 2D projection -auto points = cloud.get_2d_points_info(point, radius, plane); -``` - -## Implementation Notes - -1. The class combines Simple3DPointCloud and Multi2DPointCloud for comprehensive spatial analysis. - -2. Point addition methods handle both direct points and interpolated paths. - -3. The Hough transform implementation is optimized for particle track finding. - -4. Wire indices are used to map points to detector readout channels. - -5. The implementation supports charge-weighted analysis for better track reconstruction. - diff --git a/clus/docs/Facade_blob.md b/clus/docs/Facade_blob.md deleted file mode 100644 index 524371079..000000000 --- a/clus/docs/Facade_blob.md +++ /dev/null @@ -1,150 +0,0 @@ - -# Facade_Blob Class Analysis - -## Overview -The Facade_Blob class is part of the WireCell::PointCloud::Facade namespace and implements a facade pattern over a point cloud tree structure. It's designed to represent a "blob" of points in a 3D space, specifically for wire chamber detector data analysis. - -## Class Structure - -### Base Class -```cpp -class Blob : public NaryTree::Facade -``` -- Inherits from NaryTree::Facade with points_t template parameter -- Implements a facade pattern to provide high-level interface over point cloud data - -### Key Member Variables - -1. Geometry Properties: - - `float_t charge_`: Total charge of the blob - - `float_t center_x_, center_y_, center_z_`: Center coordinates - - `int_t npoints_`: Number of points in the blob - -2. 
Wire Indices: - - Slice indices (time dimension): - - `slice_index_min_` - - `slice_index_max_` - - - Wire plane indices (for 3 orientations U, V, W): - - U plane: `u_wire_index_min_`, `u_wire_index_max_` - - V plane: `v_wire_index_min_`, `v_wire_index_max_` - - W plane: `w_wire_index_min_`, `w_wire_index_max_` - -3. Wire Analysis Data: - - `max_wire_interval_`: Maximum interval between wires - - `min_wire_interval_`: Minimum interval between wires - - `max_wire_type_`: Wire type with maximum interval (0:u, 1:v, 2:w) - - `min_wire_type_`: Wire type with minimum interval (0:u, 1:v, 2:w) - -## Key Algorithms - -### 1. Blob Overlap Detection -```cpp -bool overlap_fast(const Blob& b, const int offset) const -``` -Algorithm: -1. Checks for overlap in each wire plane (U, V, W) independently -2. Uses an offset parameter for adjustable overlap detection -3. Returns true if overlap exists in all three planes -4. Implementation uses fast rejection testing: - - If any plane shows no overlap, returns false immediately - - Overlap condition: min of one ≤ max of other + offset - 1 - -### 2. Blob Comparison (blob_less algorithm) -```cpp -bool blob_less(const Blob* a, const Blob* b) -``` -Implements a strict weak ordering for blobs using hierarchical comparison: -1. Compare number of points -2. Compare total charge -3. Compare slice indices (min then max) -4. Compare wire indices in order (U, V, W, min then max) -5. Finally falls back to pointer comparison - -### 3. Hash Generation -```cpp -size_t hash() const -``` -Generates a unique hash combining multiple properties: -1. Number of points -2. Center coordinates (x, y, z) -3. Slice indices -4. Wire indices for all planes -Uses boost::hash_combine for combining individual values - -## Data Access and Management - -### Point Cloud Access -```cpp -std::vector points() const -``` -1. Retrieves 3D points from local point cloud dataset -2. Extracts x, y, z coordinates -3. 
Returns vector of geometric points - -### Construction and Initialization -```cpp -void on_construct(node_type* node) -``` -Initialization process: -1. Calls base class constructor -2. Retrieves local point clouds from node -3. Extracts scalar values into member variables -4. Caches frequently accessed values for performance - -## Validation and Consistency - -### Sanity Check -```cpp -bool sanity(Log::logptr_t log = nullptr) const -``` -Verifies blob consistency: -1. Compares stored point count with actual point cloud size -2. Logs discrepancies if logger is provided -3. Returns boolean indicating consistency - -## Integration with Cluster System - -### Cluster Access -```cpp -Cluster* cluster() -const Cluster* cluster() const -``` -1. Provides access to parent cluster -2. Uses templated facade pattern for type safety -3. Available in both const and non-const versions - -## Formatting and Display - -### Stream Output -```cpp -std::ostream& operator<<(std::ostream& os, const Blob& blob) -``` -Provides detailed blob information including: -1. Hash value -2. Number of points -3. Center position -4. Charge -5. All wire and slice indices - - -The Facade_Blob class is a sophisticated implementation for handling point cloud data in a wire chamber detector system. Here are the key points about its design and usage: - -1. Purpose: -- Provides a high-level interface to point cloud data specifically for particle detection -- Manages 3D spatial data along with wire chamber specific information -- Facilitates blob analysis and comparison operations - -2. Design Pattern: -- Uses the Facade pattern to simplify complex point cloud operations -- Implements a tree structure for hierarchical organization of blobs -- Maintains cached values for performance optimization - -3. 
Key Features: -- Fast overlap detection between blobs -- Comprehensive hash generation for blob identification -- Robust comparison operations for blob sorting -- Detailed validation and consistency checking -- Integration with larger cluster system - -The class is designed for performance and flexibility, with careful attention to const-correctness and memory management. \ No newline at end of file diff --git a/clus/docs/MultiAlgBlobClustering.md b/clus/docs/MultiAlgBlobClustering.md deleted file mode 100644 index 2bce19cc2..000000000 --- a/clus/docs/MultiAlgBlobClustering.md +++ /dev/null @@ -1,191 +0,0 @@ - - - -# MultiAlgBlobClustering Class Analysis - -## Overview -MultiAlgBlobClustering is a class designed for clustering point cloud data in a wire cell detector, specifically handling both "live" and "dead" regions. It's part of the WireCell clustering system and implements multiple interfaces including ITensorSetFilter, IConfigurable, and ITerminal. - -## Class Structure - -### Key Interfaces -- `ITensorSetFilter`: For processing tensor sets -- `IConfigurable`: For configuration management -- `ITerminal`: For finalization operations -- `Aux::Logger`: For logging capabilities - -### Important Member Variables -1. Data Management: - - `m_sink`: Bee::Sink for ZIP file output - - `m_bee_img`, `m_bee_ld`: Bee::Points for visualization - - `m_bee_dead`: Bee::Patches for dead regions - - `m_last_ident`: Tracks the last processed identifier - -2. Configuration Parameters: - - `m_inpath`: Input path pattern for point cloud data - - `m_outpath`: Output path pattern - - `m_dead_live_overlap_offset`: Overlap offset for dead-live clustering - - `m_x_boundary_low_limit`, `m_x_boundary_high_limit`: X-axis boundaries - - `m_save_deadarea`: Flag for saving dead area information - - `m_perf`: Performance monitoring flag - -## Workflow - -### 1. 
Initialization -- Constructor initializes logging and Bee visualization components -- Configuration is handled via `configure()` method, setting paths and parameters - -### 2. Main Processing Pipeline -The core processing happens in the `operator()` method: - -1. Input Validation - - Checks for end-of-stream condition - - Validates input tensor set - -2. State Management - - Manages identifiers for continuous processing - - Handles flushing of visualization data - -3. Data Loading - - Loads live point cloud tree - - Loads dead point cloud tree - - Initializes clustering structures - -4. Clustering Process -The clustering pipeline involves multiple stages: - -a) Live-Dead Clustering - - Connects live regions with dead regions - - Uses overlap offset parameter - -b) Extension Clustering - - Extends tracks in multiple passes - - Handles parallel and prolonged tracks - - Uses different distance thresholds - -c) Regular Clustering - - Processes standard clustering with and without extension - - Uses configurable distance parameters - -d) Specialized Clustering - - Parallel and prolonged track handling - - Close distance clustering - - Boundary examination - - Overclustering protection - -5. Output Generation - - Creates visualization data (Bee format) - - Generates output tensor sets - - Merges live and dead region data - -### 3. Finalization -- Handles cleanup via `finalize()` method -- Flushes remaining data -- Closes output files - -## Key Algorithms - -1. **Clustering Live-Dead** - - Purpose: Connect live regions with dead regions - - Parameters: Uses `m_dead_live_overlap_offset` - - Implementation: Analyzes overlap between live and dead regions - -2. **Clustering Extension** - - Purpose: Extend tracks and connect fragments - - Multiple passes with different parameters: - - Long distance (150cm) for prolonged tracks - - Medium distance (30cm) for parallel tracks - - Short distance (15cm) for regular connections - -3. 
**Clustering Regular** - - Purpose: Standard clustering operations - - Two modes: - - Without extension (60cm threshold) - - With extension (30cm threshold) - -4. **Specialized Clustering** - - Parallel/Prolonged: Handles special track cases - - Close Distance: For nearby point merging - - Boundary Examination: Checks X-axis boundaries - - Overclustering Protection: Prevents excessive merging - -## Performance Monitoring - -The class includes a comprehensive performance monitoring system: -- Uses `ExecMon` for timing measurements -- Provides detailed logging of cluster statistics -- Configurable via `m_perf` flag - -## Visualization Output - -The class generates multiple visualization outputs: -1. Live clusters visualization -2. Dead-live interaction visualization -3. Dead regions visualization (optional) -4. Output in Bee format for visualization tools - -## Error Handling -- Robust input validation -- Logging of processing stages -- Configuration validation -- Memory management for point cloud trees - - - - -```mermaid -flowchart TB - Input[Input TensorSet] --> ValidateInput[Validate Input] - ValidateInput --> LoadData[Load Point Cloud Data] - - subgraph DataLoading[Data Loading] - LoadData --> LiveTree[Load Live Tree] - LoadData --> DeadTree[Load Dead Tree] - end - - subgraph Clustering[Clustering Pipeline] - LiveTree --> LiveDead[Live-Dead Clustering] - DeadTree --> LiveDead - - LiveDead --> RegularClust[Regular Clustering] - RegularClust --> ExtendClust[Extension Clustering] - - ExtendClust --> ParallelClust[Parallel/Prolong Clustering] - ParallelClust --> CloseClust[Close Distance Clustering] - - CloseClust --> MultiPass[Multi-Pass Extension] - MultiPass --> Separate[Clustering Separate] - Separate --> Connect[Clustering Connect] - Connect --> Deghost[Clustering Deghost] - Deghost --> BoundaryCheck[X-Boundary Check] - BoundaryCheck --> ProtectOver[Protect Overclustering] - end - - subgraph Output[Output Generation] - ProtectOver --> GenViz[Generate 
Visualization] - GenViz --> BeeOutput[Bee Format Output] - ProtectOver --> GenTensor[Generate Tensor Output] - GenTensor --> MergeTensor[Merge Live/Dead Tensors] - end - - MergeTensor --> FinalOutput[Output TensorSet] - BeeOutput --> ZipFile[ZIP File Output] - -``` - -The MultiAlgBlobClustering class is a sophisticated component for processing point cloud data in wire cell detectors. It implements multiple clustering algorithms to handle both live and dead regions in the detector. - -The key features of the class include: - -1. Flexible input/output handling with configurable paths -2. Multiple clustering stages with different strategies -3. Performance monitoring capabilities -4. Visualization output in Bee format -5. Comprehensive error handling and logging - -The class processes data through several stages: -1. First validates and loads point cloud data -2. Applies multiple clustering algorithms in sequence -3. Generates both visualization and tensor outputs -4. Handles cleanup and finalization - diff --git a/clus/docs/PointTreeBuilding.md b/clus/docs/PointTreeBuilding.md deleted file mode 100644 index 4ff712a10..000000000 --- a/clus/docs/PointTreeBuilding.md +++ /dev/null @@ -1,193 +0,0 @@ - -# PointTreeBuilding Class Analysis - -## Overview -PointTreeBuilding is a class for converting clusters of blobs into point cloud trees and output tensors, primarily used in wire-cell data processing. It inherits from three base classes: -- `Aux::Logger` - For logging functionality -- `IClusterFaninTensorSet` - For tensor set operations -- `IConfigurable` - For configuration management - -## Core Components - -### 1. 
Key Member Variables - -```cpp -// Sampling and configuration -size_t m_multiplicity {2}; // Number of input vectors (default 2) -std::vector m_tags; // Tags for input frames -size_t m_count{0}; // Processing counter - -// Physics parameters -double m_tick {0.5*units::us}; // Time tick size -double m_drift_speed {1.101*units::mm/units::us}; // Drift velocity -double m_time_offset {-1600 * units::us}; // Time offset -double m_dead_threshold {1e10}; // Threshold for dead channels - -// Angle parameters for projections -double m_angle_u {1.0472}; // 60 degrees -double m_angle_v {-1.0472}; // -60 degrees -double m_angle_w {0}; // 0 degrees - -// Core components -IAnodePlane::pointer m_anode; // Anode plane interface -std::map m_samplers; // Blob samplers -std::string m_datapath = "pointtrees/%d"; // Output data path format -``` - -### 2. Main Processing Flow - -The class processes data through its operator() method with the following steps: - -1. Input validation and EOS (End Of Stream) checking -2. Processing live clusters through `sample_live()` -3. Processing dead clusters through `sample_dead()` (if multiplicity = 2) -4. Adding channel-time point clouds (CTPC) through `add_ctpc()` -5. Adding dead wire information through `add_dead_winds()` -6. Converting results to tensors for output - -## Key Algorithms - -### 1. Live Cluster Sampling (sample_live) - -```cpp -Points::node_ptr sample_live(const WireCell::ICluster::pointer icluster) const { - 1. Get geometric clusters from the input cluster graph - 2. Create root node for point tree - 3. For each cluster: - - Sample 3D points using the "3d" sampler - - Create 2D projections (2dp0, 2dp1, 2dp2) - - Calculate blob center - - Add scalar metadata (charge, margins, etc.) - - Add to point tree - 4. Return root node -} -``` - -### 2. Dead Cluster Sampling (sample_dead) - -```cpp -Points::node_ptr sample_dead(const WireCell::ICluster::pointer icluster) const { - 1. Get geometric clusters from input graph - 2. 
Create root node - 3. For each cluster: - - Create scalar dataset with basic metadata - - Extract corner points from blob shape - - Add to point tree - 4. Return root node -} -``` - -### 3. CTPC Addition (add_ctpc) - -This algorithm adds channel-time point clouds to the tree: - -1. Extract slice information from cluster graph -2. For each slice: - - Process activity in channels - - Calculate positions (x,y) for each wire - - Group data by face and plane -3. Create datasets for each face/plane combination containing: - - Position coordinates (x,y) - - Charge information - - Channel identifiers - - Wire indices - - Slice indices - -### 4. Dead Wire Processing (add_dead_winds) - -This algorithm processes dead wire information: - -1. Scan cluster graph for dead channels (charge uncertainty > threshold) -2. For each dead channel: - - Calculate drift coordinates (xbeg, xend) - - Group by face and plane -3. Create datasets containing: - - Start/end positions - - Wire indices - - Dead channel information - -## Configuration System - -The class uses a JSON-based configuration system with these key parameters: - -```json -{ - "multiplicity": 2, - "tags": ["tag1", "tag2"], - "datapath": "pointtrees/%d", - "anode": "AnodePlaneType", - "samplers": { - "3d": "BlobSamplerType", - "dead": "DeadBlobSamplerType" - } -} -``` - -## Error Handling - -The class implements several error checks: -1. Input validation for multiplicity -2. EOS detection -3. Sampler availability verification -4. Configuration parameter validation - -## Key Data Structures - -### 1. Point Cloud Tree -- Hierarchical structure representing spatial relationships -- Each node contains: - - 3D point data - - 2D projections - - Scalar metadata - - Channel/time information - -### 2. Tensor Output -- Organized by live/dead classification -- Contains position, charge, and metadata information -- Structured for efficient processing downstream - -## Performance Considerations - -1. 
Memory Management: - - Uses smart pointers for tree nodes - - Efficient data structure sharing - - Careful handling of large point clouds - -2. Computational Efficiency: - - Organized processing by face/plane - - Efficient point cloud sampling - - Structured data access patterns - -## Usage Example - -```cpp -// Creating and configuring the processor -auto ptb = make_shared(); -Configuration cfg; -cfg["multiplicity"] = 2; -cfg["anode"] = "AnodePlane"; -cfg["samplers"]["3d"] = "BlobSampler"; -ptb->configure(cfg); - -// Processing clusters -ICluster::pointer live_cluster = /* input live cluster */; -ICluster::pointer dead_cluster = /* input dead cluster */; -input_vector invec = {live_cluster, dead_cluster}; -output_pointer tensorset; -bool success = (*ptb)(invec, tensorset); -``` - - -This class serves as a crucial component in the wire-cell data processing pipeline, transforming blob clusters into structured point cloud trees. The key points to understand are: - -1. It handles both "live" and "dead" clusters separately, with different processing strategies for each -2. It creates multiple projections (3D and 2D) of the point clouds -3. It maintains detailed metadata about charges, channels, and wire positions -4. It organizes all data into a hierarchical tree structure for efficient processing - -The class is particularly sophisticated in its handling of: -- Multiple coordinate systems (3D space, wire planes, time dimensions) -- Different types of data (live/dead clusters, charges, geometrical information) -- Complex metadata relationships -- Efficient data organization for downstream processing - diff --git a/clus/docs/algorithms.org b/clus/docs/algorithms.org new file mode 100644 index 000000000..b1d931018 --- /dev/null +++ b/clus/docs/algorithms.org @@ -0,0 +1,26 @@ +#+title: Notes on algorithms used in Wire-Cell Toolkit "clus" subpackage + +* Dijkstra's shortest paths + +This finds the shortest between a given seed vertex and all vertices in a +connected graph. 
It returns a set of identifiers of "predecessor vertices". +Eg, a vector of boost graph vertex descriptors. This set is ordered and aligned +to the ordered collection of graph vertices. One recursively "walks" the +predecessors collection to find the path to a given vertex. A "distances" +collection collects the distances of steps in this walk. + +** Determinism + +There is no randomness in the results of Dijkstra's but results are weakly +deterministic. Two sources of non-determinism should be considered. First, +tie-breaking in the distance comparisons is required and the chosen strategy may +lead to result changes due to otherwise unrelated code changes. Depending on +tie breaks, different paths with identical distance may be chosen as "shortest". +Ties can occur more often based on the type for distance. Using "double" or +"float" is better than "int". + +Second, result ordering is subject to the ordering of the graph vertices. +Graphs with unstable vertex identifiers will have unstable result ordering. +With stable vertex identifiers, a change of vertex collection can lead to +different ordering of otherwise same results. With Boost Graph Library, best to +use ~vecS~ and not ~setS~ as the former guarantees index-like descriptors. diff --git a/clus/docs/cluster.org b/clus/docs/cluster.org new file mode 100644 index 000000000..b021994ed --- /dev/null +++ b/clus/docs/cluster.org @@ -0,0 +1,62 @@ +#+title: Wire-Cell Toolkit Cluster facade + +* Overview + +The ~WireCell::Clus::Facade::Cluster~ class provides a ~NaryFacade~ over nodes in a +PC tree representing clusters of blobs of points and which are children nodes of +the nodes with ~Grouping~ facade. + +The ~Cluster~ class provides: + +- Specific functionality on top of data in the PC tree. +- Caching of results of that functionality for optimization. +- A storage context for certain transient data useful in the context of MABC. + + +See also [[file:mabc.org][MultiAlgBlobClustering (MABC)]] doc. 
+ +* Graphs + +This summarizes the graphs in use in ~clus/~. + +** Shortest paths graphs + +A cluster has connectivity that is represented by a (boost) graph as constructed +by one of several possible graph makers. Various graph operations are supported. + +The following graph types exist: + +- basic :: This graph type is used to calculate shortest paths between two + vertices (each representing a blob). It is used for shortest path + calculations. + +- ctpc :: This is a variant of *basic* which has extra connectivity applied that + considers the "wire-time point cloud" (ctpc - where "c" stood for "channel" in + the microboone days). It is used for shortest path calculations. + + +The operations: + +- shortest paths :: A (singular) shortest path depends on the graph type (*basic* + vs *ctpc*) and a source and target vertex. The calculation has three + parts. 1. Construct either a *basic* or *ctpc* graph. 2. Apply Dijkstra's + shortest paths (plural) algorithm from a given source vertex to find shortest + paths to all other vertices. This is an expensive operation. 3. Find the + singular path from the source to a specific destination vertex. This is a + cheaper but not free calculation. Due to the calculation costs a series of + caches are applied. + +- connected components (CC) :: An *overcluster* graph is calculated and the + standard connected components operation is applied. This returns a CC array + aligned with the collection of vertices and giving a "component" or "group" + number in which each vertex is found. + + +** CC graphs + +- overcluster :: This is a variant of *basic* that also includes *ctpc* information + and includes "protection" against "over clustering". It is used for + "connected components" calculations. 
+ +** temp graphs +** connectivity graphs diff --git a/clus/docs/facade-mixin.org b/clus/docs/facade-mixin.org new file mode 100644 index 000000000..baebf2299 --- /dev/null +++ b/clus/docs/facade-mixin.org @@ -0,0 +1,13 @@ +#+title: Facade's Mixin + +A class inherits a "mixin" in order to gain a "bag of methods". In +~Facade_Util.h~ the ~Mixin~ class provides common methods to ~Grouping~, ~Cluster~ and +~Blob~ facades that cover these features: + +- Access to the "local PCs" (point cloud datasets and their arrays). Users of + the facades should learn about this and avoid reinventing PC access methods. + +- A systemic, hermetic but monolithic caching mechanism. Developers of the + facades should learn about this. + +More information is in the ~Facade_Util.h~ comments. diff --git a/clus/docs/fiducialutils.org b/clus/docs/fiducialutils.org new file mode 100644 index 000000000..ec20db790 --- /dev/null +++ b/clus/docs/fiducialutils.org @@ -0,0 +1,53 @@ +* Test job + +#+begin_example +bats-debug -f steiner clus/test/test-porting.bats +#+end_example + +This merely appends ~MakeFiducialUtils~ to MABC pipeline after the "steiner" +related. + +However, the recently completed "steiner" porting has left this broken. It can +be repeated with the N-1 test: + +#+begin_example +bats-debug -f steiner clus/test/test-porting.bats +#+end_example + + +* Instructions for Xin + +- FiducialUtils is the replacement for ToyFiducial +- It separates data into "static", "dynamic" and "internal". + - Static is intended to hold data that is unchanged over many "events". + - Dynamic is meant to hold data about the current "event". + - Internal is whatever cache derived from static+dynamic needed by the query methods +- You may extend these three structs as needed. +- You may add more "query" methods as needed. +- The initial InternalData has a "dummy" placeholder. + - It should be removed when you start filling in the ported details. 
+- example config + - ~pgrapher/common/clus.jsonnet~ provides a ~fiducialutils()~ function to add to an MABC pipeline. + - a new ~clus/test/test-porting/stm/main.jsonnet~ provides a top-level config +- example job + - ~bats-debug -f stm clus/test/test-porting.bats~ + + +Next steps to make an ~StmTagger~ (or whatever name) "ensemble visitor" to do +whatever ~Check_STM~ does: + +- Make a new "ensemble visitor" class. + - You can use the new ~clus/src/make_fiducialutils.cxx~ as an example. +- Add a config maker in ~pgrapher/common/clus.jsonnet~ +- Extend ~clus/test/test-porting/stm/main.jsonnet~ to include it in the MABC pipeline + + + + +* Questions to Xin + + +- [ ] The "offset_x" and/or "step" in the "query methods" are given default + values in WCP's ToyFiducial. Are these values indeed the same for all calls + for a given "event"? If so, their values should be moved out of the method + arguments and into the "StaticData" struct. diff --git a/clus/docs/mabc.org b/clus/docs/mabc.org new file mode 100644 index 000000000..17842f5de --- /dev/null +++ b/clus/docs/mabc.org @@ -0,0 +1,110 @@ +#+title: MultiAlgBlobClustering, its functional components and PC tree facades + +* Overview + +The ~MultiAlgBlobClustering~ (MABC) component is a Wire-Cell Toolkit data-flow +programming graph node. Primarily it executes a pipeline of special +~IEnsembleVisitor~ *functional component* interfaces that process (mutate) a WCT +point-cloud (PC) tree data structure with the help of PC tree node "facade" +classes. See also [[file:cluster.org]]. + +** Facades + +The PC tree "facade" classes are so named as they (attempt to) follow the [[https://en.wikipedia.org/wiki/Facade_pattern][facade +pattern]]. There is one facade type applied to all nodes in a given PC tree +layer. Thus, a facade is meant to apply specific character to the otherwise +uniform node type. 
Its methods are meant to name *interpretations* of the +underlying PC tree at node's level and to enable *cache-based optimizations*. +Developers of facades are *strongly cautioned* not to add functionality and data +beyond these two intentions. + +There are four facade layers. In order from root to leaves they are: + +- ensemble :: facade on the root node (no parent), provides named access to child groupings. +- grouping :: a set of child clusters that are built based on some defining assumption/mechanism (eg, "live", "dead" and "shadow" groupings). +- cluster :: a set of child blobs that are considered mutually "connected" in some way. +- blob :: a leaf (no children) representing a region of 3D space that has been sampled and evaluated to provide a (blob-local) point cloud. + +While only one facade hierarchy can be attached to a PC tree at a given time, it +is possible to enact different hierarchies by defining different facade classes. +This allows, for example, a PC tree to transition between domains where +different interpretations of the underlying data are relevant. + +** Pipeline components + +The main purpose of MABC is to execute a user-configured pipeline of *functional +components* operating on the ensemble. Each component executes in series and is +allowed to "mutate" the ensemble. + +For the most part, these functional components are categorized as "clustering +methods" (source in ~src/clustering_*.cxx~) as their intent is to construct and +refine clusters. Subsequent "pattern recognition" (patrec) components evaluate +and interpret clusters into higher level objects. These objects have +representations in the datasets typically stored at grouping level. + +The pipeline is executed in a strictly sequential manner. Some functional +components may be sensitive to the order of execution. The user is responsible +for configuring the sequence in an appropriate order. 
For example, the various +"extend" clustering functions rely on the "live dead" clustering function to +have been run previously. + +The PC tree, facades and function components are not specific to MABC and +additional WCT data flow programming graph nodes or other code may make use of +them. + +* User configuration + +This section may be incomplete, see the [[file:../inc/WireCellClus/MultiAlgBlobClustering.h][MABC header file]] for a more definitive reference. + +- ~groupings~ :: an array of grouping names to accept from input. Defaults to + ~["live","dead"]~. If a named grouping is not available on input, an empty + grouping is created. Any input not named is ignored. + +- ~inpath~ :: the *base* datapath under which input tensors may be located. The + ~outpath~ is equivalent for output. + +- ~insubpath~ :: an array of objects giving ~.name~ and ~.subpath~ suffixes relative + to ~inpath~. A grouping name not found in this mapping will have a corresponding + subpath of ~/~. This setting is optional and only needed if the user + requires some datapath that does not follow the default pattern. The + ~outsubpath~ is equivalent for output. + +- ~cluster_id_order~ :: determine how cluster identity (ID) numbering is + determined. By default (unset), the ID numbering of output clusters represent + the ordering applied by the component operations (typically this is + insert-order and/or with redundant and/or non-sequential numbers). If "tree", + the insert-order is used and the IDs are sequentially ordered starting with 1. + If "size" then the order is determined by a size heuristic (large to small) + and again starting from 1. + +- ~pipeline~ :: an array of ~IEnsembleVisitor~ instance identifiers (type/name) of + the functional components that will be executed in the MABC pipeline. + +Each functional component in the MABC pipeline has its own configuration. + +* Developer notes + +This section collects some information relevant to developers of MABC pipeline +components. 
+ +** Cluster merging and separating + +Two primitive operations on clusters include merging and separating. These +operations are performed in the context of a grouping common to all clusters +involved. Separation of a cluster leads to new clusters that become children of +the same grouping that held the original cluster. Likewise, merging is +performed by clusters that have the same parent grouping. Both operations can +optionally retain or remove the initial cluster(s) from the grouping. In +merging, the single new cluster is given the ID of its first constituent. In +separating, all new clusters get a common ID equal to the original. + +Separating and merging are governed by a "connected components" (CC) array. +Separating a cluster requires a CC array that spans the cluster's blob children +list and provides a "group ID" for each. All blob children with the same group +ID are placed in a new cluster and each new cluster is provided with an +association to its formative group ID. Conversely, when clusters are merged, a +new cluster is formed to accept the blob nodes of the input donor clusters. The +merging process results in a CC array that records which of the clusters in the +ordered donor list each blob came from. CC arrays can be stored in datasets on +the PC tree so that separation and merging can be reversible operations. + diff --git a/patrec/docs/porting_dictionary.md b/clus/docs/porting/porting_dictionary.md similarity index 86% rename from patrec/docs/porting_dictionary.md rename to clus/docs/porting/porting_dictionary.md index 3b84a7138..ad52cccf9 100644 --- a/patrec/docs/porting_dictionary.md +++ b/clus/docs/porting/porting_dictionary.md @@ -219,10 +219,19 @@ The final portion of porting covers the transformation from clusters of blobs an ## WCP Algorithms - [ ] :question: What is the overall "data flow graph" for these stages. 
+ * Xin: For major components of the Pattern Recognition, the short answer is that it would require everything (3D points, saved in PCT, 2D measurements saved in CTPC, derived data 3D fitted trajectory and dQ/dx). These are needed to do trajectory/dQ/dx fitting, do PID, do energy reconstruction etc. The light information is not needed, but the matched time from flash is needed to position things at correct location. + * Xin: for the later feature extraction, it also need to access the above data to form various calculation. + * Xin: for the trajectory/dQ/dx fitting, the input consists of 2 parts: i) hypotheses, (derived from 3D points and expressed as ProtoSegment, ProtoVertex etc), ii) measurements (2D data). - [ ] :question: What input data is required for each stage, what output is produced? (can input/output be fully modeled as a graph or is there "extra" data that will not fit that model?) + * Xin: There are two major outputs: 1. the particle flow (with a tree of particles, made by ProtoSegment, ProtoVertex, and WCShower in WCP), the different stages of the PAttern recognition is essentially figure out some information (e.g. PID, energy), as well as the order of things to form the particle flow tree. + * Xin, 2. in addition to the particle flow, we also extract various features for event selection. These features are based on various input (3D points, 2D measurements ...). These features in WCP were used to train BDTs for event selection. - [ ] :question: Is the data flow graph a linear pipeline or a more general DAG? -- [ ] :question: Will the PC-tree be required throughout all stages? + * Xin: the WCP flow graph can be viewed as a linear pipeline. +- [ ] :question: Will the PC-tree be required throughout all stages? + * Xin: likely, since 3D points, which is needed, are stored in PC-tree - [ ] :question: Will the PC-tree be required to be output by the final algorithm that produces the particle flow data structure? 
+ * Xin: I think the 3D points should be stored or associated with the particle flow data structure. For example, if I have a pion in the output, I do want to know which 3D points are associated with this pion. These would be useful in making features for latter usage. + * Xin: for this section, the performance is going to be the key, the computing should be less of a concern. This means that it would be great that the system can be expanded to incorporate more advanced algorithm (e.g. AI/ML). ### [multi dQ/dx fitting](https://github.com/BNLIF/wire-cell-pid/blob/537a3fd17f8a7b3cf5412594267c14c4cc1775cb/docs/PR3DCluster_multi_dQ_dx_fit.md) (WCP) diff --git a/patrec/docs/tjft/wcp-data-notes.org b/clus/docs/porting/tjft/wcp-data-notes.org similarity index 64% rename from patrec/docs/tjft/wcp-data-notes.org rename to clus/docs/porting/tjft/wcp-data-notes.org index e6e0a95a5..28b0819d4 100644 --- a/patrec/docs/tjft/wcp-data-notes.org +++ b/clus/docs/porting/tjft/wcp-data-notes.org @@ -28,25 +28,34 @@ are represented by a displacement vector (~fit_pt_vec~). A number of PCs are associated to a segment ("nominal", "fit", "associated" and "steiner"). - [ ] :question: How are the pointes for these PCs determined? + - Xin: the starting 3D points are derived by closest distance approach (Vorono diagram) + - Xin: the other points (~wcpc_vec~, shortest path?, ~fit_pt_vec~, fitted results, so derived, Steiner tree, also derived and used shortest path) are derived. - [ ] :question: Do these PCs need to be persisted between algorithms or can they be constructed locally? + - Xin: they would need to be persisted in general. - [ ] :question: What is the life cycle of ~ProtoSegments~? Are they always temporary/local? Is there a single "owner" of them alL? + - Xin: they are used throughout the entire pattern recognition cycle. + - Xin: the basic PR is like solving a Sudoku puzzle, we try to gradually reconstruct the missing pieces. 
Once we have all the pieces known, we need to make guesses on the remaining missing pieces. Then, based on all the information, we will try to extract the features to do what we need (event selection or energy reconstruction). Each segment is identified by a number (~id~) and is associated with a cluster (~cluster_id~) and particle properties (type, mass, momentum, energy). - [ ] :question: What is the relationship between segment and cluster? + - Xin: cluster contains one or more segments. One segment cannot go across multiple clusters. A collection of *track/shower* properties are also associated to each segment. - [ ] :question: I assume the ~*_vec~ are arrays. From the description it is not clear if elements of these arrays are associated with a *point* or an *interval* between two points. Eg, ~dQ~, ~dx~, etc sound like per-interval but ~p{uvwt}~ sound like per-point. + - Xin: these dQ, dx, are associated with the fitted trajectory points, each point can also be viewed as segment, half with previous point, half with next point. These points are not the original 3D points, but newly created points. *** ProtoVertex This represents a vertex though an initial point and a fitted point. - [ ] :question: It also carries values relevant to an extended object such as ~dQ~ and ~dx~. The comment is "Track length segment". Does this refer to a ~ProtoSegment~ or something else? + - Xin, same as before, each vertex carry a whole bunch of dx, each represents the half between vertex and nearby trajectory points (from each protosegment). - [ ] :question: Is a ~ProtoSegment~ and a ~ProtoVertex~ associated? If so, how? Only through a ~WCShower~? + - Xin, not sure the meaning of the question. The protoSegment and protovertex are basically the initial hypotheses of the trajectory fitting, so they form a graph (in WCP, it was represented by two maps, vertex --> segments, segments --> vertex). Then this graph was used in doing the trajectory and dQ/dx fitting. 
A similar graph (two maps) was also used inside the WCShower. *** WCShower @@ -55,15 +64,19 @@ In WCP, a *shower* (~WCShower~) represents a classification of - and an extensio - [ ] :question: There is a ~flag_shower~. Does this mean that ~WCShower~ is used to represent physics tracks (eg, due to a muon/pion/proton) as well as physics showers (em, hadronic)? + * Xin, yes we also use WCShower to represent the long muons (many segements represent delta ray and muon segements). We should have used a different data product in WCP. A shower has a *starting vertex* and a *starting segment* as well as *staring point* and *ending point*. - [ ] :question: Are the *starting vertex*, the *starting point* and the vertex of the *starting segment* in fact identical? + - Xin: starting vertex should be one vertex of the starting segement. Note, starting segement have another vertex inside the shower. + - Xin: the starting point is very likely the best-fit point of the starting vertex. A shower has two point clouds ("fit" and "associated"). +- Xin: associated are the original 3D points, fit are the results from track trajectory and dQ/dx fitting. A shower has two maps to represent a bipartite graph of segment and vertex nodes. @@ -78,10 +91,12 @@ A shower has two maps to represent a bipartite graph of segment and vertex nodes In WCP, a Steiner tree is constructed from initial set of 3D points. - [ ] :question: Can we state a set-theoretic inequality that enumerates the sets of points and there sizes? My impression is that we have 3 sets: "sampling points" $\subseteq$ "Steiner vertices" $\subseteq$ "Steiner terminals". + - Xin, I believe so, we need to talk more to make sure my understanding of this is correct. There is an association of a blob (WCP's ~SlimMergeGeomCell~) to a set of "point indices" (~cell_point_indices_map~) - [ ] :question: Which point set from the above (3?) sets are these points? + - Xin: looking at the original code, it seems that they are original 3D points. 
This brings in PAAL as a dependency. Relies on the "CT PC" (PC form of wire vs time activity sparse image). @@ -92,5 +107,6 @@ This brings in PAAL as a dependency. Relies on the "CT PC" (PC form of wire vs The [[https://github.com/BNLIF/wire-cell-pid/blob/537a3fd17f8a7b3cf5412594267c14c4cc1775cb/docs/PR3DCluster_multi_dQ_dx_fit.md][WCP summary]] describes the fitting as begin across multiple 3D track segments. - [ ] :question: Is the fit really across segments or is it one independent fit per segment? + - Xin, it is fit across all involved segements and vertices diff --git a/patrec/docs/tjft/wcp-data.dot b/clus/docs/porting/tjft/wcp-data.dot similarity index 100% rename from patrec/docs/tjft/wcp-data.dot rename to clus/docs/porting/tjft/wcp-data.dot diff --git a/patrec/docs/tjft/wct-nominal-design-notes.org b/clus/docs/porting/tjft/wct-nominal-design-notes.org similarity index 79% rename from patrec/docs/tjft/wct-nominal-design-notes.org rename to clus/docs/porting/tjft/wct-nominal-design-notes.org index 34afca620..bf1a2cc07 100644 --- a/patrec/docs/tjft/wct-nominal-design-notes.org +++ b/clus/docs/porting/tjft/wct-nominal-design-notes.org @@ -1,4 +1,71 @@ -#+title: Requirements and nominal design +#+title: Requirements and nominal design - work in progress + +* Major elements + +I am prescriptive here to be brief and expect we will delete/change anything bad. + +** Basics + +- PC-tree remains the central data structure. +- We update MABC / clustering function interface to pass ~std::map~ + - Can we understand how to remove the ~cluster_connected_dead~ argument? +- Add a general interface that allows serialization between Boost Graph and ~std::map~ ("named datasets"). + - This will allow aggregate (eg ~struct~) and heterotypic (eg ~std::variant~ or ~boost::any~) graph vertex and edge property types. + - It will require instrumenting these types with some simple Boost.Serialization code. 
+ - A Boost.Serialization "archive" class is needed to use named datasets as the store. + - A convention of a graph having a "name" must be defined such that ~_graph~, ~_nodes~ and ~_edges~ hold the graph, node and edge properties, respectively. + - In addition to the ~_edges~ dataset holding per-edge scalar properties, two special arrays called ~tail~ and ~head~ will hold a pair of indices into the ~_nodes~ dataset to represent the edge, itself. + +- Top level WCP code like the methods of ~NeutrinoID~ can then become "clustering functions". + - Functions need to agree on certain things: + - Which PC-tree layer holds any given graph (mostly Grouping vs Cluster). + - What are the "names" of the graphs. + +** MABC / clustering API clean up + +- Make it a proper Interface API using WCT ~NamedFactory~, etc, facilities. +- Besides the calling interface, allow for declaring consumer/producer metadata as: + + #+begin_src c++ + using GroupingMap = std::map>; + struct IClusteringFunction : public Inteface { + + // The API cleanup and generalization. Original / most all clustering + // "functions" would take a map with a "live" entry, some with a "dead" + // entry and some would produce/take a "shadow" entry. + GroupingMap operator()(GroupingMap groupings) = 0; + + // Optional idea #1: + std::vector produces_groupings() const { return {}; } + std::vector consumes_groupings() const { return {}; } + + // Optional idea #2. + std::vector produces_graphs() const { return {}; } + std::vector consumes_graphs() const { return {}; } + #+end_src + +Optional ideas 1 and 2 are to give a chance to detect when a user has provided broken configuration. + + +One big question is if we should actually define this interface: + +#+begin_src c++ + GroupingMap operator()(GroupingMap groupings) = 0; +#+end_src + +or this one: + +#+begin_src c++ + void operator()(GroupingMap& groupings) = 0; +#+end_src + +That is, do we want to allow a "feed-forward by default" paradigm or a truly functional form? 
+ + +* + +The rest of this doc can be ignored for now. + * Requirements diff --git a/clus/docs/pr.org b/clus/docs/pr.org new file mode 100644 index 000000000..3bed2f078 --- /dev/null +++ b/clus/docs/pr.org @@ -0,0 +1,6 @@ +#+title: Pattern Recognition + +* to write + +- [ ] generalities +- [ ] warning about non-ordering of segment edges diff --git a/clus/docs/steiner.org b/clus/docs/steiner.org new file mode 100644 index 000000000..fc9a6f2f0 --- /dev/null +++ b/clus/docs/steiner.org @@ -0,0 +1,153 @@ +#+title: Info related to porting of Steiner-related WCP code + +* Links + +- Meta issue https://github.com/WireCell/wire-cell-toolkit/issues/431 + +* Guidance + +** Code organization + +- All Steiner-related header and source code is in ~clus/src/~ and so is "private" to the sub-package. + +- All Steiner-related code is in the ~WireCell::Clus::Steiner::~ namespace. + +- ~CreateSteinerGraph~ in ~CreateSteinerGraph.{h,cxx}~ is the "ensemble visitor" that is to be added to an MABC pipeline. + +- ~Steiner::Grapher~ in ~SteinerGrapher.{h,cxx}~ is a class that "wraps" one ~Cluster~. It is kind of a "facade of a facade". This class is intended to represent the Steiner-related parts of ~PR3DCluster~. As such it is okay to load it up with whatever per-Cluster auxiliary information and methods that are needed. It is also given a ~Config~ struct that holds any required "service" components (eg, sampler, detector volumes). The main duty of the ~Grapher~ is to return a graph from its ~create_steiner_graph()~ method. The ~CreateSteinerGraph::visit()~ is responsible to make a ~Grapher~, call it and dispatch the resulting graph. + +* Developing + +Xin, I think you will hack mostly on ~SteinerGrapher.{h,cxx}~ and +~SteinerFunctions.{h,cxx}~. Feel free to fill these up. In particular, the +arguments to the existing methods and functions are definitely incomplete. If +these ~Steiner*.{h,cxx}~ files become uncomfortably large, feel free to make more +~.cxx~ files. 
Specifically do not add more ~.h~ files that have actual code as was
+done in WCP.
+
+Please come back to me whenever something has no obvious solution or if you
+feel something needs to be added to the outer layers (eg ~CreateSteinerGraph~ or
+outside ~clus/~).
+
+
+* Howtos
+
+** Graphs on a Cluster
+
+~Cluster~ can hold graphs in a generic way by a *name*. See the ~Graphs~ base class
+in [[file:../inc/WireCellClus/Facade_Mixins.h]]. You may construct a graph outside
+of ~Cluster~ and give it to ~Cluster~ by name and later retrieve it by name.
+
+~Cluster~ also yields ~GraphAlgorithms~ by calling one of the variants of the
+~Cluster::graph_algorithms()~ methods. The underlying graph will be constructed
+if it does not yet exist. It uses *reserved names*:
+
+- basic :: equivalent to WCP's ~Create_graph()~ with no args.
+- ctpc :: the graph used by WCP's (and WCT's) Dijkstra's shortest paths.
+- relaxed :: the graph used by WCP's examine graph (WCT's connected blobs).
+
+After creating one of these graphs by calling ~Cluster::graph_algorithms()~ you
+can also get the underlying graph by name. Likewise, if you set a graph with a
+reserved name and then later call ~Cluster::graph_algorithms()~, it will use your
+graph. Otherwise, you are free to add and retrieve graphs with any name you
+like.
+
+Graphs are merely held by the ~Cluster~ and are not part of the ~Cluster~ cache.
+There is no (or not yet any) explicit connection between a graph and an
+associated point cloud. For now, it is up to the user to coordinate that
+association.
+
+
+** Graph reduction
+
+A graph can be "filtered" or "reduced" to make a subgraph in two basic ways.
+
+*** Manual
+
+The more labor-intensive way is for user code to create a new, empty graph, loop
+over an existing graph and manually fill the new one with whatever vertices
+and/or edges desired.
+
+When the new graph has a subset of vertices, their descriptors (indices) will
+not match (in general) the corresponding vertices from the original graph.
+
+*** Filtered graph
+
+A Boost filtered graph is a graph-like "view" on an original graph that is
+generated by specifying a *predicate* (true/false method) to select vertices
+and/or edges.
+
+As it is only a "view", the original graph must be kept alive and the filtered
+graph vertices exactly match their corresponding vertices in the original.
+
+WARNING the ~boost::num_edges()~ and ~boost::num_vertices()~ called on a filtered
+graph will return the counts of the underlying graph. However, iterating over
+edges (or vertices) is subject to the filter.
+
+#+begin_src c++
+  #include "WireCellUtil/GraphTools.h"
+  using WireCell::GraphTools::edge_range;
+  // ...
+  size_t nedges=0;
+  for (const auto& edge : edge_range(filtered_graph)) {
+      ++nedges;
+  }
+#+end_src
+
+
+A filtered graph can be converted into a fully "real" graph with:
+
+#+begin_src c++
+  filtered_graph_type view = my_filter(graph);
+  graph_type reduced;
+  boost::copy_graph(view, reduced);
+#+end_src
+
+The "real" graph can live beyond the original ~graph~ and its vertex descriptors
+(indices) have consecutive values that have lost any correspondence with the
+original.
+
+*** Support
+
+The ~Weighted::GraphAlgorithms~ class provides methods ~reduce()~ to return a
+filtered graph given a set of vertices or edges to accept/reject. It also
+provides ~weight_threshold()~ to accept/reject edges based on comparing their edge
+weights to a given threshold value.
+
+
+
+** Making a fresh Cluster
+
+There are cases in WCP where a ~PR3DCluster~ must be made from scratch. In WCT, a
+~Cluster~ is a facade over an underlying node (and a ~Grapher~ is yet another
+layer). It is the node that is memory managed and owns the facade. We thus
+must keep the node alive while we use its ~Cluster~ (or its ~Grapher~).
For an +isolated case (no parent ~Grouping~) we may do: + +#+begin_src c++ + Points::node_t new_cluster_node; + Cluster& new_cluster = new_cluster_node.value.facade(); +#+end_src + +Everything is destroyed when ~new_cluster_node~ goes out of scope. Alternatively, +if we want to make the new ~Cluster~ on an existing ~GRouping~ we can do: + +#+begin_src c++ + Grouping& grouping; // we get somehwere + Cluster& cluster = grouping.make_child(); +#+end_src + + + +* PAAL + +WCP interned a copy of all of PAAL but only used a tiny portion: + +#+begin_src c++ + struct less{}; + class nearest_recorder; + nearest_recorder make_nearest_recorder(); +#+end_src + +These bits are copied into ~clus/src/PAAL.h~. The ~SteinerGrapher_fake.cxx~ +includes some code to exercise compiling this code. diff --git a/clus/docs/talks/clustering_porting_summary.pptx b/clus/docs/talks/clustering_porting_summary.pptx new file mode 100644 index 000000000..e6d075ac8 Binary files /dev/null and b/clus/docs/talks/clustering_porting_summary.pptx differ diff --git a/clus/inc/WireCellClus/BlobSampler.h b/clus/inc/WireCellClus/BlobSampler.h index 7381176a5..042386a26 100644 --- a/clus/inc/WireCellClus/BlobSampler.h +++ b/clus/inc/WireCellClus/BlobSampler.h @@ -30,6 +30,12 @@ namespace WireCell::Clus { // IConfigurable virtual void configure(const WireCell::Configuration& cfg); virtual WireCell::Configuration default_configuration() const; + + // Runtime configuration override interface + virtual std::tuple + sample_blob_with_config(const IBlob::pointer& blob, + int blob_index = 0, + const Configuration& runtime_config = Configuration()); struct CommonConfig { @@ -158,6 +164,65 @@ namespace WireCell::Clus { default values for all strategies and may be set on each individual strategy. + + + - charge_stepped :: sample points on a stepped ray grid with charge-based filtering. 
+ + This is an enhanced version of the "stepped" strategy that adds + charge-based filtering to improve sampling quality by considering + wire signal strength. This strategy is based on the WCPPID sampling + method from the Wire-Cell prototype. Bad/dead planes are automatically + detected based on charge uncertainty values. + + This accepts the following options: + + - min_step_size :: The minimum number of wires over which a step + will be made. default=3. + + - max_step_fraction :: The maximum fraction of a blob a step may + take. If non-positive, then all steps are min_step_size. default=1/12. + + - offset :: How far along the diagonal from a crossing point to + the crossing point of the next neighbor rays to place the point. + A value of 0 places the point at the ray crossing. A value of + 0.5 (default) places the point at the crossing of the ray's + wires. default=0.5. + + - tolerance :: Tolerance for pitch bounds checking. default=0.03. + + - charge_threshold_max :: Minimum charge threshold for wires in the + plane with maximum wire coverage. default=4000. + + - charge_threshold_min :: Minimum charge threshold for wires in the + plane with minimum wire coverage. default=4000. + + - charge_threshold_other :: Minimum charge threshold for wires in the + third plane. default=4000. + + - max_wire_product_threshold :: When the product of max_wires × min_wires + is less than or equal to this value, all wires are used instead of + stepped sets. default=2500. + + - disable_mix_dead_cell :: Boolean flag controlling how zero-charge + wires are handled. When true, zero-charge wires are treated as + failing charge thresholds. default=false. + + - dead_threshold :: Charge uncertainty threshold for detecting dead/bad + planes. If more than 50% of sampled channels in a plane have uncertainty + above this threshold, the plane is considered bad and its charge + threshold is set to 0. default=1e10. + + The charge-based filtering logic works as follows: + 1. 
Bad planes are automatically detected by analyzing charge uncertainty + 2. For bad planes, charge thresholds are automatically set to 0 + 3. Wires from the stepped sets (mandatory wires) bypass some charge filtering + 4. Non-mandatory wires must meet charge thresholds + 5. If both crossed wires are mandatory, no additional charge filtering is applied + 6. If either crossed wire is non-mandatory, all three wire charges are checked + 7. Points where all three wires have zero charge are excluded + + Note: This strategy requires charge data with uncertainty information to be + available in the blob's activity map through the slice interface. */ struct Sampler; std::vector> m_samplers; diff --git a/clus/inc/WireCellClus/ClusteringFuncs.h b/clus/inc/WireCellClus/ClusteringFuncs.h index 2cf442e44..98caf4748 100644 --- a/clus/inc/WireCellClus/ClusteringFuncs.h +++ b/clus/inc/WireCellClus/ClusteringFuncs.h @@ -1,22 +1,82 @@ +/** + This header provides various free functions used in clustering. + + Some implementations may be found in clustering_*.cxx. The rest are in + ClusteringFuncs.cxx. 
+ + */ + +#ifndef WIRECELLCLUS_CLUSTERINGFUNCS +#define WIRECELLCLUS_CLUSTERINGFUNCS + #include "WireCellClus/MultiAlgBlobClustering.h" #include "WireCellClus/Facade.h" -#include "WireCellClus/ClusteringRetile.h" -#include "WireCellUtil/NamedFactory.h" -#include "WireCellUtil/Units.h" -#include "WireCellUtil/Persist.h" +#include "WireCellClus/IPCTransform.h" +#include "WireCellClus/ClusteringFuncsMixins.h" +#include "WireCellClus/IEnsembleVisitor.h" +#include "WireCellClus/Graphs.h" + #include "WireCellAux/TensorDMpointtree.h" #include "WireCellAux/TensorDMdataset.h" #include "WireCellAux/TensorDMcommon.h" #include "WireCellAux/SimpleTensorSet.h" -#include "WireCellUtil/Graph.h" +#include "WireCellUtil/NamedFactory.h" +#include "WireCellUtil/Units.h" +#include "WireCellUtil/Persist.h" + +#include #include -namespace WireCell::PointCloud::Facade { +namespace WireCell::Clus::Facade { using namespace WireCell::PointCloud::Tree; + /// Some clustering functions define and react to flags defined on cluster + /// facades. We name these flags with strings in this namespace to assure + /// consistency, add documentation/comments and avoid typos. Note, + /// merge_clusters() will call fresh.from(other) to forward flags. + namespace Flags { + + /// Indicates the cluster is a "live" cluster and connected to a "dead" + /// cluster. 
+ inline const std::string live_dead = "live_dead"; + + /// Indicates the cluster has a flash coincident with beam timing + inline const std::string beam_flash = "beam_flash"; + + /// Indicates the cluster is tagged as through-going muon (TGM) + inline const std::string tgm = "tgm"; + + /// Indicates the cluster is tagged as low energy + inline const std::string low_energy = "low_energy"; + + /// Indicates the cluster is tagged as light mismatch (LM) + inline const std::string light_mismatch = "light_mismatch"; + + /// Indicates the cluster is tagged as fully contained + inline const std::string fully_contained = "fully_contained"; + + /// Indicates the cluster is tagged as short track muon (STM) + inline const std::string short_track_muon = "short_track_muon"; + + /// Indicates the cluster has full detector dead region + inline const std::string full_detector_dead = "full_detector_dead"; + + // main cluster + inline const std::string main_cluster = "main_cluster"; + + // associated cluster + inline const std::string associated_cluster = "associated_cluster"; + + /// This flag is set by ClusteringTaggerCheckSTM algorithm when specific STM conditions are met + inline const std::string STM = "STM"; + + inline const std::string TGM = "TGM"; + + } + struct ClusterLess { bool operator()(const Cluster* a, const Cluster* b) const { return cluster_less(a, b); @@ -32,54 +92,42 @@ namespace WireCell::PointCloud::Facade { using cluster_vector_t = std::vector; - // clustering_util.cxx + // This function will produce a new cluster in the grouping corresponding to + // each connected component in the cluster_connectivity_graph_t that as two + // or more clusters. Any cluster in a single-cluster component is simply + // left in place in the grouping. // - // This function will produce new clusters in live_clusters. The children - // of each "fresh" cluster will be those of the "donor" clusters that are - // connected according to the cluster_connectivity_graph_t. 
The "fresh" - // cluster will be added to and the "donor" clusters will be removed from - // "known_clusters". The "donor" clusters will also be removed from - // live_clusters. + // Each new cluster will be given the children (blob nodes) of the clusters + // in the associated connected component. These now empty clusters will be + // removed from the grouping and discarded. // - // If both aname and pcname are given then store a cc array in any newly - // created clusters holding the merged set of blobs. The cc array will - // arbitrarily label each blob with a number corresponding to the original - // cluster which was parent to the blob (and which is destroyed after this - // function). + // If both aname and pcname are given then a representation of the previous + // clustering of blob nodes will be stored in the new cluster. This + // connected component (cc) array is in child-node-order and its integer + // value counts which original cluster donated the blob to the new cluster. // - // See above for cluster_connectivity_graph_t. - void merge_clusters(cluster_connectivity_graph_t& g, // - Grouping& grouping, - cluster_set_t& known_clusters, // in/out - const std::string& aname="", const std::string& pcname="perblob"); - - // clustering_live_dead.cxx - // first function ... - void clustering_live_dead(Grouping& live_clusters, - const Grouping& dead_clusters, - cluster_set_t& cluster_connected_dead, // in/out - const int dead_live_overlap_offset // specific params - ); - class ClusteringLiveDead { - public: - ClusteringLiveDead(const WireCell::Configuration& config) - { - // FIXME: throw if not found? 
- dead_live_overlap_offset_ = get(config, "dead_live_overlap_offset", 2); - } - - void operator()(Grouping& live_clusters, Grouping& dead_clusters, cluster_set_t& cluster_connected_dead) const - { - clustering_live_dead(live_clusters, dead_clusters, cluster_connected_dead, dead_live_overlap_offset_); - } - - private: - int dead_live_overlap_offset_{2}; - }; - + // The fresh.from(other) is called to transfer flags, scope and possibly + // other bits of information. + // + // Pointers to the newly created cluster node facades are returned. These + // are loaned. As usual, the cluster node owns the facade and these nodes + // are in turn owned by the grouping node. + std::vector merge_clusters(cluster_connectivity_graph_t& g, // + Grouping& grouping, + const std::string& aname="", + const std::string& pcname="perblob"); + - // clustering_extend.cxx + /** + * Extract geometry information from a grouping + * @param grouping The input Grouping object + * @param dv Detector geometry provider + * @return Tuple of (drift_direction, angle_u, angle_v, angle_w) + */ + std::tuple extract_geometry_params( + const Grouping& grouping, + IDetectorVolumes::pointer dv); std::vector> get_strategic_points(const Cluster& cluster); @@ -95,426 +143,55 @@ namespace WireCell::PointCloud::Facade { ); - // clustering_extend.cxx - // second function ... - void clustering_extend(Grouping& live_clusters, - cluster_set_t& cluster_connected_dead, // in/out - const int flag, // - const double length_cut = 150*units::cm, // - const int num_try = 0, // - const double length_2_cut = 3*units::cm, // - const int num_dead_try =3 // - ); - class ClusteringExtend { - public: - ClusteringExtend(const WireCell::Configuration& config) - { - // FIXME: throw if not found? 
- flag_ = get(config, "flag", 0); - length_cut_ = get(config, "length_cut", 150*units::cm); - num_try_ = get(config, "num_try", 0); - length_2_cut_ = get(config, "length_2_cut", 3*units::cm); - num_dead_try_ = get(config, "num_dead_try", 3); - } - - void operator()(Grouping& live_clusters, Grouping& dead_clusters, cluster_set_t& cluster_connected_dead) const - { - clustering_extend(live_clusters, cluster_connected_dead, flag_, length_cut_, num_try_, length_2_cut_, num_dead_try_); - } - - private: - int flag_{0}; - double length_cut_{150*units::cm}; - int num_try_{0}; - double length_2_cut_{3*units::cm}; - int num_dead_try_{3}; - }; - class ClusteringExtendLoop { - public: - ClusteringExtendLoop(const WireCell::Configuration& config) - { - // FIXME: throw if not found? - num_try_ = get(config, "num_try", 0); - } - - void operator()(Grouping& live_clusters, Grouping& dead_clusters, cluster_set_t& cluster_connected_dead) const - { - // for very busy events do less ... - int num_try = num_try_; - if (live_clusters.nchildren() > 1100) num_try = 1; - for (int i = 0; i != num_try; i++) { - // deal with prolong case - clustering_extend(live_clusters, cluster_connected_dead, 1, 150*units::cm, 0); - // deal with parallel case - clustering_extend(live_clusters, cluster_connected_dead, 2, 30*units::cm, 0); - // extension regular case - clustering_extend(live_clusters, cluster_connected_dead, 3, 15*units::cm, 0); - // extension ones connected to dead region ... 
- if (i == 0) { - clustering_extend(live_clusters, cluster_connected_dead, 4, 60 * units::cm, i); - } - else { - clustering_extend(live_clusters, cluster_connected_dead, 4, 35 * units::cm, i); - } - } - } - - private: - int num_try_{0}; - }; - - bool Clustering_4th_prol(const Cluster& cluster1, - const Cluster& cluster2, - double length_2, - geo_point_t& earliest_p, - geo_point_t& dir_earlp, - double length_cut); - - bool Clustering_4th_para(const Cluster& cluster1, - const Cluster& cluster2, - double length_1, double length_2, - geo_point_t& earliest_p, - geo_point_t& dir_earlp, - double length_cut); - - bool Clustering_4th_reg(const Cluster& cluster1, - const Cluster& cluster2, - double length_1, double length_2, - geo_point_t p1, double length_cut); - - bool Clustering_4th_dead(const Cluster& cluster1, - const Cluster& cluster2, - double length_1, double length_2, double length_cut, int num_dead_try=3); - - - // clustering_regular.cxx - // third function - void clustering_regular(Grouping& live_clusters, - cluster_set_t& cluster_connected_dead, // in/out - const double length_cut = 45*units::cm, // - bool flag_enable_extend = true // - ); - class ClusteringRegular { - public: - ClusteringRegular(const WireCell::Configuration& config) - { - // FIXME: throw if not found? 
- length_cut_ = get(config, "length_cut", 45*units::cm); - flag_enable_extend_ = get(config, "flag_enable_extend", true); - } - void operator()(Grouping& live_clusters, Grouping& dead_clusters, cluster_set_t& cluster_connected_dead) const - { - clustering_regular(live_clusters, cluster_connected_dead, length_cut_, flag_enable_extend_); - } - private: - double length_cut_{45*units::cm}; - bool flag_enable_extend_{true}; - }; - - bool Clustering_1st_round(const Cluster& cluster1, - const Cluster& cluster2, - double length_1, - double length_2, - double length_cut = 45*units::cm, - bool flag_enable_extend = true); - - // clustering_parallel_prolong.cxx: - void clustering_parallel_prolong(Grouping& live_clusters, - cluster_set_t& cluster_connected_dead, // in/out - const double length_cut = 35*units::cm - ); - class ClusteringParallelProlong { - public: - ClusteringParallelProlong(const WireCell::Configuration& config) - { - // FIXME: throw if not found? - length_cut_ = get(config, "length_cut", 35*units::cm); - } - void operator()(Grouping& live_clusters, Grouping& dead_clusters, cluster_set_t& cluster_connected_dead) const - { - clustering_parallel_prolong(live_clusters, cluster_connected_dead, length_cut_); - } - private: - double length_cut_{35*units::cm}; - }; - - bool Clustering_2nd_round(const Cluster& cluster1, - const Cluster& cluster2, - double length_1, - double length_2, - double length_cut = 35*units::cm); - - // clustering_close.cxx - void clustering_close(Grouping& live_clusters, // - cluster_set_t& cluster_connected_dead, // in/out - const double length_cut = 1*units::cm // - ); - class ClusteringClose { - public: - ClusteringClose(const WireCell::Configuration& config) - { - // FIXME: throw if not found? 
- length_cut_ = get(config, "length_cut", 1*units::cm); - } - - void operator()(Grouping& live_clusters, Grouping& dead_clusters, cluster_set_t& cluster_connected_dead) const - { - clustering_close(live_clusters, cluster_connected_dead, length_cut_); - } - - private: - double length_cut_{1*units::cm}; - }; - - bool Clustering_3rd_round( const Cluster& cluster1, - const Cluster& cluster2, - double length_1, - double length_2, - double length_cut = 1*units::cm); - - // void clustering_separate(Grouping& live_grouping, - // std::map>& dead_u_index, - // std::map>& dead_v_index, - // std::map>& dead_w_index); - - void clustering_separate(Grouping& live_grouping, - const bool use_ctpc); - class ClusteringSeparate { - public: - ClusteringSeparate(const WireCell::Configuration& config) - { - // FIXME: throw if not found? - use_ctpc_ = get(config, "use_ctpc", true); - } - - void operator()(Grouping& live_clusters, Grouping& dead_clusters, cluster_set_t& cluster_connected_dead) const - { - clustering_separate(live_clusters, use_ctpc_); - } - - private: - double use_ctpc_{true}; - }; - - void clustering_connect1(Grouping& live_grouping); - class ClusteringConnect1 { - public: - ClusteringConnect1(const WireCell::Configuration& config) - { - } - - void operator()(Grouping& live_clusters, Grouping& dead_clusters, cluster_set_t& cluster_connected_dead) const - { - clustering_connect1(live_clusters); - } - - private: - }; - - void clustering_deghost(Grouping& live_grouping, - const bool use_ctpc, - double length_cut = 0); - class ClusteringDeGhost { - public: - ClusteringDeGhost(const WireCell::Configuration& config) - { - // FIXME: throw if not found? 
- use_ctpc_ = get(config, "use_ctpc", true); - length_cut_ = get(config, "length_cut", 0); - } - - void operator()(Grouping& live_clusters, Grouping& dead_clusters, cluster_set_t& cluster_connected_dead) const - { - clustering_deghost(live_clusters, use_ctpc_, length_cut_); - } - - private: - double use_ctpc_{true}; - double length_cut_{0}; - }; - - // this is a function to test the implementation of CT point cloud ... - void clustering_ctpointcloud(Grouping& live_grouping); - class ClusteringCTPointCloud { - public: - ClusteringCTPointCloud(const WireCell::Configuration& config) - { - } - - void operator()(Grouping& live_clusters, Grouping& dead_clusters, cluster_set_t& cluster_connected_dead) const - { - clustering_ctpointcloud(live_clusters); - } - - private: - }; - - - // this is a function to test the implementation of examine bundles ... - void clustering_examine_bundles(Grouping& live_grouping, const bool use_ctpc); - class ClusteringExamineBundles { - public: - ClusteringExamineBundles(const WireCell::Configuration& config) - { - } - - void operator()(Grouping& live_clusters, Grouping& dead_clusters, cluster_set_t& cluster_connected_dead) const - { - clustering_examine_bundles(live_clusters, use_ctpc_); - } - - private: - double use_ctpc_{true}; - }; - - - void clustering_examine_x_boundary(Grouping& live_grouping); - class ClusteringExamineXBoundary { - public: - ClusteringExamineXBoundary(const WireCell::Configuration& config) - { - } - - void operator()(Grouping& live_clusters, Grouping& dead_clusters, cluster_set_t& cluster_connected_dead) const - { - clustering_examine_x_boundary(live_clusters); - } - - private: - }; - - void clustering_protect_overclustering(Grouping& live_grouping); - class ClusteringProtectOverClustering { - public: - ClusteringProtectOverClustering(const WireCell::Configuration& config) - { - } - - void operator()(Grouping& live_clusters, Grouping& dead_clusters, cluster_set_t& cluster_connected_dead) const - { - 
clustering_protect_overclustering(live_clusters); - } - - private: - }; - - void clustering_neutrino(Grouping &live_grouping, int num_try); - class ClusteringNeutrino { - public: - ClusteringNeutrino(const WireCell::Configuration& config) - { - // FIXME: throw if not found? - num_try_ = get(config, "num_try", 1); - } - - void operator()(Grouping& live_clusters, Grouping& dead_clusters, cluster_set_t& cluster_connected_dead) const - { - for (int i = 0; i != num_try_; i++) { - clustering_neutrino(live_clusters, i); - } - } - - private: - int num_try_{1}; - }; - - void clustering_isolated(Grouping& live_grouping); - class ClusteringIsolated { - public: - ClusteringIsolated(const WireCell::Configuration& config) - { - } - - void operator()(Grouping& live_clusters, Grouping& dead_clusters, cluster_set_t& cluster_connected_dead) const - { - return clustering_isolated(live_clusters); - } - - private: - }; + // These Judge*() functions are used by multiple clustering methods. They + // are defined in clustering_separate.cxx. // time_slice_length is length span for a slice - bool JudgeSeparateDec_1(const Cluster* cluster, const geo_point_t& drift_dir, const double length, const double time_slice_length); + bool JudgeSeparateDec_1(const Cluster* cluster, const geo_point_t& drift_dir, const double length); /// @attention contains hard-coded distance cuts /// @param boundary_points return the boundary points /// @param independent_points return the independent points - bool JudgeSeparateDec_2(const Cluster* cluster, const geo_point_t& drift_dir, + bool JudgeSeparateDec_2(const Cluster* cluster, IDetectorVolumes::pointer dv, const geo_point_t& drift_dir, std::vector& boundary_points, std::vector& independent_points, const double cluster_length); - + // this is used only by ClusteringSeparate but keep it public for symmetry with Separate_2. 
std::vector Separate_1(const bool use_ctpc, Cluster *cluster, - std::vector &boundary_points, - std::vector &independent_points, - std::map> &dead_u_index, - std::map> &dead_v_index, - std::map> &dead_w_index, - double length); + std::vector &boundary_points, + std::vector &independent_points, + double length, geo_point_t dir_cosmic, geo_point_t dir_beam, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts, + const Tree::Scope& scope); - std::vector Separate_2(Cluster *cluster, const double dis_cut = 5*units::cm, const size_t ticks_per_slice = 4); + // This is used by multiple clustering methods. + std::vector Separate_2(Cluster *cluster, + const Tree::Scope& scope, + const double dis_cut = 5*units::cm); + // Function to compute wire plane parameters for clustering algorithms + void compute_wireplane_params( + const std::set& wpids, + const IDetectorVolumes::pointer dv, + std::map>& wpid_params, + std::map>& wpid_U_dir, + std::map>& wpid_V_dir, + std::map>& wpid_W_dir, + std::set& apas); - inline std::function getClusteringFunction(const WireCell::Configuration& config) { - std::string function_name = config["name"].asString(); + // Calculate PCA direction for a set of points around a center point + geo_vector_t calc_pca_dir(const geo_point_t& center, const std::vector& points); + + +} // namespace WireCell::Clus::Facade + +#endif - if (function_name == "clustering_retile") { - return ClusteringRetile(config); - } - if (function_name == "clustering_live_dead") { - return ClusteringLiveDead(config); - } - else if (function_name == "clustering_extend") { - return ClusteringExtend(config); - } - else if (function_name == "clustering_regular") { - return ClusteringRegular(config); - } - else if (function_name == "clustering_parallel_prolong") { - return ClusteringParallelProlong(config); - } - else if (function_name == "clustering_close") { - return ClusteringClose(config); - } - else if (function_name == "clustering_extend_loop") { - return 
ClusteringExtendLoop(config); - } - else if (function_name == "clustering_separate") { - return ClusteringSeparate(config); - } - else if (function_name == "clustering_connect1") { - return ClusteringConnect1(config); - } - else if (function_name == "clustering_deghost") { - return ClusteringDeGhost(config); - } - else if (function_name == "clustering_examine_x_boundary") { - return ClusteringExamineXBoundary(config); - } - else if (function_name == "clustering_protect_overclustering") { - return ClusteringProtectOverClustering(config); - } - else if (function_name == "clustering_neutrino") { - return ClusteringNeutrino(config); - } - else if (function_name == "clustering_isolated") { - return ClusteringIsolated(config); - } - else if (function_name == "clustering_ctpointcloud") { - return ClusteringCTPointCloud(config); - } - else if (function_name == "clustering_examine_bundles") { - return ClusteringExamineBundles(config); - } - else { - throw std::invalid_argument("Unknown function name in configuration"); - } - } -} // namespace WireCell::PointCloud::Facade diff --git a/clus/inc/WireCellClus/ClusteringFuncsMixins.h b/clus/inc/WireCellClus/ClusteringFuncsMixins.h new file mode 100644 index 000000000..f9e179018 --- /dev/null +++ b/clus/inc/WireCellClus/ClusteringFuncsMixins.h @@ -0,0 +1,92 @@ +/// This API provides some mixin classes for "clustering classes" to handle +/// common behavior. 
+ +#ifndef WIRECELLCLUS_CLUSTERINGFUNCSMIXINS +#define WIRECELLCLUS_CLUSTERINGFUNCSMIXINS + +#include "WireCellClus/IPCTransform.h" +#include "WireCellClus/ParticleDataSet.h" + +#include "WireCellIface/IDetectorVolumes.h" +#include "WireCellIface/IRecombinationModel.h" +#include "WireCellIface/IFiducial.h" +#include "WireCellUtil/Configuration.h" +#include "WireCellUtil/NamedFactory.h" +#include "WireCellUtil/Exceptions.h" + +#include "WireCellUtil/PointTree.h" + +namespace WireCell::Clus { + + // A mixin to get an IDetectorVolumes + // + // Configuration: + // + // "detector_volumes" : string type/name of an IDetectorVolumes + struct NeedDV { + IDetectorVolumes::pointer m_dv; + void configure(const WireCell::Configuration &cfg); + }; + + // A mixin to get a IRecombinationModel + struct NeedRecombModel { + IRecombinationModel::pointer m_recomb_model; + void configure(const WireCell::Configuration &cfg); + }; + + // A mixin to get particle_data_set + class NeedParticleData { + ParticleDataSet::pointer m_particle_data; + std::string m_particle_data_name{"ParticleDataSet"}; + + public: + void configure(const WireCell::Configuration& config) { + m_particle_data_name = get(config, "particle_dataset", m_particle_data_name); + auto configurable = Factory::find_tn(m_particle_data_name); + m_particle_data = std::dynamic_pointer_cast(configurable); + if (!m_particle_data) { + THROW(ValueError() << errmsg{"Failed to find or cast ParticleDataSet: " + m_particle_data_name}); + } + } + + protected: + ParticleDataSet::pointer particle_data() { return m_particle_data; } + const ParticleDataSet::pointer particle_data() const { return m_particle_data; } + }; + + // A mixin to get an IFiducial + // + // Configuration: + // + // "fiducial" : string type/name of an IFiducial + struct NeedFiducial { + IFiducial::pointer m_fiducial; + void configure(const WireCell::Configuration &cfg); + }; + + // A mixin to get an IPCTransformSet + // + // Configuration: + // + // "pc_transforms" : 
string type/name of an IPCTransformSet + struct NeedPCTS { + Clus::IPCTransformSet::pointer m_pcts; + void configure(const WireCell::Configuration &cfg); + }; + + // A mixin for things that need to be configured for a scope (pc name and coord names) + // + // Configuration: + // + // "pc_name" : string name for a "node-local" PointCloud::Dataset + // "coords" : array of string giving names of arrays in the PC to use as coordinates + struct NeedScope { + PointCloud::Tree::Scope m_scope; + NeedScope(const std::string &pcname = "3d", + const std::vector &coords = {"x", "y", "z"}, + size_t depth = 0); + void configure(const WireCell::Configuration &cfg); + }; + +} +#endif diff --git a/clus/inc/WireCellClus/ClusteringRetile.h b/clus/inc/WireCellClus/ClusteringRetile.h deleted file mode 100644 index 983c9979b..000000000 --- a/clus/inc/WireCellClus/ClusteringRetile.h +++ /dev/null @@ -1,126 +0,0 @@ -// This defines a class to be called by ClusteringFuncs. -// -// We do not provide a function like others but instead code to the -// ClusteringFuncs API because some info like RayGrid::Coordinates is needed for -// each call. -// -// When called it "retiles the cluster" using this sequence. -// -// 1) constructs layers of "activity" from input grouping. -// 2) applies "hacks" to the activity. -// 3) runs WCT tiling to create blobs. -// 4) runs blobs sampling to make point clouds -// 5) produces clusters such that the new blobs formed from an old cluster form a new "shadow" cluster. -// 6) forms a PC-tree -// 7) outputs the new grouping -// -// Developer notes, delete when no longer relevant: -// -// Unlike usual ..., the many FIXME's here and in the .cxx can NOT be ignored. -// Remove them as they are fixed. -// -// FIXME: (2) above is omitted, to start with, making this a very complex type -// of "copy". Xin will add the "hacks". -// -// FIXME: (7) is currently impossible until issue #377 provides a solution. We -// start by making and then dropping the "shadow" grouping. 
-// - - -#ifndef WIRECELLCLUS_CLUSTERINGRETILE -#define WIRECELLCLUS_CLUSTERINGRETILE - -#include "WireCellUtil/RayTiling.h" -#include "WireCellIface/IBlob.h" -#include "WireCellIface/IBlobSampler.h" -#include "WireCellIface/IAnodeFace.h" -#include "WireCellClus/Facade_Grouping.h" -#include "WireCellClus/Facade_Cluster.h" -#include "WireCellUtil/RayHelpers.h" -#include "WireCellAux/PlaneTools.h" - - -#include - -// This is not an appropriate name but I propagate it because that is what is -// used in ClusteringFuncs.h. Better that it be WireCell::Clus. -namespace WireCell::PointCloud::Facade { - - // There is a hard-wired factory method in ClusteringFuncs to which this - // class is added. - class ClusteringRetile { - public: - ClusteringRetile(const WireCell::Configuration& config); - - // fixme: we can not satisfy this type by including CluteringFuncs.h - // because that header must include this one. A refactoring would ease - // this problem. - using cluster_set_t = std::set; - - // FIXME: #377. - void operator()(Grouping& live_clusters, Grouping& dead_clusters, - cluster_set_t& cluster_connected_dead) const; - - private: - - // Step 1. Build activities from blobs in a cluster. - void get_activity(const Cluster& cluster, std::map, std::vector >& map_slices_measures) const; - - - // Step 2. Modify activity to suit. - void hack_activity(const Cluster& cluster, std::map, std::vector >& map_slices_measures) const; - - // Step 3. Form IBlobs from activities. - std::vector make_iblobs(std::map, std::vector >& map_slices_measures) const; - - std::set remove_bad_blobs(const Cluster& cluster, Cluster& shad_cluster, int tick_span) const; - - // Remaining steps are done in the operator() directly. - - /** Configuration: "sampler" (required) - - The type/name an IBlobSampler for producing the "3d" point cloud. - - If not given, the retailed blob tree nodes will not have point clouds. 
- */ - WireCell::IBlobSampler::pointer m_sampler; - - // fixme: this restricts ClusteringRetile to single-anode-face clusters. - // As such, it will likely freak out if fed clusters that have been - // stitched across anode faces. Since tiling is inherently a per-face - // operation, this may be okay. - /** Configuration "face" (optional, default is 0) - - The INDEX of the face in the anode's list of faces to use. - */ - IAnodeFace::pointer m_face; - - /** Configuration "cut_time_low" (optional, default is -1e9) - Lower bound for time cut in nanoseconds - */ - double m_cut_time_low; - - /** Configuration "cut_time_high" (optional, default is 1e9) - Upper bound for time cut in nanoseconds - */ - double m_cut_time_high; - - std::vector m_plane_infos; - - /** Configuration "anode" (required) - - The type/name of the anode. - */ - // nothing to store. - - std::map> process_groupings( - Grouping& original, - Grouping& shadow, - const std::string& aname = "isolated", - const std::string& pname = "perblob") const; - - }; -} - - -#endif diff --git a/clus/inc/WireCellClus/DetUtils.h b/clus/inc/WireCellClus/DetUtils.h new file mode 100644 index 000000000..fbc682850 --- /dev/null +++ b/clus/inc/WireCellClus/DetUtils.h @@ -0,0 +1,28 @@ +/** This API is for various utility functions related to detector information */ + +#ifndef WIRECELL_CLUS_DETUTILS +#define WIRECELL_CLUS_DETUTILS + +#include "WireCellClus/DynamicPointCloud.h" +#include "WireCellIface/IDetectorVolumes.h" +#include "WireCellUtil/Point.h" +#include +#include +#include + +namespace WireCell::Clus { + + /// Get all APA ident numbers known to the detector volumes. + std::set apa_idents(IDetectorVolumes::pointer dv); + + /// Get map from "layer" level WPID to a drift "dirx" vector and three wire angles. + using wpid_faceparams_map = std::map>; + wpid_faceparams_map face_parameters(IDetectorVolumes::pointer dv); + + /// Create a new and empty DynamicPointCloud. The DV is needed for the wire angles. 
+ std::shared_ptr make_dynamicpointcloud(IDetectorVolumes::pointer dv); + + +} + +#endif diff --git a/clus/inc/WireCellClus/DynamicPointCloud.h b/clus/inc/WireCellClus/DynamicPointCloud.h new file mode 100644 index 000000000..72d55e8d7 --- /dev/null +++ b/clus/inc/WireCellClus/DynamicPointCloud.h @@ -0,0 +1,112 @@ +#ifndef WIRECELLCLUS_DYNAMICPOINTCLOUD_H +#define WIRECELLCLUS_DYNAMICPOINTCLOUD_H + +#include "WireCellUtil/Graph.h" +#include "WireCellUtil/Point.h" +#include "WireCellUtil/PointCloudDataset.h" +#include "WireCellUtil/PointTree.h" +#include "WireCellUtil/Spdlog.h" +#include "WireCellUtil/Units.h" +// #include "WireCellUtil/D2Vector.h" +#include "WireCellIface/IAnodeFace.h" +#include "WireCellIface/IAnodePlane.h" +#include "WireCellIface/IDetectorVolumes.h" + +namespace WireCell::Clus::Facade { + using points_t = PointCloud::Tree::Points; + using node_t = PointCloud::Tree::Points::node_t; + using node_ptr = std::unique_ptr; + using geo_point_t = WireCell::Point; + using geo_vector_t = WireCell::Vector; + using geo_point2d_t = D3Vector; + using geo_vector2d_t = D3Vector; + // using wpid_params_t = std::map>; + + class Cluster; + class Blob; + + class DynamicPointCloud { + public: + struct DPCPoint { + double x, y, z; + // this could be a duplicated from Blob + // if wpid then x/y_2d length must be 3 + int wpid; + const Cluster *cluster; + const Blob *blob; + std::vector> x_2d; + std::vector> y_2d; + std::vector> wpid_2d; + std::vector wind; // length 3 or 0 + std::vector dist_cut; // length 3 or 0 + }; + using nfkd_t = NFKDVec::Tree; + + DynamicPointCloud(const std::map> &wpid_params) + : m_wpid_params(wpid_params) + { + } + ~DynamicPointCloud() = default; + + void add_points(const std::vector &points); + const std::vector& get_points() const { return m_points; } + + DynamicPointCloud::nfkd_t &kd3d() const; + DynamicPointCloud::nfkd_t &kd2d(const int plane, const int face, const int apa) const; + const std::unordered_map &kd2d_l2g(const int plane, const 
int face, const int apa) const; + const std::unordered_map> &kd2d_g2l(const int plane, const int face, const int apa) const; + + /// @brief: kd2d().radius(radius) + /// @return: [dist, Cluster, global point_index] + std::vector> get_2d_points_info(const geo_point_t &p, + const double radius, + const int plane, const int face, + const int apa) const; + /// @brief: kd2d().knn(1) + /// @brief: dist, Cluster, global point_index + std::tuple get_closest_2d_point_info(const geo_point_t &p, const int plane, + const int face, const int apa) const; + + std::pair hough_transform(const geo_point_t &origin, const double dis) const; + geo_point_t vhough_transform(const geo_point_t &origin, const double dis) const; + + private: + // main data + std::vector m_points; + + // for 3D only consider all apa for now + mutable std::unique_ptr m_kd3d{nullptr}; + + std::map> m_wpid_params; + + // for 2D, wpid to kd + mutable std::map> m_kd2d; + mutable std::unordered_map> m_kd2d_index_l2g; + mutable std::unordered_map >> m_kd2d_index_g2l; + }; + + std::vector + make_points_cluster(const Cluster *cluster, + const std::map> &wpid_params, bool flag_wrap = false); + + std::vector make_points_cluster_skeleton( + const Cluster *cluster, const IDetectorVolumes::pointer dv, + const std::map> &wpid_params, + const std::vector& path_wcps, + bool flag_wrap = false, + const double step = 0.6 * units::cm); + + std::vector make_points_direct(const Cluster *cluster, const IDetectorVolumes::pointer dv, const std::map> &wpid_params, std::vector>& points_info, bool flag_wrap = false); + + /// @brief add points from p_test along dir with range and step + /// @attention: the index_uvw is hacked to store the distance cut + std::vector make_points_linear_extrapolation( + const Cluster *cluster, const geo_point_t &p_test, const geo_point_t &dir_unmorm, const double range, + const double step, const double angle, const IDetectorVolumes::pointer dv, + const std::map> &wpid_params); + + void fill_wrap_points(const 
Cluster *cluster, const geo_point_t &point, const WirePlaneId &wpid_point, std::vector>& p_x, std::vector>& p_y, std::vector>& p_wpid); + +} // namespace WireCell::Clus::Facade + +#endif // WIRECELLCLUS_DYNAMICPOINTCLOUD_H diff --git a/clus/inc/WireCellClus/Facade.h b/clus/inc/WireCellClus/Facade.h index 9532bafcf..46248eee3 100644 --- a/clus/inc/WireCellClus/Facade.h +++ b/clus/inc/WireCellClus/Facade.h @@ -9,5 +9,7 @@ #include "WireCellClus/Facade_Blob.h" #include "WireCellClus/Facade_Cluster.h" #include "WireCellClus/Facade_Grouping.h" +#include "WireCellClus/Facade_Ensemble.h" +#include "WireCellClus/DynamicPointCloud.h" #endif diff --git a/clus/inc/WireCellClus/Facade_Blob.h b/clus/inc/WireCellClus/Facade_Blob.h index 887a388a8..6eb231f17 100644 --- a/clus/inc/WireCellClus/Facade_Blob.h +++ b/clus/inc/WireCellClus/Facade_Blob.h @@ -13,13 +13,12 @@ #include "WireCellUtil/Graph.h" #include "WireCellIface/IAnodePlane.h" #include "WireCellIface/IAnodeFace.h" +#include "WireCellIface/WirePlaneId.h" +#include "WireCellClus/Facade_Mixins.h" #include "WireCellClus/Facade_Util.h" - -// using namespace WireCell; NO! do not open up namespaces in header files! - -namespace WireCell::PointCloud::Facade { +namespace WireCell::Clus::Facade { class Cluster; @@ -28,7 +27,7 @@ namespace WireCell::PointCloud::Facade { float_t center_x{0}; float_t center_y{0}; float_t center_z{0}; - int_t face{0}; + WireCell::WirePlaneId wpid{0}; int_t npoints{0}; int_t slice_index_min{0}; // unit: tick @@ -49,9 +48,11 @@ namespace WireCell::PointCloud::Facade { /// Give a node "Blob" semantics - class Blob : public NaryTree::Facade, public Mixin { - public: - Blob() : Mixin(*this, "scalar") {} + class Blob : public NaryTree::Facade + , public Mixins::Cached + { + public: + Blob() : Mixins::Cached(*this, "scalar") {} virtual ~Blob() {} // Return the cluster to which this blob is a child. May be nullptr. 
@@ -67,7 +68,8 @@ namespace WireCell::PointCloud::Facade { float_t center_y() const { return cache().center_y; } float_t center_z() const { return cache().center_z; } int_t npoints() const { return cache().npoints; } - int_t face() const {return cache().face;} + WireCell::WirePlaneId wpid() const {return cache().wpid;} + // int_t face() const { return cache().wpid.face(); } // units are number of ticks /// FIXME: change min, max to begin end @@ -90,12 +92,21 @@ namespace WireCell::PointCloud::Facade { size_t hash() const; // Return the scope points. - std::vector points() const; + // std::vector points(const std::string& pc_name = "3d", const std::vector& coords = {"x", "y", "z"}) const; + std::vector points(const std::string& pc_name, const std::vector& coords) const; size_t nbpoints() const; // Check facade consistency bool sanity(Log::logptr_t log = nullptr) const; + // charge information, access cluster --> grouping, and use CTPC cache to access these information ... + double estimate_total_charge() const; + double estimate_minimum_charge() const; + double get_wire_charge(int plane, const int_t wire_index) const; + double get_wire_charge_error(int plane, const int_t wire_index) const; + // void check_dead_wire_consistency() const; + // + private: // moved to cache @@ -113,14 +124,16 @@ namespace WireCell::PointCloud::Facade { void sort_blobs(std::vector& blobs); void sort_blobs(std::vector& blobs); - struct blob_less_functor { - bool operator()(const Facade::Blob* a, const Facade::Blob* b) const { - return Facade::blob_less(a, b); + struct BlobLess { + bool operator()(const Blob* a, const Facade::Blob* b) const { + return blob_less(a, b); } }; + using BlobSet = std::set; + -} // namespace WireCell::PointCloud::Facade +} // namespace WireCell::Clus::Facade -template <> struct fmt::formatter : fmt::ostream_formatter {}; +template <> struct fmt::formatter : fmt::ostream_formatter {}; #endif diff --git a/clus/inc/WireCellClus/Facade_Cluster.h 
b/clus/inc/WireCellClus/Facade_Cluster.h index 9966bb957..741396e01 100644 --- a/clus/inc/WireCellClus/Facade_Cluster.h +++ b/clus/inc/WireCellClus/Facade_Cluster.h @@ -10,48 +10,173 @@ #include "WireCellUtil/Point.h" #include "WireCellUtil/Units.h" #include "WireCellUtil/Spdlog.h" -#include "WireCellUtil/Graph.h" + #include "WireCellIface/IAnodePlane.h" #include "WireCellIface/IAnodeFace.h" +#include "WireCellIface/IDetectorVolumes.h" -#include "WireCellClus/Facade_Util.h" +#include "WireCellClus/Facade_Mixins.h" #include "WireCellClus/Facade_Blob.h" +#include "WireCellClus/Facade_ClusterCache.h" +#include "WireCellClus/IPCTransform.h" +#include "WireCellClus/Graphs.h" + +#include +namespace WireCell::Clus::Facade { -// using namespace WireCell; NO! do not open up namespaces in header files! + using IPCTransformSet = Clus::IPCTransformSet; + using namespace WireCell::PointCloud; -namespace WireCell::PointCloud::Facade { class Blob; class Grouping; - struct ClusterCache { }; - // Give a node "Cluster" semantics. A cluster node's children are blob nodes. - class Cluster : public NaryTree::FacadeParent, public Mixin { - - // The expected scope. - const Tree::Scope scope = {"3d", {"x", "y", "z"}}; - const Tree::Scope scope_wire_index = {"3d", {"uwire_index", "vwire_index", "wwire_index"}}; - Tree::Scope scope2ds[3] = { - {"2dp0", {"x", "y"}}, - {"2dp1", {"x", "y"}}, - {"2dp2", {"x", "y"}} - }; - - public: - Cluster() : Mixin(*this, "cluster_scalar") {} + class Cluster : public NaryTree::FacadeParent + , public Mixins::Cached + , public Mixins::Graphs + { + public: + Cluster() : Mixins::Cached(*this, "cluster_scalar"){} virtual ~Cluster() {} - // Override Mixin - virtual void clear_cache() const; + void invalidate_cache() {clear_cache();} + + // return raw pc information ... 
+ void set_default_scope(const Tree::Scope& scope); + const Tree::Scope& get_default_scope() const {return m_default_scope;} + const Tree::Scope& get_raw_scope() const {return m_scope_3d_raw;} + + // set, get scope filter ... + void set_scope_filter(const Tree::Scope& scope, bool flag=true); + bool get_scope_filter(const Tree::Scope& scope) const; + + void set_scope_transform(const Tree::Scope& scope, const std::string& transform_name); + + // If no scope given, will use default scope. + std::string get_scope_transform(Tree::Scope scope = {}) const; + + const Tree::Scope& get_scope(const std::string& scope_name) const; + + /// Set other's default scope, filter and transform to this. See also from(). + void default_scope_from(const Cluster& other); + + /// Set on this various meta information from the other including + /// default scope and any flags in the default flags_ prefix. + /// + /// This may be appropriate to call when this cluster is made from + /// others such as in a cluster merge. + /// + /// See set_flag(), get_flag(), flag_names() and flag_from() provided by + /// base class. 
+ void from(const Cluster& other); + + double get_cluster_t0() const; + void set_cluster_t0(double cluster_t0); + + // scopes_from() and from() + + void set_cluster_id(int id); + int get_cluster_id() const; + + /// @param correction_name: T0Correction + std::vector add_corrected_points(const Clus::IPCTransformSet::pointer pcts, + const std::string &correction_name); + + + /// Check if a point index is excluded from graph operations + /// @param point_index Global point index in cluster + /// @return True if point should be excluded from calculations + bool is_point_excluded(size_t point_index) const { + const auto& excluded = cache().excluded_points; + return excluded.find(point_index) != excluded.end(); + } + + /// Get all non-excluded point indices + /// @return Vector of valid point indices for calculations + std::vector get_valid_point_indices() const { + std::vector valid_indices; + const auto& excluded = cache().excluded_points; + + for (int i = 0; i < npoints(); ++i) { + if (excluded.find(i) == excluded.end()) { + valid_indices.push_back(i); + } + } + return valid_indices; + } + + /// Get excluded points information + /// @return Set of excluded point indices + const std::set& get_excluded_points() const { + return cache().excluded_points; + } + + /// Clear excluded points (useful for testing or manual control) + void clear_excluded_points() { + auto& cache_ref = const_cast(cache()); + cache_ref.excluded_points.clear(); + } + + /// Set excluded points manually (useful for testing) + void set_excluded_points(const std::set& excluded) { + auto& cache_ref = const_cast(cache()); + cache_ref.excluded_points = excluded; + } + - // Return the grouping to which this cluster is a child. May be nullptr. + + /// Return the grouping to which this cluster is a child. May be nullptr. Grouping* grouping(); const Grouping* grouping() const; - // Get the scoped view for the "3d" point cloud (x,y,z) + /// Order is synchronized with children(). 
+ std::vector wpids_blob() const; + + /// return the wpid given a point ... + WirePlaneId wpid(const geo_point_t& point) const; + + /// Get an arbitrary scoped view. Make sure type T matches the type of + /// the scope's coords arrays. + template + const Tree::ScopedView& sv(const Tree::Scope& sc) const + { + return m_node->value.scoped_view(sc); + } + + /// Default scoped view is a view of the default scope. + template + const Tree::ScopedView& sv() const + { + return m_node->value.scoped_view(m_default_scope); + } + + + /// Return a vector of values from given array name "key" that spans the + /// points in nodes of the 3D RAW scoped view. Note, the "key" name + /// need not be in the RAW scope. + template + const std::vector points_property(const std::string& key) const + { + return sv().template flat_vector(key); + } + + /// Return a vector of values from given array name "key" that spans the + /// points in nodes of the given scoped. Note, the "key" name need not + /// be in the RAW scope. If the pcname is not given, the scope pcname + /// is used. + template + const std::vector points_property(const std::string& key, + const Tree::Scope& scope, + std::string pcname="") const + { + return sv(scope).template flat_vector(key, pcname); + } + + // Get a 3D scopeed view scoped view for the "3d" point cloud (x,y,z) using sv3d_t = Tree::ScopedView; const sv3d_t& sv3d() const; + const sv3d_t& sv3d_raw() const; // Access the k-d tree for "3d" point cloud (x,y,z). // Note, this may trigger the underlying k-d build. 
@@ -59,25 +184,50 @@ namespace WireCell::PointCloud::Facade { const kd3d_t& kd3d() const; const kd3d_t& kd() const; + const kd3d_t& kd3d_raw() const; + + public: + + // Steiner point cloud k-d tree queries + using steiner_kd_results_t = std::vector>; + + steiner_kd_results_t kd_steiner_radius(double radius_not_squared, const geo_point_t& query_point, + const std::string& steiner_pc_name = "steiner_pc") const; + steiner_kd_results_t kd_steiner_knn(int nnearest, const geo_point_t& query_point, + const std::string& steiner_pc_name = "steiner_pc") const; + + // Helper method to get steiner points from results + std::vector>> kd_steiner_points(const steiner_kd_results_t& res, + const std::string& steiner_pc_name = "steiner_pc") const; + + // Method to explicitly build the steiner k-d tree cache + void build_steiner_kd_cache(const std::string& steiner_pc_name = "steiner_pc") const; + using kd_results_t = kd3d_t::results_type; // Perform a k-d tree radius query. This radius is linear distance kd_results_t kd_radius(double radius_not_squared, const geo_point_t& query_point) const; // Perform a k-d tree NN query. kd_results_t kd_knn(int nnearest, const geo_point_t& query_point) const; + /// Return std::vector kd_points(const kd_results_t& res); std::vector kd_points(const kd_results_t& res) const; + // std::vector kd_points_raw(const kd_results_t& res); + // std::vector kd_points_raw(const kd_results_t& res) const; + // print all blob information void print_blobs_info() const; std::string dump() const; - std::string dump_graph() const; + // std::string dump_graph() const; // Get all blobs in k-d tree order. This is different than children() // order and different that sort_blobs() order. std::vector kd_blobs(); std::vector kd_blobs() const; + // // Return the number of blobs from the k-d tree + // size_t nkd_blobs() const; // Return the blob with the point at the given k-d tree point index. 
Blob* blob_with_point(size_t point_index); @@ -90,12 +240,53 @@ namespace WireCell::PointCloud::Facade { // Return the 3D point at the k-d tree point index. Calling this in a // tight loop should probably be avoided. Instead get the full points() array. geo_point_t point3d(size_t point_index) const; - // alias for point3d to match the Simple3DPointCloud interface - geo_point_t point(size_t point_index) const; + geo_point_t point3d_raw(size_t point_index) const; + + // return WirePlaneId for an index ... + WirePlaneId wire_plane_id(size_t point_index) const; + + /// Get wire indices for a specific point index and plane + /// @param point_index Global point index in cluster + /// @param plane Plane index (0=U, 1=V, 2=W) + /// @return Wire index for the specified plane + int wire_index(size_t point_index, int plane) const; + + /// Get charge value for a specific point index and plane + /// @param point_index Global point index in cluster + /// @param plane Plane index (0=U, 1=V, 2=W) + /// @return Charge value for the specified plane + double charge_value(size_t point_index, int plane) const; + + /// Get charge uncertainty for a specific point index and plane + /// @param point_index Global point index in cluster + /// @param plane Plane index (0=U, 1=V, 2=W) + /// @return Charge uncertainty for the specified plane + double charge_uncertainty(size_t point_index, int plane) const; + + /// Check if wire is dead for a specific point index and plane + /// @param point_index Global point index in cluster + /// @param plane Plane index (0=U, 1=V, 2=W) + /// @param dead_threshold Uncertainty threshold for dead wire detection + /// @return True if wire is considered dead + bool is_wire_dead(size_t point_index, int plane, double dead_threshold = 1e10) const; + + /// Calculate charge for a Wire-Cell point using prototype algorithm + /// @param point_index Global point index in cluster + /// @param charge_cut Minimum charge threshold (default: 0.0) + /// @param 
disable_dead_mix_cell Enable dead wire handling (default: false) + /// @return Pair of (all_planes_good, calculated_charge) + std::pair calc_charge_wcp( + size_t point_index, + double charge_cut = 4000.0, + bool disable_dead_mix_cell = true) const; + + // Return vector is size 3 holding vectors of size npoints providing k-d tree coordinate points. using points_type = kd3d_t::points_type; + // Return points in a scope in point order. If no scope is given, use default_scope. const points_type& points() const; + const points_type& points_raw() const; // Return charge-weighted average position of points of blobs within distance of point. geo_point_t calc_ave_pos(const geo_point_t& origin, const double dis) const; @@ -117,7 +308,7 @@ namespace WireCell::PointCloud::Facade { template std::tuple get_closest_points(const PCType& two) const { - return PointCloud::Facade::get_closest_points(*this, two); + return get_closest_points(*this, two); } // @@ -147,9 +338,12 @@ namespace WireCell::PointCloud::Facade { // Return the number of points in the k-d tree int npoints() const; + // Safely check if the cluster is valid (m_node is non-null) + bool is_valid() const { return this->m_node != nullptr; } + // Number of points according to sum of Blob::nbpoints() // WCP: int get_num_points() - size_t nbpoints() const; + // size_t nbpoints() const; // Return the number of points within radius of the point. Note, radius // is a LINEAR distance through the L2 metric is used internally. @@ -200,19 +394,31 @@ namespace WireCell::PointCloud::Facade { /// to be more in line with the overall point cloud. 
void adjust_wcpoints_parallel(size_t& start_idx, size_t& end_idx) const; - /// WCP: Construct_skeleton - bool construct_skeleton(const bool use_ctpc); + /// Get extreme points from the cluster, optionally filtered by spatial relationship to a reference cluster + /// @param reference_cluster Optional reference cluster for spatial filtering (corresponds to old_time_mcells_map in prototype) + /// @return Vector of vectors, each containing extreme points in different categories + /// Returns extreme points along main axis, and other significant extremes along coordinate axes + std::vector> get_extreme_wcps( + const Cluster* reference_cluster = nullptr) const; + + std::pair get_two_boundary_wcps( + bool flag_cosmic = false) const; + + std::pair get_two_boundary_steiner_graph_idx(const std::string& steiner_graph_name = "steiner_graph", const std::string& steiner_pc_name = "steiner_pc", bool flag_cosmic = false) const; // return idx for steiner tree /// section for 2D PC // Get the scoped view for the "3d" point cloud (x,y,z) using sv2d_t = Tree::ScopedView; - const sv2d_t& sv2d(const size_t plane) const; + /// @param plane 0, 1, 2 + /// @param wpid currently provides the apa and face + const sv2d_t& sv2d(const int apa, const int face, const size_t plane) const; using kd2d_t = sv2d_t::nfkd_t; - const kd2d_t& kd2d(const size_t plane) const; + const kd2d_t& kd2d(const int apa, const int face, const size_t plane) const; /// - std::vector get_closest_2d_index(const geo_point_t& p, const double search_radius, const int plane) const; + std::vector get_closest_2d_index(const geo_point_t& p, const double search_radius, const int apa, const int face, const int plane) const; + std::vector is_connected(const Cluster& c, const int offset) const; @@ -248,14 +454,14 @@ namespace WireCell::PointCloud::Facade { // extents in each view and in time. double get_length() const; - // Return blob at the front of the time blob map. Raises ValueError if cluster is empty. 
- const Blob* get_first_blob() const; + // // Return blob at the front of the time blob map. Raises ValueError if cluster is empty. + // const Blob* get_first_blob() const; - // Return blob at the back of the time blob map. Raises ValueError if cluster is empty. - const Blob* get_last_blob() const; + // // Return blob at the back of the time blob map. Raises ValueError if cluster is empty. + // const Blob* get_last_blob() const; // number of unique slice times, i.e. time_blob_map().size() - size_t get_num_time_slices() const; + // size_t get_num_time_slices() const; // Return a value representing the content of this cluster. size_t hash() const; @@ -263,60 +469,125 @@ namespace WireCell::PointCloud::Facade { // Check facade consistency between blob view and k-d tree view. bool sanity(Log::logptr_t log = nullptr) const; - inline MCUGraph* get_graph() { return m_graph.get(); } - inline const MCUGraph* get_graph() const { return m_graph.get(); } - void Create_graph(const bool use_ctpc = true) const; - - /// @brief edges inside blobs and between overlapping blobs - /// @attention has distance-based cuts - void Establish_close_connected_graph() const; - /// @attention some distance-based cuts - void Connect_graph(const bool use_ctpc) const; - void Connect_graph() const; - void Connect_graph_overclustering_protection(const bool use_ctpc) const; - std::vector examine_graph(const bool use_ctpc = true) const; /// - void dijkstra_shortest_paths(const size_t pt_idx, const bool use_ctpc = true) const; + /// Blob-level connected components + /// + /// Return a connected components array that is aligned with the blob + /// node children list. Each element gives a "group number" identifying + /// a connected subgraph in which the corresponding blob resides. The + /// special group number -1 indicates the corresponding blob does not + /// contribute points to the graph. + /// + /// The "relaxed" graph (ne' "overclustering protection") is used. 
See + /// graph_algorithms() family of methods. + /// + /// Note, this method used to be called "examine_graph()". + /// + std::vector connected_blobs(IDetectorVolumes::pointer dv, IPCTransformSet::pointer pcts) const; + + /// Return graph of given flavor or try to make it if it does not exist. + /// See graph_algorithms() for flavors that can be made. + const graph_type& find_graph(const std::string& flavor = "basic") const; + graph_type& find_graph(const std::string& flavor = "basic"); + + const graph_type& find_graph(const std::string& flavor, const Cluster& ref_cluster) const; + graph_type& find_graph(const std::string& flavor, const Cluster& ref_cluster); + + const graph_type& find_graph(const std::string& flavor, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts) const; + graph_type& find_graph(const std::string& flavor, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts); + + const graph_type& find_graph(const std::string& flavor, + const Cluster& ref_cluster, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts) const; + graph_type& find_graph(const std::string& flavor, + const Cluster& ref_cluster, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts); + + /// + /// Graph algorithms hold a graph and are cached /// - void cal_shortest_path(const size_t dest_wcp_index) const; + /// Get a graph algorithms by its "flavor" and throw KeyError if not + /// found. + /// + /// Note, as a special case, the default graph flavor "basic" can and + /// will be produced on the fly. + const WireCell::Clus::Graphs::Weighted::GraphAlgorithms& + graph_algorithms(const std::string& flavor = "basic") const; + const WireCell::Clus::Graphs::Weighted::GraphAlgorithms& + graph_algorithms(const std::string& flavor, const Cluster& ref_cluster) const; + /// Get and construct if needed a GA for a graph of a known flavor and + /// that uses detector information in its construction. 
Known flavors + /// include: + /// + /// - "ctpc" :: likely used for Djikstra's shortest paths + /// - "relaxed" :: likely used for connected blobs + /// + /// If the flavor is not in this known set, KeyError is + /// thrown. /// - inline const std::list& get_path_wcps() const { return m_path_wcps; } - inline const std::list& get_path_blobs() const { return m_path_mcells; } - // In class declaration: - std::vector indices_to_points(const std::list& path_indices) const; - void organize_points_path_vec(std::vector& path_points, double low_dis_limit) const; - void organize_path_points(std::vector& path_points, double low_dis_limit) const; + /// Note, "relaxed" used to be known as "overclustering protection"). + const WireCell::Clus::Graphs::Weighted::GraphAlgorithms& + graph_algorithms(const std::string& flavor, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts) const; + + const WireCell::Clus::Graphs::Weighted::GraphAlgorithms& + graph_algorithms(const std::string& flavor, + const Cluster& ref_cluster, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts) const; + + /// Graph algorithm cache management methods for Steiner operations + + /// Clear the cache for a specific GraphAlgorithms instance + void clear_graph_algorithms_cache(const std::string& graph_name); + + /// Remove a GraphAlgorithms instance entirely (will be recreated on next access) + void remove_graph_algorithms(const std::string& graph_name); + + /// Clear all GraphAlgorithms caches + void clear_all_graph_algorithms_caches(); + + /// Get information about cached GraphAlgorithms + std::vector get_cached_graph_algorithms() const; + + + // Return 3D points for given indices in the 3d PC. 
+ std::vector indices_to_points(const std::vector& path_indices) const; + + // void organize_points_path_vec(std::vector& path_points, double low_dis_limit) const; + // void organize_path_points(std::vector& path_points, double low_dis_limit) const; + // TODO: relying on scoped_view to do the caching? using wire_indices_t = std::vector>; const wire_indices_t& wire_indices() const; std::vector get_hull() const; - geo_point_t get_center() const; - geo_vector_t get_pca_axis(int axis) const; - double get_pca_value(int axis) const; - // Add this inline member function in the class definition: - inline void reset_pca() { m_pca_calculated = false; } + // Return PCA calculated on blob children sample points + // PCA has attributes: {center,axis,values} + using PCA = ClusterCache::PCA; + PCA& get_pca() const; // start slice index (tick number) to blob facade pointer can be // duplicated, example usage: // https://github.com/HaiwangYu/learn-cpp/blob/main/test-multimap.cxx // WCP: get_time_cells_set_map - using BlobSet = std::set; - using time_blob_map_t = std::map; + using time_blob_map_t = ClusterCache::time_blob_map_t; const time_blob_map_t& time_blob_map() const; - // PCA helper functions - void Calc_PCA(std::vector& points) const; - // Calculate PCA direction for a set of points around a center point - geo_vector_t calc_pca_dir(const geo_point_t& center, const std::vector& points) const; - /// @brief Determine if a cluster may be separated due to crossing the boundary. /// @return connected components array or empty if separation is not warranted. std::vector @@ -327,10 +598,15 @@ namespace WireCell::PointCloud::Facade { /// TODO: currently return copy, return a const reference? std::vector get_blob_indices(const Blob*) const; + // Return the number of unique wires or ticks. 
+ std::map > get_uvwt_range() const; + std::tuple get_uvwt_min(int apa = 0, int face = 0) const; + std::tuple get_uvwt_max(int apa = 0, int face = 0) const; + + /// @brief to assess whether a given point (p_test) in a cluster is a vertex, or endpoint, based on asymmetry and occupancy criteria. /// @note p_test will be updated - bool judge_vertex(geo_point_t& p_test, const double asy_cut = 1. / 3., const double occupied_cut = 0.85); - + bool judge_vertex(geo_point_t& p_test, const IDetectorVolumes::pointer dv, const double asy_cut = 1. / 3., const double occupied_cut = 0.85); class Flash { friend class Cluster; @@ -374,46 +650,90 @@ namespace WireCell::PointCloud::Facade { // Return a flash. If there is none, it will hold default values. Flash get_flash() const; + /// Helper function to check if a point is spatially related to a reference cluster using time_blob_map + /// Implements the same filtering logic as prototype's old_time_mcells_map checking + /// @param point_index Index of point in current cluster + /// @param ref_time_blob_map Reference cluster's time-indexed blob map + /// @return True if point's wire indices fall within reference cluster's spatial regions + bool is_point_spatially_related_to_time_blobs( + size_t point_index, + const time_blob_map_t& ref_time_blob_map, + bool flag_nearby_timeslice + ) const; + private: - mutable time_blob_map_t m_time_blob_map; // lazy, do not access directly. - mutable std::map> m_map_mcell_indices; // lazy, do not access directly. - // Add to private members in Facade_Cluster.h: - mutable std::vector m_hull_points; - mutable bool m_hull_calculated{false}; + // default scope for all points with raw x,y,z as coordinates + std::map m_scopes = { + {"scope_3d_raw", {"3d", {"x", "y", "z"}}} + }; + // FIXME: shoud we remove this in the future? 
+ const Tree::Scope& m_scope_3d_raw = m_scopes.at("scope_3d_raw"); + const Tree::Scope m_scope_wire_index = {"3d", {"uwire_index", "vwire_index", "wwire_index"}}; + std::string m_scope2ds_prefix[3] = {"2dp0", "2dp1", "2dp2"}; + Tree::Scope m_default_scope = m_scope_3d_raw; + std::map m_map_scope_filter={{m_scope_3d_raw.hash(), true}}; + std::map m_map_scope_transform={{m_scope_3d_raw.hash(), "Unity"}}; + + // We handle graph algorithms special as the GA's use graphs that are + // held in their own cache in the Mixins::Graphs base. + mutable std::map m_galgs; + + - // Cached and lazily calculated in get_length(). - // Getting a new node invalidates by setting to 0. - mutable double m_length{0}; - // Cached and lazily calculated in npoints() - mutable int m_npoints{0}; + /// Helper function to check wire range overlap between a point and a reference blob + /// @param point_index Index of point in current cluster + /// @param ref_blob Reference blob to check against + /// @return True if point's wire indices fall within reference blob's wire ranges + bool check_wire_ranges_match(size_t point_index, const Blob* ref_blob) const; - void Calc_PCA() const; + /// @brief Get live wire indices for a given plane across all blobs in cluster, grouped by (apa, face) + /// @param plane Wire plane index (0=U, 1=V, 2=W) + /// @return Map from (apa, face) pairs to sets of live wire indices for the specified plane + std::map, std::set> get_live_wire_indices(int plane) const; + /// @brief Count live channels between two wire indices + /// @param wire_min Minimum wire index + /// @param wire_max Maximum wire index + /// @param live_indices Set of all live wire indices + /// @return Number of live channels in the range + int count_live_channels_between(int wire_min, int wire_max, const std::set& live_indices) const; + + /// @brief Calculate boundary metric between two points + /// @param point_idx1 Index of first point + /// @param point_idx2 Index of second point + /// @param 
live_u_index Set of live U wire indices + /// @param live_v_index Set of live V wire indices + /// @param live_w_index Set of live W wire indices + /// @param distance_norm Distance normalization factor + /// @param flag_cosmic Use cosmic ray optimized metric + /// @return Boundary metric value + double calculate_boundary_metric( + int point_idx1, int point_idx2, + const std::set& live_u_index, + const std::set& live_v_index, + const std::set& live_w_index, + double distance_norm, bool flag_cosmic) const; + + + // Helper to ensure steiner k-d tree cache is valid + void ensure_steiner_kd_cache(const std::string& steiner_pc_name) const; + + protected: - mutable bool m_pca_calculated{false}; - // lazy, do not access directly. - mutable geo_point_t m_center; - mutable geo_vector_t m_pca_axis[3]; - mutable double m_pca_values[3]; - - // m_graph - mutable std::unique_ptr m_graph; - // create things for Dijkstra - mutable std::vector m_parents; - mutable std::vector m_distances; - mutable int m_source_pt_index{-1}; - mutable std::list m_path_wcps; - mutable std::list m_path_mcells; - - public: // made public only for debugging - // Return the number of unique wires or ticks. - std::tuple get_uvwt_range() const; - std::tuple get_uvwt_min() const; - std::tuple get_uvwt_max() const; - }; + // + // Caching. + // + // See the ClusterCache struct in Facade_ClusterCache.h. + // + // DO NOT PUT BARE CACHE ITEMS DIRECTLY IN THE Cluster class. + // + virtual void fill_cache(ClusterCache& cache) const; + }; // Cluster std::ostream& operator<<(std::ostream& os, const Cluster& cluster); + + // Return true if a is less than b. May be used as 3rd arg in std::sort to // get ascending order. For descending, pass to sort() rbegin()/rend() // instead of begin()/end().. 
@@ -422,7 +742,7 @@ namespace WireCell::PointCloud::Facade { void sort_clusters(std::vector& clusters); void sort_clusters(std::vector& clusters); - std::tuple get_uvwt_range(const Cluster* cluster, const std::vector& b2id, const int id); + std::map > get_uvwt_range(const Cluster* cluster, const std::vector& b2id, const int id); double get_length(const Cluster* cluster, const std::vector& b2id, const int id); struct cluster_less_functor { @@ -447,8 +767,8 @@ namespace WireCell::PointCloud::Facade { } }; -} // namespace WireCell::PointCloud::Facade +} // namespace WireCell::Clus::Facade -template <> struct fmt::formatter : fmt::ostream_formatter {}; +template <> struct fmt::formatter : fmt::ostream_formatter {}; #endif diff --git a/clus/inc/WireCellClus/Facade_ClusterCache.h b/clus/inc/WireCellClus/Facade_ClusterCache.h new file mode 100644 index 000000000..d819d489d --- /dev/null +++ b/clus/inc/WireCellClus/Facade_ClusterCache.h @@ -0,0 +1,86 @@ +#ifndef WIRECELLCLUS_FACADE_CLUSTERCACHE +#define WIRECELLCLUS_FACADE_CLUSTERCACHE + +#include "WireCellClus/Facade_Blob.h" +#include "WireCellClus/Facade_Util.h" +#include "WireCellClus/Graphs.h" + +// DO NOT #include Facade_Cluster.h itself. It depends on us, not vice versa. + +#include +#include + +namespace WireCell::Clus::Facade { + + + // ALL cached items for Cluster internal use go in this struct. + // + // DO NOT PLACE THEM BARE DIRECTLY IN Cluster. + + struct ClusterCache { + // order is synchronized with children() + + using time_blob_map_t = std::map > >; // apa, face, time, blobset + + // Maps apa/face/slice to child blob facade. Depends on PC tree structure. + time_blob_map_t time_blob_map; + + // Maps child blob node facade to the set of point indices for points in + // that blob. Depends on DEFAULT SCOPE. + std::map> map_mcell_indices; + + // The subset of points() that make up a convex hull. Depends on DEFAULT SCOPE. 
+ std::vector hull_points; + + // The "length" of a cluster estimated by time and wire extents. + // A zero length is invalid. + double length{0}; + // The number of points in the DEFAULT SCOPE. + int npoints{0}; + + + // mutable bool m_pca_calculated{false}; // use pca_axis.size() + struct PCA { + geo_point_t center; + std::vector axis; + std::vector values; + // if vectors are empty, PCA is invalid. + }; + // Depends on DEFAULT SCOPE + std::unique_ptr pca; + + + // Wire plane IDs by point (3d scoped view) index. Depends on the RAW SCOPE. + std::vector point_wpids; + + // Wire plane IDs by blob (child node) index. Depends on the DEFAULT SCOPE. + std::vector blob_wpids; + + // Wire indices by point index (3 vectors for u,v,w) + std::vector point_u_wire_indices; + std::vector point_v_wire_indices; + std::vector point_w_wire_indices; + + // Charge values by point index (3 vectors for u,v,w) + std::vector point_u_charges; + std::vector point_v_charges; + std::vector point_w_charges; + + // Charge uncertainties by point index (3 vectors for u,v,w) + std::vector point_u_charge_uncs; + std::vector point_v_charge_uncs; + std::vector point_w_charge_uncs; + + // Set of point indices excluded during graph operations (equivalent to prototype's excluded_points) + std::set excluded_points; + + // Steiner point cloud k-d tree cache + mutable std::unique_ptr steiner_kd; + mutable decltype(std::declval().get(std::vector{})) steiner_query3d; + mutable std::string cached_steiner_pc_name; + mutable bool steiner_kd_built{false}; + }; + +} + +#endif diff --git a/clus/inc/WireCellClus/Facade_Ensemble.h b/clus/inc/WireCellClus/Facade_Ensemble.h new file mode 100644 index 000000000..2afbff6b5 --- /dev/null +++ b/clus/inc/WireCellClus/Facade_Ensemble.h @@ -0,0 +1,65 @@ +#ifndef WIRECELLCLUS_FACADE_ENSEMBLE +#define WIRECELLCLUS_FACADE_ENSEMBLE + +#include "WireCellClus/Facade_Mixins.h" +#include "WireCellClus/Facade_Util.h" + +#include "WireCellUtil/PointTree.h" + + +namespace 
WireCell::Clus::Facade { + class Grouping; + + struct EnsembleCache { + /* nothing for now */ + }; + + /** Give a node "Ensemble" semantics. + * + * This node has an "ensemble" of "grouping" nodes. It does not have a + * strong meaning other than a "group of groupings". For example, an + * ensemble may collect "live" and "dead" groupings and later a "shadow" + * grouping may be added (eg, by retiling). + * + * Each child grouping is made or added through the ensemble with an + * associated "name" by which the grouping may later be retrieved. O.w., + * users are free to query the children for a desired grouping. + * + * A grouping holds its own name (via metadata "name" entry) and thus it is + * possible for more than one child Grouping to have the same name. It is + * up the to user to avoid or utilize this feature. + * + */ + class Ensemble : public NaryTree::FacadeParent + , public Mixins::Cached { + public: + + Ensemble() : Mixins::Cached(*this, "ensemble_scalar") {} + virtual ~Ensemble() {} + + /// Return false if no child Groupings have the name, else true. + bool has(const std::string& name) const; + + /// Return all child Grouping names in child-order. Redundant names + /// will appear multiple times. + std::vector names() const; + + std::set unique_names() const; + + /// Return all children with a given name. In the case of multiple + /// Groupings of the same name, the returned vector is in as-seen, + /// child-order. When no grouping has the name, the vector is empty. + std::vector with_name(const std::string& name); + std::vector with_name(const std::string& name) const; + + /// Make a named grouping child node and return its grouping facade. + Grouping& make_grouping(const std::string& name); + + // Add and take ownership of existing grouping node, return its facade. + Grouping& add_grouping_node(const std::string& name, points_t::node_ptr&& node); + + // Return the FIRST grouping found for each name. 
+ std::map groupings_by_name(); + }; +} +#endif diff --git a/clus/inc/WireCellClus/Facade_Grouping.h b/clus/inc/WireCellClus/Facade_Grouping.h index 42149b187..3e7a5ead5 100644 --- a/clus/inc/WireCellClus/Facade_Grouping.h +++ b/clus/inc/WireCellClus/Facade_Grouping.h @@ -13,80 +13,163 @@ #include "WireCellUtil/Graph.h" #include "WireCellIface/IAnodePlane.h" #include "WireCellIface/IAnodeFace.h" +#include "WireCellIface/IDetectorVolumes.h" +#include "WireCellClus/Facade_Mixins.h" #include "WireCellClus/Facade_Util.h" -// using namespace WireCell; NO! do not open up namespaces in header files! +// forward declare +namespace WireCell::Clus { + class FiducialUtils; + using FiducialUtilsPtr = std::shared_ptr; +} -namespace WireCell::PointCloud::Facade { +namespace WireCell::Clus::Facade { class Cluster; + using namespace WireCell::PointCloud; struct GroupingCache { - mapfp_t proj_centers; - mapfp_t pitch_mags; + std::unordered_map>> proj_centers; + std::unordered_map>> pitch_mags; + + // what cluster_wpids the grouping has. + std::set cluster_wpids; + std::set dv_wpids; // #381 if you give a crap about dead_winds. + // detector volume + std::map > map_time_offset; + std::map > map_drift_speed; + std::map > map_tick; + std::map > > map_wire_angles; + std::map > map_drift_dir; + std::map > map_nticks_per_slice; + std::map > > map_plane_channels; + + // Simple wire data cache organized by APA/face + struct WireDataCache { + // [plane][time_slice][wire_index] -> (charge, uncertainty) + std::array >>, 3> charge_data; + + // [plane][wire_index] -> (start_x_position, end_x_position) + std::array>, 3> dead_wires; + + // Track which planes have been cached + std::array cached = {false, false, false}; + }; + + // [apa][face] -> WireDataCache + mutable std::unordered_map> wire_caches; }; // Give a node "Grouping" semantics. A grouping node's children are cluster // nodes that are related in some way. 
- class Grouping : public NaryTree::FacadeParent, public Mixin { + class Grouping : public NaryTree::FacadeParent, + public Mixins::Cached { - TPCParams m_tp{}; // use default value by default. - /// TODO: replace TPCParams with this in the future? - IAnodePlane::pointer m_anode{nullptr}; + std::map m_anodes; + IDetectorVolumes::pointer m_dv{nullptr}; - public: + /// TODO: remove these in the future + // IAnodePlane::pointer m_anode{nullptr}; + // TPCParams m_tp{}; // use default value by default. - Grouping() : Mixin(*this, "grouping_scalar") {} + public: - // MUST call this sometimes after construction if non-default value needed. - // FIXME: TPCParams should be moved out of the facade! - void set_params(const TPCParams& tp) { m_tp = tp; } + Grouping() : Mixins::Cached(*this, "grouping_scalar") {} + + /// Incrementally set the ident number on clusters in children-order. + /// + /// The sort_order names a sorting algorithm. + /// - tree :: use as-is child-order + /// - size :: use cluster_less() order + /// - * :: anything else leaves clusters unchanged. + /// + /// Note, convention is to start counting from 1. + void enumerate_idents(const std::string& sort_order="tree", int start=1); + + + /// Separate a cluster into many according to groups. + /// + /// The return map represents loaned Clusters. This Grouping retains + /// ownership. + /// + /// This overrides (and calls) the NaryTree::FacadeParent base class + /// method. See its documentation for reference. This method will the + /// idents of the separated clusters to that of the input cluster. + /// + /// Note, the setting of IDs are not strongly constrained. Algorithms + /// are cautioned to NOT assume any particular ID assignment rule. See + /// enumerate_idents() and the "mabc.org" document. 
+ virtual + std::map separate(Cluster*& cluster, + const std::vector groups, + bool remove=false, + bool notify_value=true); + + + // TODO: remove this in the future + // void set_params(const TPCParams& tp) { m_tp = tp; } void set_params(const WireCell::Configuration& cfg); - const TPCParams& get_params() const { return m_tp; } - std::tuple wire_angles() const { return {m_tp.angle_u, m_tp.angle_v, m_tp.angle_w}; } - - void set_anode(const IAnodePlane::pointer anode) { m_anode = anode; } + // const TPCParams& get_params() const { return m_tp; } + + std::tuple wire_angles(const int apa, const int face) const { return {cache().map_wire_angles.at(apa).at(face).at(0), cache().map_wire_angles.at(apa).at(face).at(1), cache().map_wire_angles.at(apa).at(face).at(2)}; } + std::map > get_tick() const { return cache().map_tick;} + std::map > get_time_offset() const { return cache().map_time_offset;} + std::map > get_drift_speed() const { return cache().map_drift_speed;} + std::map > get_drift_dir() const { return cache().map_drift_dir;} + std::map > get_nticks_per_slice() const { return cache().map_nticks_per_slice;} + IChannel::vector get_plane_channels(const int apa, const int face, const WirePlaneLayer_t layer) const{return cache().map_plane_channels.at(apa).at(face).at(layer);} + std::shared_ptr get_plane_channel_wind(const int apa, const int face, const WirePlaneLayer_t layer, const int wind) const{ return cache().map_plane_channels.at(apa).at(face).at(layer).at(wind);} + + /// Set other's non-tree info on this. + /// + /// This may be appropriate when making a new grouping from an existing + /// grouping. It will transfer information not related to the PC tree + /// such as the any anodes. It specifically does not transfer any + /// information about children clusters. 
+ void from(const Grouping& other); + + /// TODO: remove this in the future + void set_anodes(const std::vector& anodes); + const IAnodePlane::pointer get_anode(const int ident) const; // also apa + + void set_detector_volumes(const IDetectorVolumes::pointer dv) { m_dv = dv; } + // const IDetectorVolumes::pointer get_detector_volumes() const { return m_dv; } // Return a value representing the content of this grouping. size_t hash() const; - const mapfp_t< std::map> >& all_dead_winds() const { - // this is added in order that we may dump it in json_summary() for debugging. - return m_dead_winds; - } + std::set wpids() const { return cache().cluster_wpids; } + std::set dv_wpids() const { return cache().dv_wpids; } - std::map>& get_dead_winds(const int face, const int pind) const - { - // make one if not exist - return m_dead_winds[face][pind]; + const std::map>>>& all_dead_winds() const; + std::map>& get_dead_winds(const int apa, const int face, const int pind) const; + - // This is utter garbage. #381. 
- } using sv2d_t = Tree::ScopedView; using kd2d_t = sv2d_t::nfkd_t; using kd_results_t = kd2d_t::results_type; - const kd2d_t& kd2d(const int face, const int pind) const; + const kd2d_t& kd2d(const int apa, const int face, const int pind) const; - const mapfp_t& proj_centers() const { + const std::unordered_map>>& proj_centers() const { return cache().proj_centers; } - const mapfp_t& pitch_mags() const { + const std::unordered_map>>& pitch_mags() const { return cache().pitch_mags; } - bool is_good_point(const geo_point_t& point, const int face, const double radius = 0.6 * units::cm, const int ch_range = 1, + bool is_good_point(const geo_point_t& point, const int apa, const int face, const double radius = 0.6 * units::cm, const int ch_range = 1, const int allowed_bad = 1) const; // In Facade_Grouping.h, inside the Grouping class declaration - bool is_good_point_wc(const geo_point_t& point, const int face, const double radius = 0.6 * units::cm, + bool is_good_point_wc(const geo_point_t& point, const int apa, const int face, const double radius = 0.6 * units::cm, const int ch_range = 1, const int allowed_bad = 1) const; // In the Grouping class declaration in Facade_Grouping.h - std::vector test_good_point(const geo_point_t& point, const int face, + std::vector test_good_point(const geo_point_t& point, const int apa, const int face, double radius = 0.6 * units::cm, int ch_range = 1) const; @@ -96,24 +179,26 @@ namespace WireCell::PointCloud::Facade { /// @param face /// @param pind plane index /// @return - kd_results_t get_closest_points(const geo_point_t& point, const double radius, const int face, int pind) const; + kd_results_t get_closest_points(const geo_point_t& point, const double radius, const int apa, const int face, int pind) const; /// @brief true if the point is within the dead region, [wind+ch_range, wind-ch_range] and [xmin, xmax] - bool get_closest_dead_chs(const geo_point_t& point, const int ch_range, const int face, int pind) const; + bool 
get_closest_dead_chs(const geo_point_t& point, const int ch_range, const int apa , const int face, int pind) const; /// @brief convert_3Dpoint_time_ch - std::tuple convert_3Dpoint_time_ch(const geo_point_t& point, const int face, const int pind) const; + std::tuple convert_3Dpoint_time_ch(const geo_point_t& point, const int apa, const int face, const int pind) const; // In class Grouping definition - std::pair convert_time_ch_2Dpoint(const int timeslice, const int channel, const int face, const int plane) const; + std::pair convert_time_ch_2Dpoint(const int timeslice, const int channel, const int apa, const int face, const int plane) const; /// @brief Get number of points for a given plane /// @param plane The plane index (0=U, 1=V, 2=W) /// @return Number of points in the specified plane - size_t get_num_points(const int face, const int pind) const; + size_t get_num_points(const int apa, const int face, const int pind) const; // In Facade_Grouping.h, add to public section: - double get_ave_3d_charge(const geo_point_t& point, const double radius = 0.3 * units::cm, const int face = 0) const; - double get_ave_charge(const geo_point_t& point, const double radius = 0.3 * units::cm, const int face = 0, const int pind = 0) const; + double get_ave_3d_charge(const geo_point_t& point, const int apa, const int face, const double radius = 0.3 * units::cm) const; + double get_ave_charge(const geo_point_t& point, const int apa, const int face, const int pind, const double radius = 0.3 * units::cm) const; + + bool is_blob_plane_bad(const Blob* blob, int plane, double cut_ratio = 0.5) const; /// @brief Get ranges of dead channels that overlap with given time and channel window /// @param min_time Minimum time @@ -124,33 +209,51 @@ namespace WireCell::PointCloud::Facade { /// @param pind Plane index /// @param flag_ignore_time If true, ignore time window check /// @return Vector of pairs representing ranges of dead channels - std::vector> get_overlap_dead_chs(const int min_time, 
const int max_time, - const int min_ch, const int max_ch, const int face, const int pind, + std::vector > get_overlap_dead_chs(const int min_time, const int max_time, + const int min_ch, const int max_ch, const int apa, const int face, const int pind, const bool flag_ignore_time=false) const; // In Facade_Grouping.h, inside the Grouping class public section: - std::map> get_all_dead_chs(const int face, const int pind, int expand = 12) const; + std::map> get_all_dead_chs(const int apa, const int face, const int pind, int expand = 0) const; // Get overlapping good channel charges in a time-channel window std::map, std::pair> get_overlap_good_ch_charge( - int min_time, int max_time, int min_ch, int max_ch, + int min_time, int max_time, int min_ch, int max_ch, const int apa, const int face, const int pind) const; - // We override this from Mixin in order to inject propagation of the + // Get wire charge and uncertainty for specific wire/time + std::pair get_wire_charge(int apa, int face, int plane, + int wire_index, int time_slice) const; + + // Check if a wire is dead at a specific time + bool is_wire_dead(int apa, int face, int plane, + int wire_index, int time_slice) const; + + // We override this from Mixins::Cached in order to inject propagation of the // utter garbage handling of dead_winds. If someone fixes that, this // method may be removed. #381. virtual void clear_cache() const; + public: + // The grouping is simply a bus to maybe hold this object. See + // MakeFiducialUtils as an ensemble visitor to construct and add a + // FiducialUtils. + FiducialUtilsPtr get_fiducialutils() const { return m_fiducialutils; } + void set_fiducialutils(FiducialUtilsPtr fd) { m_fiducialutils = fd; } private: + FiducialUtilsPtr m_fiducialutils; - // This "cache" is utterly abused. Someone else fix it. #381. 
- mutable mapfp_t< std::map> > m_dead_winds; + // Build cache for a specific APA/face/plane + void build_wire_cache(int apa, int face, int plane) const; - protected: + protected: // Receive notification when this facade is created on a node. #381. virtual void on_construct(node_type* node); virtual void fill_cache(GroupingCache& cache) const; - + + virtual void fill_dv_cache(GroupingCache& cache) const; + + }; std::ostream& operator<<(std::ostream& os, const Grouping& grouping); @@ -159,8 +262,8 @@ namespace WireCell::PointCloud::Facade { // about the blobs. std::string dump(const Grouping& grouping, int level = 0); -} // namespace WireCell::PointCloud::Facade +} // namespace WireCell::Clus::Facade -template <> struct fmt::formatter : fmt::ostream_formatter {}; +template <> struct fmt::formatter : fmt::ostream_formatter {}; #endif diff --git a/clus/inc/WireCellClus/Facade_Mixins.h b/clus/inc/WireCellClus/Facade_Mixins.h new file mode 100644 index 000000000..d97de2938 --- /dev/null +++ b/clus/inc/WireCellClus/Facade_Mixins.h @@ -0,0 +1,400 @@ +#ifndef WIRECELL_CLUS_FACADEMIXINS +#define WIRECELL_CLUS_FACADEMIXINS + +#include "WireCellClus/Graphs.h" +#include "WireCellUtil/PointTree.h" + +#include +#include + +namespace WireCell::Clus::Facade::Mixins { + + struct DummyCache{}; + + + /// The Ensemble/Grouping/Cluster/Facade classes inherit from this to gain + /// common methods. The mixin itself needs to know its facade type and + /// instance but specifically does not cover any operation that requires + /// knowledge of parent and children. + /// + /// It provides helper functions to deal with local PCs and an optional + /// caching mechanism. See comments on cache() and fill_cache() and + /// clear_cache(). Note, using the cache mechanism does not preclude facade + /// doing DIY caching. 
+ template + class Cached { + SelfType& self; + std::string scalar_pc_name, ident_array_name; + mutable std::unique_ptr m_cache; + public: + Cached(SelfType& self, const std::string& scalar_pc_name, const std::string& ident_array_name = "ident") + : self(self) + , scalar_pc_name(scalar_pc_name) + , ident_array_name(ident_array_name) { + + } + + protected: + /// Facade cache management has a few simple rules, some optional: + /// + /// Cache rule 1: + /// + /// The SelfType MAY call this to access the cache instance. It is + /// guaranteed to have fill_cache() called. See below. + CacheType& cache() const + { + if (! m_cache) { + m_cache = std::make_unique(); + fill_cache(* const_cast(m_cache.get())); + } + return *m_cache.get(); + } + + /// Cache rule 2: + /// + /// Optionally, the SelfType may override this method in order to "bulk + /// fill" the cache instance. + virtual void fill_cache(CacheType& cache) const {} + + + /// Cache rule 3: + /// + /// Optionally, the SelfType MAY implement lazy, fine-grained caching. + /// This can be done with code such as: + /// + /// auto& mydata = cache().mydata; + /// if (mydata.empty()) { /* fill/set mydata */ } + + + /// Cache rule 4: + /// + /// Optionally, but not recommended, the SelfType may provide this + /// method in order to do something when the clear() method is called. + /// The cache instance will be removed just after this call returns. + /// + /// This is not recommended because you should be putting all cached + /// items in the cache. + /// + /// DO NOT EXPOSE THIS METHOD. + virtual void clear_cache() const + { + m_cache = nullptr; + } + + public: + + /// Clear my node of all children nodes and purge my local PCs. + /// Invalidates any cache. + void clear() + { + // node level: + self.node()->remove_children(); + // value level: + self.local_pcs().clear(); + // facade cache level: + clear_cache(); + } + + // Get the map from name to PC for all local PCs. 
+ WireCell::PointCloud::Tree::named_pointclouds_t& local_pcs() + { + return self.value().local_pcs(); + } + const WireCell::PointCloud::Tree::named_pointclouds_t& local_pcs() const + { + return self.value().local_pcs(); + } + + // Return an "identifying number" from the "scalar" PC of the node. As + // with all "ident" values in WCT, there is no meaning ascribed to the + // actual value (by WCT). It is meant to refer to some external + // identity. If the scalar PC or the ident array are not found, the + // default is returned. + // + // This is a special case method that merely delegates to get_scalar(). + int ident(int def = -1) const + { + return get_scalar(ident_array_name, def); + } + + // Set an ident number, delegating to set_scalar(). + void set_ident(int id) + { + set_scalar(ident_array_name, id); + } + + template + T get_element(const std::string& pcname, const std::string& aname, size_t index, T def = 0) const { + const auto& lpcs = local_pcs(); + auto it = lpcs.find(pcname); + if (it == lpcs.end()) { + return def; + } + const auto arr = it->second.get(aname); + if (!arr) { + return def; + } + return arr->template element(index); + } + + // Return a value from the scalar PC + template + T get_scalar(const std::string& aname, T def = 0) const { + return get_element(scalar_pc_name, aname, 0, def); + } + + // Set a value on the scalar PC + template + void set_scalar(const std::string& aname, T val = 0) { + auto& lpcs = local_pcs(); + auto& cs = lpcs[scalar_pc_name]; // create if not existing + auto arr = cs.get(aname); + if (!arr) { + cs.add(aname, PointCloud::Array({(T)val})); + return; + } + arr->template element(0) = (T)val; + } + + /// A flag is a name that can be "set" on a facade. It is simply an + /// entry in the scalar PC. Most imply, a flag is Boolean false if + /// unset (not defined) or has value 0 and set if defined with non-zero + /// value. Non-boolean values are allowed. 
The flag name has a prefix + /// (default "flag_") to provide a namespace. + void set_flag(const std::string& name, int value=1, const std::string& prefix="flag_") { + set_scalar(prefix + name, value); + } + + /// Get the value of a flag. If the flag is unset, return the + /// default_value. See set_flag(). + int get_flag(const std::string& name, int default_value=0, const std::string& prefix="flag_") const { + return get_scalar(prefix + name, default_value); + } + + /// Get all set flag names with a given prefix. + std::vector flag_names(const std::string& prefix="flag_") const { + std::vector ret; + const auto& spc = get_pc(scalar_pc_name); + for (const auto& key : spc.keys()) { + if (String::startswith(key, prefix)) { + ret.push_back(key.substr(0, prefix.size())); + } + } + return ret; + } + + // Any flag set on the other will be set on this. + void flags_from(const SelfType& other, const std::string& prefix="flag_") { + for (const auto& fname : other.flag_names(prefix)) { + set_flag(fname, other.get_flag(fname, 0, prefix), prefix); + } + } + + + bool has_pc(const std::string& pcname) const + { + static PointCloud::Dataset dummy; + const auto& lpcs = local_pcs(); + auto it = lpcs.find(pcname); + if (it == lpcs.end()) { + return false; + } + return true; + } + + // Const access to a local PC/Dataset. If pcname is missing return + // reference to an empty dataset. + const PointCloud::Dataset& get_pc(std::string pcname) const + { + if (pcname.empty()) { + pcname = scalar_pc_name; + } + + static PointCloud::Dataset dummy; + const auto& lpcs = local_pcs(); + auto it = lpcs.find(pcname); + if (it == lpcs.end()) { + return dummy; + } + return it->second; + } + // Mutable access to a local PC/Dataset. If pcname is missing, a new + // dataset of that name will be created. 
+ PointCloud::Dataset& get_pc(std::string pcname) + { + if (pcname.empty()) { + pcname = scalar_pc_name; + } + + static PointCloud::Dataset dummy; + auto& lpcs = local_pcs(); + return lpcs[pcname]; + } + + // Return true if this cluster has a PC array and PC of given names and type. + template + bool has_pcarray(const std::string& aname, std::string pcname) const { + if (pcname.empty()) { + pcname = scalar_pc_name; + } + + auto& lpc = local_pcs(); + auto lit = lpc.find(pcname); + if (lit == lpc.end()) { + return false; + } + + auto arr = lit->second.get(aname); + if (!arr) { + return false; + } + return arr->template is_type(); + } + + // Return as a span an array named "aname" stored in clusters PC named + // by pcname. Returns default span if PC or array not found or there is + // a type mismatch. Note, span uses array data in place. + template + PointCloud::Array::span_t + get_pcarray(const std::string& aname, std::string pcname) { + if (pcname.empty()) { + pcname = scalar_pc_name; + } + + auto& lpc = local_pcs(); + auto lit = lpc.find(pcname); + if (lit == lpc.end()) { + return {}; + } + + auto arr = lit->second.get(aname); + if (!arr) { + return {}; + } + return arr->template elements(); + } + template + const PointCloud::Array::span_t + get_pcarray(const std::string& aname, std::string pcname) const { + if (pcname.empty()) { + pcname = scalar_pc_name; + } + + auto& lpc = local_pcs(); + auto lit = lpc.find(pcname); + if (lit == lpc.end()) { + return {}; + } + + auto arr = lit->second.get(aname); + if (!arr) { + return {}; + } + return arr->template elements(); + } + + // Store vector as an array named "aname" into this cluster's PC named "pcname". + // Reminder, all arrays in a PC must have same major size. 
+ template + void + put_pcarray(const std::vector& vec, + const std::string& aname, std::string pcname) { + if (pcname.empty()) { + pcname = scalar_pc_name; + } + + auto &lpc = local_pcs(); + auto& pc = lpc[pcname]; + + PointCloud::Array::shape_t shape = {vec.size()}; + + auto arr = pc.get(aname); + if (arr) { + //arr->template assign(vec.data(), shape, false); + arr->assign(vec.data(), shape, false); + } + else { + pc.add(aname, PointCloud::Array(vec, shape, false)); + } + } + + std::string get_name() const { + const PointCloud::Dataset& spc = get_pc(scalar_pc_name); + const auto& md = spc.metadata(); + auto jname = md["name"]; + if (jname.isString()) { + return jname.asString(); + } + return ""; + } + + void set_name(const std::string& name) { + PointCloud::Dataset& spc = get_pc(scalar_pc_name); + auto& md = spc.metadata(); + md["name" ] = name; + } + + }; + + class Graphs { + public: + /** + Graph support. + + The facade owns every graph produced by this support and the graph + dies with the facade. + */ + using graph_type = WireCell::Clus::Graphs::Weighted::Graph; + using graph_store_type = std::map; + + /** Return true if named graph exists. */ + bool has_graph(const std::string& name) const; + + /** Return known graphs. + + This is only available as const. User may use it to test for + existence of a graph or iterate. + */ + const graph_store_type& graph_store() const { return m_graph_store; } + + /** + Create a graph of the given name. + + Replaces graph if it exists. + */ + graph_type& make_graph(const std::string& name, size_t nvertices=0); + + /** Transfer a graph to the facade. + + Replaces graph if it exists. + */ + graph_type& give_graph(const std::string& name, graph_type&& gr); + + /** Return a graph by name. + + Creates empty graph if one does not exist + */ + graph_type& get_graph(const std::string& name); + + /** Return a graph by name. + + Throw KeyError if named graph does not exist. 
+ */ + const graph_type& get_graph(const std::string& name) const; + + /** Transfer ownership of graph. + + If the graph exists it is returned by a moved value. Else an empty + graph is returned. + */ + graph_type take_graph(const std::string& name); + + private: + graph_store_type m_graph_store; + + }; + + +} + +#endif diff --git a/clus/inc/WireCellClus/Facade_Summary.h b/clus/inc/WireCellClus/Facade_Summary.h index 7880f00bf..4d6a37484 100644 --- a/clus/inc/WireCellClus/Facade_Summary.h +++ b/clus/inc/WireCellClus/Facade_Summary.h @@ -7,9 +7,7 @@ #include "WireCellUtil/Configuration.h" -// fixme: this namespace here and elsewhere in clus should be in -// WireCell::Clus::Facade or something. -namespace WireCell::PointCloud::Facade { +namespace WireCell::Clus::Facade { // Summarize facades as JSON. These recur down the type hierarchy and into the tree via Configuration json_summary(const Grouping& grp); diff --git a/clus/inc/WireCellClus/Facade_Util.h b/clus/inc/WireCellClus/Facade_Util.h index e56885c33..b898d2ab2 100644 --- a/clus/inc/WireCellClus/Facade_Util.h +++ b/clus/inc/WireCellClus/Facade_Util.h @@ -5,268 +5,35 @@ #ifndef WIRECELL_CLUS_FACADEUTIL #define WIRECELL_CLUS_FACADEUTIL +#include "WireCellClus/Graphs.h" + +#include "WCPQuickhull/QuickHull.h" + +#include "WireCellIface/IAnodePlane.h" +#include "WireCellIface/IAnodeFace.h" +#include "WireCellIface/IDetectorVolumes.h" + #include "WireCellUtil/PointCloudDataset.h" #include "WireCellUtil/PointTree.h" #include "WireCellUtil/Point.h" #include "WireCellUtil/Units.h" #include "WireCellUtil/Spdlog.h" -#include "WireCellUtil/Graph.h" -// #include "WireCellUtil/D2Vector.h" -#include "WireCellIface/IAnodePlane.h" -#include "WireCellIface/IAnodeFace.h" -#include "WCPQuickhull/QuickHull.h" +#include + // extern int global_counter_get_closest_wcpoint; // using namespace WireCell; NO! do not open up namespaces in header files! 
-namespace WireCell::PointCloud::Facade { - - struct DummyCache{}; - - /// The Grouping/Cluster/Facade inherit from this to gain additional methods - /// that are common to all three facade types. The mixin itself needs to - /// know its facade type and value but specifically does not include anything - /// that requires parent or child types or values. - /// - /// It provides helper functions to deal with local PCs and an optional - /// caching mechanism. See comments on cache() and fill_cache() and - /// clear_cache(). Note, using the cache mechanism does not preclude facade - /// doing DIY caching. - template - class Mixin { - SelfType& self; - std::string scalar_pc_name, ident_array_name; - mutable std::unique_ptr m_cache; - public: - Mixin(SelfType& self, const std::string& scalar_pc_name, const std::string& ident_array_name = "ident") - : self(self) - , scalar_pc_name(scalar_pc_name) - , ident_array_name(ident_array_name) { - - } - - protected: - /// Facade cache management has three simple rules: - /// - /// Cache rule 1: The SelfType may call this to access a full and const cache. - const CacheType& cache() const - { - if (! m_cache) { - m_cache = std::make_unique(); - fill_cache(* const_cast(m_cache.get())); - } - return *m_cache.get(); - } - - /// Cache rule 2: - /// - /// The SelfType overrides this method to fill an empty cache. This is - /// the only place where the cache object can be accessed by Self in - /// mutable form. - virtual void fill_cache(CacheType& cache) const {} - - public: - /// Cache rule 3: - /// - /// The SelfType may override clear_cache(), for example to clear cached - /// data not in the CacheType. An override must then forward-call the - /// Mixin::clear_cache(). The Mixin, the SelfType implementation and/or - /// SelfType users may all call this method thought the goal is to make - /// clear_cache() called in response to a tree notification. 
- virtual void clear_cache() const - { - m_cache = nullptr; - } - - public: - - /// Clear my node of all children nodes and purge my local PCs. - /// Invalidates any cache. - void clear() - { - // node level: - self.node()->remove_children(); - // value level: - self.local_pcs().clear(); - // facade cache level: - clear_cache(); - } - - // Get the map from name to PC for all local PCs. - WireCell::PointCloud::Tree::named_pointclouds_t& local_pcs() - { - return self.value().local_pcs(); - } - const WireCell::PointCloud::Tree::named_pointclouds_t& local_pcs() const - { - return self.value().local_pcs(); - } - - // Return an "identifying number" from the "scalar" PC of the node. As - // with all "ident" values in WCT, there is no meaning ascribed to the - // actual value (by WCT). It is meant to refer to some external - // identity. If the scalar PC or the ident array are not found, the - // default is returned. - // - // This is a special case method that merely delegates to get_scalar(). - int ident(int def = -1) const - { - return get_scalar(ident_array_name, def); - } - - // Set an ident number, delegating to set_scalar(). 
- void set_ident(int id) - { - set_scalar(ident_array_name, id); - } - - template - T get_element(const std::string& pcname, const std::string& aname, size_t index, T def = 0) const { - const auto& lpcs = local_pcs(); - auto it = lpcs.find(pcname); - if (it == lpcs.end()) { - return def; - } - const auto arr = it->second.get(aname); - if (!arr) { - return def; - } - // std::cout << "test1 " << pcname << " " << aname << " " << index << " " << arr->template element(index) << std::endl; - return arr->template element(index); - } - - // Return a value from the scalar PC - template - T get_scalar(const std::string& aname, T def = 0) const { - return get_element(scalar_pc_name, aname, 0, def); - } - - // Set a value on the scalar PC - template - void set_scalar(const std::string& aname, T val = 0) { - auto& lpcs = local_pcs(); - auto& cs = lpcs[scalar_pc_name]; // create if not existing - auto arr = cs.get(aname); - if (!arr) { - cs.add(aname, PointCloud::Array({(T)val})); - return; - } - arr->template element(0) = (T)val; - } - - bool has_pc(const std::string& pcname) const - { - static PointCloud::Dataset dummy; - const auto& lpcs = local_pcs(); - auto it = lpcs.find(pcname); - if (it == lpcs.end()) { - return false; - } - return true; - } - - // Const access to a local PC/Dataset. If pcname is missing return - // reference to an empty dataset. - const PointCloud::Dataset& get_pc(const std::string& pcname) const - { - static PointCloud::Dataset dummy; - const auto& lpcs = local_pcs(); - auto it = lpcs.find(pcname); - if (it == lpcs.end()) { - return dummy; - } - return it->second; - } - // Mutable access to a local PC/Dataset. If pcname is missing, a new - // dataset of that name will be created. - PointCloud::Dataset& get_pc(const std::string& pcname) - { - static PointCloud::Dataset dummy; - const auto& lpcs = local_pcs(); - return lpcs[pcname]; - } - - // Return true if this cluster has a PC array and PC of given names and type. 
- template - bool has_pcarray(const std::string& aname, const std::string& pcname) const { - auto& lpc = local_pcs(); - auto lit = lpc.find(pcname); - if (lit == lpc.end()) { - return false; - } - - auto arr = lit->second.get(aname); - if (!arr) { - return false; - } - return arr->template is_type(); - } - - // Return as a span an array named "aname" stored in clusters PC named - // "pcname". Returns default span if PC or array not found or there is - // a type mismatch. Note, span uses array data in place. - template - PointCloud::Array::span_t - get_pcarray(const std::string& aname, const std::string& pcname) { - - auto& lpc = local_pcs(); - auto lit = lpc.find(pcname); - if (lit == lpc.end()) { - return {}; - } - - auto arr = lit->second.get(aname); - if (!arr) { - return {}; - } - return arr->template elements(); - } - template - const PointCloud::Array::span_t - get_pcarray(const std::string& aname, const std::string& pcname) const { - - auto& lpc = local_pcs(); - auto lit = lpc.find(pcname); - if (lit == lpc.end()) { - return {}; - } - - auto arr = lit->second.get(aname); - if (!arr) { - return {}; - } - return arr->template elements(); - } - - // Store vector as an array named "aname" into this cluster's PC named "pcname". - // Reminder, all arrays in a PC must have same major size. 
- template - void - put_pcarray(const std::vector& vec, - const std::string& aname, const std::string& pcname) { +namespace WireCell::Clus::Facade { - auto &lpc = local_pcs(); - auto& pc = lpc[pcname]; - PointCloud::Array::shape_t shape = {vec.size()}; - auto arr = pc.get(aname); - if (arr) { - arr->template assign(vec.data(), shape, false); - } - else { - pc.add(aname, Array(vec, shape, false)); - } - } - }; - - - using points_t = Tree::Points; - using node_t = Tree::Points::node_t; + using points_t = WireCell::PointCloud::Tree::Points; + using node_t = WireCell::PointCloud::Tree::Points::node_t; using node_ptr = std::unique_ptr; using geo_point_t = WireCell::Point; using geo_vector_t = WireCell::Vector; @@ -287,27 +54,6 @@ namespace WireCell::PointCloud::Facade { using int_t = int; - // AVOID DOING THIS in headers!!! In this case it causes conflict between - // boost::units and WireCell::Units in imp files that #include this one. - // - // If typing the namespace:: is too much, then one can do select "using - // namespace::symbol". - // - // using namespace boost; - - struct VertexProp { - int index; - // WCPointCloud::WCPoint wcpoint; - // add pointer to merged cell - }; - using EdgeProp = boost::property; - // struct EdgeProp { - // float dist; // edge distance - // }; - typedef boost::adjacency_list MCUGraph; - typedef boost::graph_traits::vertex_descriptor vertex_descriptor; - typedef boost::graph_traits::edge_descriptor edge_descriptor; - // FIXME: refactor to vector, etc? or vector with ::pitch/::angle? struct TPCParams { int face{0}; @@ -356,10 +102,10 @@ namespace WireCell::PointCloud::Facade { // Refine search around these neighbors for(auto [idx2, dist] : knn) { - auto p2 = two.point(idx2); + geo_point_t p2; // = two.point3d(idx2); // why call this? 
// Local refinement by checking neighboring points - int curr_idx1 = i; // Keep track of current point index from first cloud + int curr_idx1 = 0; // i; // Keep track of current point index from first cloud std::tie(idx2, p2) = two.get_closest_wcpoint(p1); std::tie(curr_idx1, p1) = one.get_closest_wcpoint(p2); @@ -373,92 +119,7 @@ namespace WireCell::PointCloud::Facade { } return std::make_tuple(p1_save, p2_save, min_dis); - - // int p1_index = 0; - // int p2_index = 0; - // geo_point_t p1 = one.point(p1_index); - // geo_point_t p2 = two.point(p2_index); - // int p1_save = 0; - // int p2_save = 0; - // double min_dis = 1e9; - - // int prev_index1 = -1; - // int prev_index2 = -1; - // while (p1_index != prev_index1 || p2_index != prev_index2) { - // prev_index1 = p1_index; - // prev_index2 = p2_index; - // std::tie(p2_index, p2) = two.get_closest_wcpoint(p1); - // std::tie(p1_index, p1) = one.get_closest_wcpoint(p2); - // } - // // std::cout << "get_closest_points: " << p1_index << " " << p2_index << std::endl; - // double dis = sqrt(pow(p1.x() - p2.x(), 2) + pow(p1.y() - p2.y(), 2) + pow(p1.z() - p2.z(), 2)); - // if (dis < min_dis) { - // min_dis = dis; - // p1_save = p1_index; - // p2_save = p2_index; - // } - - // prev_index1 = -1; - // prev_index2 = -1; - // p1_index = one.points()[0].size() - 1; - // p2_index = 0; - // p1 = one.point(p1_index); - // p2 = two.point(p2_index); - // while (p1_index != prev_index1 || p2_index != prev_index2) { - // prev_index1 = p1_index; - // prev_index2 = p2_index; - // std::tie(p2_index, p2) = two.get_closest_wcpoint(p1); - // std::tie(p1_index, p1) = one.get_closest_wcpoint(p2); - // } - // // std::cout << "get_closest_points: " << p1_index << " " << p2_index << std::endl; - // dis = sqrt(pow(p1.x() - p2.x(), 2) + pow(p1.y() - p2.y(), 2) + pow(p1.z() - p2.z(), 2)); - // if (dis < min_dis) { - // min_dis = dis; - // p1_save = p1_index; - // p2_save = p2_index; - // } - - // prev_index1 = -1; - // prev_index2 = -1; - // p1_index 
= 0; - // p2_index = two.points()[0].size() - 1; - // p1 = one.point(p1_index); - // p2 = two.point(p2_index); - // while (p1_index != prev_index1 || p2_index != prev_index2) { - // prev_index1 = p1_index; - // prev_index2 = p2_index; - // std::tie(p2_index, p2) = two.get_closest_wcpoint(p1); - // std::tie(p1_index, p1) = one.get_closest_wcpoint(p2); - // } - // // std::cout << "get_closest_points: " << p1_index << " " << p2_index << std::endl; - // dis = sqrt(pow(p1.x() - p2.x(), 2) + pow(p1.y() - p2.y(), 2) + pow(p1.z() - p2.z(), 2)); - // if (dis < min_dis) { - // min_dis = dis; - // p1_save = p1_index; - // p2_save = p2_index; - // } - - // prev_index1 = -1; - // prev_index2 = -1; - // p1_index = one.points()[0].size() - 1; - // p2_index = two.points()[0].size() - 1; - // p1 = one.point(p1_index); - // p2 = two.point(p2_index); - // while (p1_index != prev_index1 || p2_index != prev_index2) { - // prev_index1 = p1_index; - // prev_index2 = p2_index; - // std::tie(p2_index, p2) = two.get_closest_wcpoint(p1); - // std::tie(p1_index, p1) = one.get_closest_wcpoint(p2); - // } - // // std::cout << "get_closest_points: " << p1_index << " " << p2_index << std::endl; - // dis = sqrt(pow(p1.x() - p2.x(), 2) + pow(p1.y() - p2.y(), 2) + pow(p1.z() - p2.z(), 2)); - // if (dis < min_dis) { - // min_dis = dis; - // p1_save = p1_index; - // p2_save = p2_index; - // } - - // return std::make_tuple(p1_save, p2_save, min_dis); + } class Simple3DPointCloud { @@ -502,7 +163,7 @@ namespace WireCell::PointCloud::Facade { template std::tuple get_closest_points(const PCType& two) const { - return PointCloud::Facade::get_closest_points(*this, two); + return Clus::Facade::get_closest_points(*this, two); } @@ -544,56 +205,58 @@ namespace WireCell::PointCloud::Facade { std::ostream& operator<<(std::ostream& os, const Multi2DPointCloud& s3dpc); - class DynamicPointCloud { - public: - DynamicPointCloud(double angle_u, double angle_v, double angle_w); - using points3d_type = 
Simple3DPointCloud::points_type; - using points2d_type = Multi2DPointCloud::points_type; - using point_type = std::vector; - inline size_t get_num_points() const { return m_pc3d.get_num_points(); } - inline point_type point2d(const size_t plane, const size_t ind) const { - return m_pc2d.point(plane, ind); - } - inline geo_point_t point3d(const size_t ind) const { return m_pc3d.point(ind); } - - // useful when hacking the winds with dist_cut - inline int dist_cut(const size_t plane, const size_t ind) const { return m_winds[plane].at(ind); } - - /// @brief flag 0 points, flag 1 skeleton - void add_points(const Cluster* cluster, const int flag=0, const double step = 0.6*units::cm); // flag 1 points, flag 2 scheleton - - /// @brief add points from p_test along dir with range and step - /// @attention: the index_uvw is hacked to store the distance cut - void add_points(const Cluster* cluster, const geo_point_t& p_test, const geo_point_t& dir_unmorm, const double range, - const double step, const double angle); - - /// @return: dist, Cluster, point_index - std::vector> get_2d_points_info(const geo_point_t& p, const double radius, - const int plane); - /// @brief - std::tuple get_closest_2d_point_info(const geo_point_t& p, const int plane); - - std::pair hough_transform(const geo_point_t& origin, const double dis) const; - geo_point_t vhough_transform(const geo_point_t& origin, const double dis) const; - private: - Multi2DPointCloud m_pc2d; - Simple3DPointCloud m_pc3d; - std::vector m_winds[3]; // u, v, w - std::vector m_clusters; - std::vector m_blobs; - }; + // class DynamicPointCloudLegacy { + // public: + // DynamicPointCloudLegacy(double angle_u, double angle_v, double angle_w); + // using points3d_type = Simple3DPointCloud::points_type; + // using points2d_type = Multi2DPointCloud::points_type; + // using point_type = std::vector; + // inline size_t get_num_points() const { return m_pc3d.get_num_points(); } + // inline point_type point2d(const size_t plane, const 
size_t ind) const { + // return m_pc2d.point(plane, ind); + // } + // inline geo_point_t point3d(const size_t ind) const { return m_pc3d.point(ind); } + + // // useful when hacking the winds with dist_cut + // inline int dist_cut(const size_t plane, const size_t ind) const { return m_winds[plane].at(ind); } + + // /// @brief flag 0 points, flag 1 skeleton + // void add_points(const Cluster* cluster, const int flag=0, const double step = 0.6*units::cm); // flag 0 points, flag 1 scheleton + + // /// @brief add points from p_test along dir with range and step + // /// @attention: the index_uvw is hacked to store the distance cut + // void add_points(const Cluster* cluster, const geo_point_t& p_test, const geo_point_t& dir_unmorm, const double range, + // const double step, const double angle); + + // /// @return: dist, Cluster, point_index + // std::vector> get_2d_points_info(const geo_point_t& p, const double radius, + // const int plane); + // /// @brief + // std::tuple get_closest_2d_point_info(const geo_point_t& p, const int plane); + + // std::pair hough_transform(const geo_point_t& origin, const double dis) const; + // geo_point_t vhough_transform(const geo_point_t& origin, const double dis) const; + // private: + // Multi2DPointCloud m_pc2d; + // Simple3DPointCloud m_pc3d; + // std::vector m_winds[3]; // u, v, w + // std::vector m_clusters; + // std::vector m_blobs; + // }; - void process_mst_deterministically( - const boost::adjacency_list>& temp_graph, - std::vector>>& index_index_dis, - std::vector>>& index_index_dis_mst) ; + void process_mst_deterministically(const Graphs::Weighted::Graph& g, + std::vector>>& index_index_dis, + std::vector>>& index_index_dis_mst); double time2drift(const IAnodeFace::pointer anodeface, const double time_offset, const double drift_speed, const double time); double drift2time(const IAnodeFace::pointer anodeface, const double time_offset, const double drift_speed, const double drift); int point2wind(const geo_point_t& point, 
const double angle, const double pitch, const double center); + double wind2point2dproj(const int wind, const double angle, const double pitch, const double center); + + WirePlaneId get_wireplaneid(const geo_point_t& point, const WirePlaneId& wpid1, const WirePlaneId& wpid2, IDetectorVolumes::pointer dv); + WirePlaneId get_wireplaneid(const geo_point_t& p1, const WirePlaneId& wpid1, const geo_point_t& p2, const WirePlaneId& wpid2, IDetectorVolumes::pointer dv); // fixme: why do we inline these? inline double cal_proj_angle_diff(const geo_vector_t& dir1, const geo_vector_t& dir2, double plane_angle) @@ -633,6 +296,8 @@ namespace WireCell::PointCloud::Facade { return false; } -} // namespace WireCell::PointCloud::Facade + + +} // namespace WireCell::Clus::Facade #endif diff --git a/clus/inc/WireCellClus/FiducialUtils.h b/clus/inc/WireCellClus/FiducialUtils.h new file mode 100644 index 000000000..4baaefd35 --- /dev/null +++ b/clus/inc/WireCellClus/FiducialUtils.h @@ -0,0 +1,112 @@ +#ifndef WIRECELL_CLUS_FIDUCIALUTILS +#define WIRECELL_CLUS_FIDUCIALUTILS + +#include "WireCellClus/ClusteringFuncsMixins.h" + +// headers for "static" data sources +#include "WireCellClus/IPCTransform.h" +#include "WireCellIface/IDetectorVolumes.h" +#include "WireCellIface/IFiducial.h" + +// headers for "dynamic" data sources +#include "WireCellClus/Facade_Grouping.h" + +// Other data types +#include "WireCellUtil/Point.h" + + +namespace WireCell::Clus { + + + /** FiducialUtils is a helper for answering data-dependent questions + * involving regions of space. + * + * This class holds various methods to answer questions that requires a mix + * of static data (eg wire geometry) and the dynamic data being processed + * (eg clusters, PCs). + * + * It is intended to be constructed with static information and after + * construction it is to be fed dynamic data at least once. 
It may perform + * initial computation on that dynamic data and store the results in order + * to optimize the queries performed by its methods. + * + * See also MakeFiducialUtils which is an IEnsembleVisitor that will perform + * create a FiducialUtils and add it to a grouping. + * + * Note: FiducialUtils is approximately equivalent to WCP's ToyFiducial class. + */ + + class FiducialUtils { + public: + + /// Bundle the "static" data. This is copyable. + struct StaticData { + IDetectorVolumes::pointer dv; + IFiducial::pointer fiducial; + Clus::IPCTransformSet::pointer pcts; + }; + + /// Bundle the "dynamic" data. + struct DynamicData { + const Facade::Grouping& live; + const Facade::Grouping& dead; + }; + + FiducialUtils(); + + // Create it with whatever "static" data sources are required. Add to + // this argument list as needed as the methods are populated. + FiducialUtils(StaticData sd); + + // It is possible to create empty and feed static data after + // construction. This will re-initialize any dynamic data as well. + void feed_static(StaticData sd); + + // This must be called at least once to provide the "dynamic" data. It + // may be called multiple times. Each call will start by clearing and + // rebuilding derived internal data on which the methods operate. + void feed_dynamic(const DynamicData& dd); + + + // Query methods + + bool inside_fiducial_volume(const Point& p, + const std::vector& tolerance_vec = {}) const; + + // use live_grouping's CTPC information to do the check ... 
+ bool inside_dead_region(const Point& p_raw, const int apa, const int face, const int minimal_views = 2) const; + + bool check_dead_volume(const Facade::Cluster& main_cluster, const Point& p, const Vector& dir, double step = 1.0*units::cm, const double cut_ratio = 0.81, const int cut_value = 4) const; + + bool check_signal_processing(const Facade::Cluster& main_cluster, const Point& p, const Vector& dir, double step = 1.0*units::cm, const double cut_ratio = 0.8, const int cut_value = 5) const; + + + + + private: + + // Holds static user data. Can be reset which will also clear internal + // data. + StaticData m_sd; + + // Bundle the "internal" data derived from static+dynamic + // + // This holds whatever data that is derived from "dynamic data" (eg, the + // groupings) that are needed to satisfy the "query methods". This + // should be fully derived data and in particular not include the + // objects from the "dynamic data" themselves as their life times may be + // shorter than the lifetime of a FiducialUtils object. + // + // This struct must be default-constructable, eg via InternalData{} so + // make sure all values are initialized here. the feed_*() methods will + // start by clearing the InternalData via this default construction. + struct InternalData { + + // save live grouping ... + Facade::Grouping* live; + }; + InternalData m_internal; + }; + +} +#endif diff --git a/clus/inc/WireCellClus/Graphs.h b/clus/inc/WireCellClus/Graphs.h new file mode 100644 index 000000000..5b4cd23dd --- /dev/null +++ b/clus/inc/WireCellClus/Graphs.h @@ -0,0 +1,208 @@ +#ifndef WIRECELLCLUS_GRAPH +#define WIRECELLCLUS_GRAPH + +#include "WireCellUtil/Graph.h" +#include "WireCellUtil/PointCloudDataset.h" + +#include +#include +#include + +namespace WireCell::Clus::Graphs { + + namespace Weighted { + + /** + * The basic graph type used in clus is an edge-weighted graph with + * vertex descriptors that can serve as indices. 
For indices to remain + * stable, vertex removal is NOT supported. + */ + + using dijkstra_distance_type = double; + using edge_weight_type = double; + using Graph = boost::adjacency_list< + boost::vecS, // vertices + boost::vecS, // edges + boost::undirectedS, // edge direction (none) + boost::property, + boost::property + >; + using graph_type = Graph; + using vertex_type = boost::graph_traits::vertex_descriptor; + using edge_type = boost::graph_traits::edge_descriptor; + + // A quasi-edge type that simply records a pair of vertices. Unlike + // edge_type, the edge_pair_type is not dependent on the graph. Use + // make_vertex_pair() to assure an order. + using vertex_pair = std::pair; + + /// Return an ordered vertex pair with the smaller of {a,b} first. + vertex_pair make_vertex_pair(vertex_type a, vertex_type b); + + + // A set of unique vertices or edges; + using vertex_set = std::set; + using edge_set = std::set; + + // Filtered graphs and their predicates. + using vertex_predicate = std::function; + using edge_predicate = std::function; + using filtered_graph_type = boost::filtered_graph; + + /// Results of a "discrete Voronoi tessellation" formed on the graph + /// given with each Voronoi cell defined by one in a set of select + /// "terminal" vertices. + struct Voronoi { + + /// A "map" from each graph vertex (index) to its nearest "terminal" + /// vertex. terminal[v] == v for v in the set of terminal vertices. + std::vector terminal; + + /// A "map" from each graph vertex (index) to the distance along the + /// path to its nearest "terminal". distance[v] == 0.0 for v in the + /// set of terminal vertices. + std::vector distance; + + /// A "map" from each graph vertex (index) to the edge into that + /// vertex from the vertex neighbor (the edge "source") that is + /// directly upstream in the walk from closest terminal to the + /// original vertex. If the source is the nearest terminal for the + /// original vertex then the backwards walk is complete. 
Otherwise, + /// the last_edge for the neighbor provides the next neighbor, etc. + /// Note, the value last_edge[v] for any v in the set of terminals is + /// not defined. (You probably get 0's for both vertices). + std::vector last_edge; + + }; + + + /// + /// Free functions calculating Voronoi and related See GraphAlgorithm's + /// methods of similar names for caching versions. + /// + + /// Return the path of vertices FROM a given vertex TO its nearest + /// terminal. This simply walks last_edge as described above. The + /// result is undefined if the graph other than the one used to make + /// this Voronoi struct. + std::vector terminal_path(const graph_type& graph, const Voronoi& vor, vertex_type ver); + + /// Return a Steiner graph (not tree). This graph has all the + /// vertices of the original graph but only the edges from the + /// original graph which are on the shortest path between terminal + /// vertices. + graph_type steiner_graph(const graph_type& graph, const Voronoi& vor); + + + /// Structure to hold charge calculation parameters (from prototype) + struct ChargeWeightingConfig { + double Q0 = 10000.0; // constant term from prototype + double factor1 = 0.8; // weighting factor 1 from prototype + double factor2 = 0.4; // weighting factor 2 from prototype + bool enable_weighting = true; // whether to apply charge weighting + }; + + + + + /// Construct the "discrete graph Voronoi tessellation". + Voronoi voronoi(const graph_type& graph, const std::vector& terminals); + + /// Embody all possible shortest paths from a given source index. + class ShortestPaths { + + // Dijkstra result from source + size_t m_source; + std::vector m_predecessors; + // distances are not currently used. + + // Lazy calculate path for given destination indices. + mutable std::unordered_map> m_paths; + + public: + ShortestPaths(size_t source, const std::vector predecessors); + + /// Return the unique vertices along shortest path from our source + /// to destination. inclusive. 
+ const std::vector& path(size_t destination) const; + }; + + // Bind some graph algorithms to a graph, with caching.. + class GraphAlgorithms { + const Graph& m_graph; + + // LRU cache configuration + static constexpr size_t DEFAULT_MAX_CACHE_SIZE = 50; // Adjust as needed + size_t m_max_cache_size; + + // LRU cache implementation for shortest paths + // List maintains access order (most recently used at front) + mutable std::list m_access_order; + + // Map from source to {list_iterator, ShortestPaths} + mutable std::unordered_map::iterator, ShortestPaths>> m_sps; + + // Lazy calculate dijkstra shortest path results. + // mutable std::unordered_map m_sps; + + mutable std::vector m_cc; + + // Helper method to update LRU cache + void update_cache_access(size_t source) const; + void evict_oldest_if_needed() const; + + public: + GraphAlgorithms(const Graph& graph, size_t max_cache_size = DEFAULT_MAX_CACHE_SIZE); + + + /// Return the intermediate result that gives access to the shortest + /// paths from the source vertex all possible destination vertices. + const ShortestPaths& shortest_paths(size_t source) const; + + /// Return the unique vertices vertices along the shortest path from + /// source vertex to destination vertex, inclusive. + const std::vector& shortest_path(size_t source, size_t destination) const; + + /// Return a "CC" array giving connected component subgraphs. + const std::vector& connected_components() const; + + /// Get current cache size and maximum cache size + size_t cache_size() const { return m_sps.size(); } + size_t max_cache_size() const { return m_max_cache_size; } + + /// Clear the shortest paths cache + void clear_cache() const; + + /// Return a graph view filtered on the set of vertices. If accept + /// is true (default) the graph only has vertices in the set. If + /// false, it has vertices in the original graph that are not in the + /// set. 
The vertex descriptors (indices) of the returned graph are + /// the same as the original graph. Use boost::copy_graph() to make + /// a new graph with compactified vertices. + filtered_graph_type reduce(const vertex_set& vertices, bool accept = true) const; + + /// As reduce(vertex_set) but filter on edges. + filtered_graph_type reduce(const edge_set& edge, bool accept = true) const; + + /// Return a graph view filtered on edge weights. If accept is true + /// (default) edges with weights greater or equal to threshold will + /// be kept and others removed and vice versa if accept is false. + filtered_graph_type weight_threshold(edge_weight_type threshold, bool accept = true) const; + + /// Find all neighbors within nlevel hops from the input index. + /// @param index The starting vertex index + /// @param nlevel The number of levels (hops) to search + /// @param include_self Whether to include the original vertex in the result (default: true) + /// @return A set of vertex indices that are within nlevel hops from the input index + vertex_set find_neighbors_nlevel(size_t index, int nlevel, bool include_self = true) const; + + }; + + + } + +} + +#endif + diff --git a/clus/inc/WireCellClus/GroupingHelper.h b/clus/inc/WireCellClus/GroupingHelper.h index 6174fb0d4..cc8715cdd 100644 --- a/clus/inc/WireCellClus/GroupingHelper.h +++ b/clus/inc/WireCellClus/GroupingHelper.h @@ -12,7 +12,7 @@ #include -namespace WireCell::PointCloud::Facade { +namespace WireCell::Clus::Facade { // create a new function to take in the original_grouping and the newly created shadow_grouping // the return of this new function should be a std::map >. // The first Cluster should be the original cluster, the first of the pair Clusters should be the corresponding shadow_cluster, the second of the pair Cluster should be the main cluster due to separation. 
@@ -30,4 +30,4 @@ namespace WireCell::PointCloud::Facade { } -#endif \ No newline at end of file +#endif diff --git a/clus/inc/WireCellClus/IEnsembleVisitor.h b/clus/inc/WireCellClus/IEnsembleVisitor.h new file mode 100644 index 000000000..f774eb1d7 --- /dev/null +++ b/clus/inc/WireCellClus/IEnsembleVisitor.h @@ -0,0 +1,32 @@ +/** + + Interface providing an ensemble visitor. + + Primarily this is the interface to a "clustering method component" but is + given a more generic name as some non-clustering pattern-recognition related + components will use it. + + */ +#ifndef WIRECELLCLUS_IENSEMBLEVISITOR +#define WIRECELLCLUS_IENSEMBLEVISITOR + + +#include "WireCellUtil/IComponent.h" +#include "WireCellClus/Facade_Ensemble.h" + +#include + +namespace WireCell::Clus { + + class IEnsembleVisitor : public IComponent { + public: + + virtual ~IEnsembleVisitor() {}; + + /// Mutate the ensemble + virtual void visit(Facade::Ensemble& ensemble) const = 0; + }; +} + + +#endif diff --git a/clus/inc/WireCellClus/IPCTransform.h b/clus/inc/WireCellClus/IPCTransform.h new file mode 100644 index 000000000..35d6a690f --- /dev/null +++ b/clus/inc/WireCellClus/IPCTransform.h @@ -0,0 +1,40 @@ +/** A set of point cloud transforms for clustering. + + These interfaces are not at all general purpose so it is buried inside clus. 
+ + */ +#ifndef WIRECELLCLUS_IPCTRANSFORM +#define WIRECELLCLUS_IPCTRANSFORM + +#include "WireCellUtil/IComponent.h" +#include "WireCellUtil/Point.h" +#include "WireCellUtil/PointCloudDataset.h" +#include + +namespace WireCell::Clus { + + class IPCTransform : public IComponent { + public: + using Dataset = WireCell::PointCloud::Dataset; + using Array = WireCell::PointCloud::Array; + + virtual ~IPCTransform() {} + + virtual Point forward(const Point& pos_raw, double clustser_t0, int face, int apa) const = 0; + virtual Point backward(const Point& pos_cor, double clustser_t0, int face, int apa) const = 0; + virtual bool filter(const Point& pos_cor, double clustser_t0, int face, int apa) const = 0; + + virtual Dataset forward(const Dataset& pc_raw, const std::vector& arr_raw_names, const std::vector& arr_cor_names, double clustser_t0, int face, int apa) const = 0; + virtual Dataset backward(const Dataset& pc_cor, const std::vector& arr_cor_names, const std::vector& arr_raw_names, double clustser_t0, int face, int apa) const = 0; + virtual Dataset filter(const Dataset& pc_cor, const std::vector& arr_cor_names, double clustser_t0, int face, int apa) const = 0; + + }; + + class IPCTransformSet : public IComponent { + public: + virtual ~IPCTransformSet() {} + virtual IPCTransform::pointer pc_transform(const std::string &name) const = 0; + }; +} + +#endif diff --git a/clus/inc/WireCellClus/MultiAlgBlobClustering.h b/clus/inc/WireCellClus/MultiAlgBlobClustering.h index bdc7657c0..570e38b54 100644 --- a/clus/inc/WireCellClus/MultiAlgBlobClustering.h +++ b/clus/inc/WireCellClus/MultiAlgBlobClustering.h @@ -1,14 +1,23 @@ #ifndef WIRECELL_CLUS_MULTIALGBLOBCLUSTERING #define WIRECELL_CLUS_MULTIALGBLOBCLUSTERING +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellClus/IClusGeomHelper.h" +#include "WireCellClus/IEnsembleVisitor.h" +#include "WireCellClus/Facade.h" + #include "WireCellAux/Logger.h" + #include "WireCellIface/ITensorSetFilter.h" #include 
"WireCellIface/IConfigurable.h" #include "WireCellIface/IAnodePlane.h" +#include "WireCellIface/IDetectorVolumes.h" #include "WireCellIface/ITerminal.h" -#include "WireCellClus/IClusGeomHelper.h" + #include "WireCellUtil/Bee.h" +#include + namespace WireCell::Clus { class MultiAlgBlobClustering @@ -34,8 +43,60 @@ namespace WireCell::Clus { Bee::Sink m_sink; int m_last_ident{-1}; int m_initial_index{0}; // Default to 0 for backward compatibility - Bee::Points m_bee_img, m_bee_ld; - Bee::Patches m_bee_dead; + + // Replace the existing bee points structures with a more flexible approach + struct BeePointsConfig { + // special name "img" dumps "live" before clustering + std::string name; // bee type name + std::string detector; // bee geom name + std::string algorithm; // bee alg name, defaults to type + std::string pcname; // PC to take + std::string grouping; // grouping to take (default to "live") + std::string visitor; // if given, dump just after this visitor runs and any cluster ID enumeration + std::vector coords; + bool individual; + int filter{1};// 1 for on, 0 for off, -1 for inverse filter + }; + + // Vector to store configurations for multiple bee points sets + std::vector m_bee_points_configs; + + // Nested structure to store bee points objects for each configuration, by APA and face + // First key: bee points set name, second key: "anode_id-face_id" string + struct ApaBeePoints { + // Default constructor (add this) + ApaBeePoints() = default; + + // Global points (used when individual == false) + Bee::Points global; + + // Individual points (used when individual == true) + // Key is "anode_id-face_id" string + std::map > by_apa_face; // apa, face + + }; + + Facade::Grouping& load_grouping( + Facade::Ensemble& ensemble, + const std::string& name, + const std::string& path, + const ITensorSet::pointer ints); + + std::map m_bee_points; + + // New helper function to fill bee points + void fill_bee_points(const std::string& name, const Facade::Grouping& 
grouping); + void fill_bee_points_from_cluster( + Bee::Points& bpts, const Facade::Cluster& cluster, + const std::string& pcname, const std::vector& coords, + int filter); + + void fill_bee_patches_from_grouping(const Facade::Grouping& grouping); + void fill_bee_patches_from_cluster(const Facade::Cluster& cluster); + + std::map> m_bee_dead_patches; + // Bee::Patches m_bee_dead; // dead region ... + // Add new member variables for run/subrun/event int m_runNo{0}; int m_subRunNo{0}; @@ -51,24 +112,56 @@ namespace WireCell::Clus { // Count how many times we are called size_t m_count{0}; + /** Config: "groupings" + * + * List of groupings to select for processing. + * + * Default: ["live","dead"]. + */ + std::vector m_groupings = {"live","dead"}; + /** Config: "inpath" * - * The datapath for the input point graph data. This may be a - * regular expression which will be applied in a first-match - * basis against the input tensor datapaths. If the matched - * tensor is a pcdataset it is interpreted as providing the - * nodes dataset. Otherwise the matched tensor must be a - * pcgraph. + * The BASE datapath for the input pc tree data. This may be a regular + * expression which will be applied in a first-match basis against the + * input tensor datapaths. If the matched tensor is a pcdataset it is + * interpreted as providing the nodes dataset. Otherwise the matched + * tensor must be a pcgraph. + * + * A "%d" will be interpolated with the ident number of the input tensor + * set. + * + * See also "insubpaths". */ std::string m_inpath{".*"}; /** Config: "outpath" * - * The datapath for the resulting pcdataset. A "%d" will be - * interpolated with the ident number of the input tensor set. + * The BASE datapath for the resulting pc tree data. + * + * A "%d" will be interpolated with the ident number of the input tensor + * set. + * + * See outsubpaths. */ std::string m_outpath{""}; + /** Config: insubpaths, outsubpaths. 
+ * + * By default, a grouping of a given NAME is located at an input or + * output path spelled as: "{inpath,outpath}/NAME". + * + * If a grouping NAME is found in either insubpath or outsubpath then + * this default is overridden. Both parameters are array of objects, + * each object has keys "name" and "subpath". The subpath is a simple + * string suffix and thus should include a leading "/" if the user + * wishes to locate the grouping in a "subdirectory". + */ + // See issue #375 and #416. + std::map m_insubpaths, m_outsubpaths; + std::string inpath(const std::string& name, int ident); + std::string outpath(const std::string& name, int ident); + /** Config: "perf" * * If true, emit time/memory performance measures. Default is false. @@ -84,23 +177,63 @@ namespace WireCell::Clus { */ bool m_dump_json{false}; + /** Config: "cluster_id_order" + * + * The various operations can lead to redundantly or non-sequentially + * numbered cluster idents. The application of merge_clusters() will + * cause a resulting cluster to have the ID of its first contributing + * constituent. The application of separate() will cause all clusters + * to have the ID of the original. + * + * When this parameter is given, the cluster IDs will be reset after + * each component operation, on a per-grouping basis, so that they + * represent a specific sort order. + * + * - "tree" :: use the as-is child-order which represents + * child-insertion order unless some operation has explicitly + * reordered the underlying PC tree. + * + * - "size" :: use the "size" of the cluster as determined by + * cluster_less() to order the cluster IDs. This considers the + * cluster length, number of children, number of points followed by + * per-view min then max bounds and finally cluster center. + * + * - "" :: cluster IDs are not modified. + * + * By default, no ID rewriting is performed. + * + * Notes: + * + * - When an ID cluster order is applied, the ID counting starts from 1. 
+ * - The default (unset) ID is -1. + */ + std::string m_clusters_id_order; + // configurable parameters for dead-live clustering int m_dead_live_overlap_offset{2}; - // clustering_examine_x_boundary - // double m_x_boundary_low_limit{-1*units::cm}; - // double m_x_boundary_high_limit{257*units::cm}; - - Configuration m_func_cfgs; + // Keep track of configured clustering methods with their metadata to + // assist in debugging/logging. + struct EnsembleVisitor { + std::string name; + IEnsembleVisitor::pointer meth; + }; + /** Config: pipeline + * + * Array of type/name of instances of IEnsembleVisitor to execute in the pipeline. + */ + std::vector m_pipeline; // the anode to be processed - IAnodePlane::pointer m_anode; + std::vector m_anodes; + + IDetectorVolumes::pointer m_dv; // the face to be processed - int m_face{0}; + // int m_face{0}; // the geometry helper - IClusGeomHelper::pointer m_geomhelper; + // IClusGeomHelper::pointer m_geomhelper; }; } // namespace WireCell::Clus diff --git a/clus/inc/WireCellClus/PRCommon.h b/clus/inc/WireCellClus/PRCommon.h new file mode 100644 index 000000000..48769bbe8 --- /dev/null +++ b/clus/inc/WireCellClus/PRCommon.h @@ -0,0 +1,261 @@ +/** Common types and functions for pattern recognition code. + + This file must not depend on any (other) types in the WireCell::Clus::PR + namespace. + */ +#ifndef WIRECELL_CLUS_PR_COMMON +#define WIRECELL_CLUS_PR_COMMON + +#include "WireCellUtil/Point.h" + +namespace WireCell::Clus::Facade { + class Cluster; + class DynamicPointCloud; +} + +namespace WireCell::Clus::PR { + + /// A mixin for various PR objects that have an associated cluster. + template + class HasCluster { + public: + + virtual ~HasCluster() = default; + + // Getters + + /// Get the associated cluster. May be nullptr. Assumes user keeps + /// cluster (ie, its n-ary tree node) alive. 
+ const Facade::Cluster* cluster() const { return m_cluster; } + Facade::Cluster* cluster() { return m_cluster; } + + // Chainable setters + + /// Store a pointer to a cluster. + Subclass& cluster(Facade::Cluster* cptr) { m_cluster = cptr; return *dynamic_cast(this); } + + private: + Facade::Cluster* m_cluster{nullptr}; + + }; + + /// A mixin for various PR objects that have one or more associated DynamicPointCloud instances. + /// + /// It manages a named map from string to shared pointer of DynamicPointCloud. + template + class HasDPCs { + public: + + virtual ~HasDPCs() = default; + + // Getters + + /// Get a const DynamicPointCloud pointer by name. + /// + /// Returns nullptr if name is unknown. + std::shared_ptr dpcloud(const std::string& name) const { + auto it = m_dpcs.find(name); + if (it == m_dpcs.end()) { + return nullptr; + } + return it->second; + } + + /// Get a mutable DynamicPointCloud pointer by name. + /// + /// Returns nullptr if name is unknown. + std::shared_ptr dpcloud(const std::string& name) { + auto it = m_dpcs.find(name); + if (it == m_dpcs.end()) { + return nullptr; + } + return it->second; + } + + // Chainable setters + + /// Store a shared pointer to a DynamicPointCloud by name. + Subclass& dpcloud(const std::string& name, std::shared_ptr dpc_ptr) { + m_dpcs[name] = dpc_ptr; + return *dynamic_cast(this); + } + + private: + + std::unordered_map> m_dpcs; + + }; + + + + + /// A WCPoint is a 3D point and corresponding wire indices and an index. + // + // FIXME: does this need any change given we now support wrapped wires and + // multi APA/face detectors? + struct WCPoint { + WireCell::Point point; // 3D point + int uvw[3] = {-1,-1,-1}; // wire indices + int index{-1}; // point index in some container + + // FIXME: WCP had this, does WCT need it? + // blob* b; + + + // Return true if the point information has been filled. 
+ bool valid() const { + if (index < 0) return false; + return true; + } + }; + using WCPointVector = std::vector; + + /** A Fit holds information predicted about a point by some "fit". + * + * A PR::Vertex has a scalar Fit object and PR::Segment has a vector + * + * Note, WCP's ProtoSegment had struct-of-array instead of vector + */ + struct Fit { + WireCell::Point point; + double dQ{-1}, dx{0}, pu{-1}, pv{-1}, pw{-1}, pt{0} , reduced_chi2{-1}; + std::pair paf{-1, -1}; // apa, face + + int index{-1}; + double range{-1}; + bool flag_fix{false}; + + // Explicitly NOT defined: + + // bool flag_fit. This seems never actually used in WCP. If needed, + // can we simply test on index or range? + + // Restore values to invalid + void reset() { + index = -1; + flag_fix = false; + range = -1; + } + + double distance(const Point& p) { + return (p - point).magnitude(); + } + + /** Return true if fit information has been filled */ + bool valid() const { + if (index < 0 || range < 0) return false; + return true; + } + }; + using FitVector = std::vector; + + /** Some mixin classes, eg used by Vertex and Segment. + + See also "Graphed" from PRGraph.h and "Flagged" from util. + */ + + /// Transform an object-with-point to a point. + template + Point owp_to_point(const OWP& owp) { return owp.point; }; + + /// This type describes a `transform` function from some type to type Point. + /// + /// Functions that operate on Point and templated types and that take this + /// `transform` function will apply it to non-Point types in order to + /// produce a Point. + /// + /// A likely use will be to pass one of these: + /// + /// @code{.cpp} + /// transform = owp_to_point + /// transform = owp_to_point + /// @endcode + template + using to_point_f = std::function; + + /// Return the closest to point from a collection of points. + /// + /// An iterator into the collection is returned. + /// + /// See `to_point_f` type for information about the `transform` argument. 
+ template + typename Vec::const_iterator closest_point( + const Vec& points, const Point& point, + to_point_f transform = [](const typename Vec::value_type& a) { return a; }) + { + return std::min_element(points.begin(), points.end(), + [&](const auto& a, const auto& b) { + return (transform(a)-point).magnitude() < (transform(b)-point).magnitude();}); + } + + /// Return the closest to point from an iteration range of points. + /// + /// An iterator into the collection is returned. + /// + /// See `to_point_f` type for information about the `transform` argument. + template + It closest_point( + It begin, It end, + const Point& point, + to_point_f::value_type> transform = + [](const typename std::iterator_traits::value_type& a) { return a; }) + { + return std::min_element(begin, end, + [&](const auto& a, const auto& b) { + return (transform(a)-point).magnitude() < (transform(b)-point).magnitude();}); + } + + + + /// Return the "walk length" over a path of points in a vector-like collection. + /// + /// This returns: + /// + /// length(v[0],v[1]) + length(v[1],v[2]) ... + /// + /// See `to_point_f` type for information about the `transform` argument. + template + double walk_length(const VP& points, + to_point_f transform = [](const typename VP::value_type& a) { return a; }) { + const auto siz = points.size(); + if (siz < 2) { return 0.0; } + double total_dist = 0.0; + Point last_point = transform(points[0]); + for (size_t i = 1; i < siz - 1; ++i) { + Point next_point = transform(points[i+1]); + total_dist += (last_point - next_point).magnitude(); + last_point = next_point; + } + return total_dist; + } + + /// Return the "walk length" over a path of points in an integrator range. + /// + /// This returns: + /// + /// length(v[0],v[1]) + length(v[1],v[2]) ... + /// + /// See `to_point_f` type for information about the `transform` argument. 
+ template + double walk_length(It begin, It end, + to_point_f::value_type> transform = + [](const typename std::iterator_traits::value_type& a) { return a; }) + { + const auto siz = std::distance(begin, end); + if (siz < 2) { return 0.0; } + double total_dist = 0.0; + Point last_point = transform(*begin); + for (++begin; begin != end; ++begin) { + Point next_point = transform(*begin); + total_dist += (last_point - next_point).magnitude(); + last_point = next_point; + } + return total_dist; + } + + + +} + + +#endif diff --git a/clus/inc/WireCellClus/PRGraph.h b/clus/inc/WireCellClus/PRGraph.h new file mode 100644 index 000000000..cb515f04a --- /dev/null +++ b/clus/inc/WireCellClus/PRGraph.h @@ -0,0 +1,123 @@ +/** Define functions that can operate on a "trajectory" graph. + + Functions provided should be preferred over equivalent graph related + functions in `boost::` as the trajectory graph requires properly bookkeeping + when adding/removing nodes and/or edges. It is undefined behavior if you + call `boost::` graph mutators and fail to follow the bookkeeping + conventions. + + */ +#ifndef WIRECELL_CLUS_PR_GRAPH +#define WIRECELL_CLUS_PR_GRAPH + +#include "WireCellClus/PRVertex.h" +#include "WireCellClus/PRSegment.h" + +#include "WireCellUtil/Graph.h" +#include "WireCellUtil/Exceptions.h" + +namespace WireCell::Clus::PR { + + + /// Make a vertex and add it to the graph. + /// + /// This method will properly adhere to the indexing policy and is + /// recommended instead of creating a PR::Vertex by hand. + /// + /// This is templated to allow for non-default construction of the + /// PR::Vertex. Normal usage: + /// + /// @code{.cpp} + /// auto my_vtx = make_vertex(my_graph); + /// @endcode + template + VertexPtr make_vertex(Graph& g, Args&&... 
args) { + auto ptr = std::make_shared(std::forward(args)...); + bool ok = add_vertex(g, ptr); + if (!ok) { + raise("make_vertex failed to add vertex which should never happen!"); + } + return ptr; + } + + /// Add an existing vertex to the graph. + /// + /// Return true if graph modified. + /// + /// If the PR::Vertex already has a valid descriptor, false is returned. + /// + /// Otherwise, a new graph node is added with its bundle holding this vertex + /// and a unique index. The resulting descriptor is set on the vertex and + /// true is returned. + /// + /// User must take responsibility to provide a vertex that is consistent + /// with the indexing policy. For a safer function, use `make_vertex()`. + bool add_vertex(Graph& g, VertexPtr vtx); + + + /// Remove a vertex from the graph. + /// + /// Return true if graph modified. + /// + /// The descriptor held by the object is left invalid. + bool remove_vertex(Graph& graph, VertexPtr vtx); + + + /// Make a PR::Segment + /// + /// This makes an "orphaned" segment not associated to any graph. Call + /// `add_segment()` to add it to a graph, or better, use `make_segment()` + /// to do both at once. + template + SegmentPtr make_segment(Args&&... args) { + return std::make_shared(std::forward(args)...); + } + + /// Add a PR::Segment to the graph as an edge between the nodes of two + /// PR::Vertex instances. + /// + /// Return true if the graph was modified. Modification means that a new + /// vertex or edge was added. In the case that vtx1 and vtx2 already have + /// an edge, it is updated with the segment and that does not influence + /// true/false return value. + /// + /// If the PR::Segment already has a valid descriptor then it is not added. + /// + /// If either PR::Vertex do not have a valid descriptor then they are added + /// to the graph and will cause a true return value irrespective of the + /// status of the segment. 
But, users are urged to only use `make_index()` + /// to construct a PR::Vertex which will assure graph addition. + bool add_segment(Graph& g, SegmentPtr seg, VertexPtr vtx1, VertexPtr vtx2); + + + /// Make a PR::Segment and add it to the graph as an edge between two vertices. + /// + template + SegmentPtr make_segment(Graph& g, VertexPtr vtx1, VertexPtr vtx2, Args&&... args) { + SegmentPtr seg = std::make_shared(std::forward(args)...); + add_segment(g, seg, vtx1, vtx2); + return seg; + } + + + /// Remove a segment from the graph. + /// + /// Return true if graph modified. + /// + /// The descriptor held by the object is left invalid. + bool remove_segment(Graph& graph, SegmentPtr seg); + + + /// Return the end-point vertices of a segment. + /// + /// The pair will be nullptr if segment edge not in graph. + /// + /// The two Vertex objects are those associated with the source/target nodes + /// of the segment's edge. The pair is ordered. The first Vertex is the + /// one with a "wcpoint" closest to the segment's initial "wcpoint". + std::pair find_endpoints(Graph& graph, SegmentPtr seg); + + + +}; +#endif diff --git a/clus/inc/WireCellClus/PRGraphType.h b/clus/inc/WireCellClus/PRGraphType.h new file mode 100644 index 000000000..9a869ac41 --- /dev/null +++ b/clus/inc/WireCellClus/PRGraphType.h @@ -0,0 +1,224 @@ +/** Define a "trajectory" graph type. + + A trajectory graph's nodes have an associated PR::Vertex and edges have an + associated PR::Segment. These objects also carry their associated graph + descriptors. + + Application code should avoid adding nodes and edges directly using + `boost::add_vertex()` and `boost::add_edge()`. See PRGraph.h for equivalent + methods that operate on the PR::Vertex and PR::Segment objects and assure + proper bookkeeping. + + Application code that is sensitive to the order of iterating over graph + nodes should use the `PR::ordered_nodes()` function to get a stable order. 
+ Native ordering of nodes in the graph is subject to pointer value variance. + */ + +#ifndef WIRECELL_CLUS_PR_GRAPHTYPE +#define WIRECELL_CLUS_PR_GRAPHTYPE + +#include "WireCellUtil/Graph.h" + +namespace WireCell::Clus::PR { + + // The headers for these classes include this header, so here we just + // forward-declare to avoid a cycle. + + /// The node object type for all graphs + class Vertex; + /// The shared pointer to the graph node object. + using VertexPtr = std::shared_ptr; + /// A graph node property bundle holds the shared pointer to PR::Vertex. + /// + /// Note, PR::Vertex holds a descriptor for its graph node to allow + /// navigation between graph and object representations. + struct NodeBundle { + /// A shared pointer to a PR::Vertex object + VertexPtr vertex; // shared pointer + /// A monotonically increasing, potentially sparse index uniquely + /// identifying this edge in a graph + size_t index; + }; + + + /// The edge object type for all graphs. + class Segment; + /// The shared pointer to the graph edge object. + using SegmentPtr = std::shared_ptr; + /// A graph edge property bundle holds the shared pointer to PR::Segment. + /// + /// Note, PR::Segment holds a descriptor for its graph edge to allow + /// navigation between graph and object representations. + struct EdgeBundle { + /// A shared pointer to a PR::Segment object + SegmentPtr segment; // shared pointer + /// A monotonically increasing, potentially sparse index uniquely + /// identifying this edge in a graph + size_t index; + }; + + /// A graph-level property holds internal book-keeping information. + /// + /// Normal application code need and should not access this. + struct GraphBundle { + /// The total number of nodes ever added to this graph. + /// + /// This is used to set a unique index. It is greater or equal to the + /// number of nodes currently in the graph. + size_t num_node_indices{0}; + /// The total number of edges ever added to this graph. 
+ /// + /// This is used to set a unique index. It is greater or equal to the + /// number of edges currently in the graph. + size_t num_edge_indices{0}; + + }; + + /** The graph type. + * + * This graph uses setS for descriptor containers. It provides robust + * node/edge descriptors that will remain valid when unrelated node/edge + * descriptors are removed. The order of iterating the raw vertex or edge + * set is well defined within a given program execution. However, this + * order is based on pointer value and so may change between executions of + * an identical program. Use ordered_nodes() to produce an ordering based + * on index. + * + * User should avoid using boost::add_vertex() and boost::add_edge() on + * instances of this graph type. Instead use PR::add_vertex() and + * PR::add_segment(). + */ + using Graph = boost::adjacency_list< + boost::setS, // vertices + boost::setS, // edges + boost::undirectedS, // edge direction + NodeBundle, + EdgeBundle, + GraphBundle + >; + + /// The descriptor type for nodes. This is a `void*`. + using node_descriptor = boost::graph_traits::vertex_descriptor; + + /// A vector of node descriptors + using node_vector = std::vector; + + /// The (user) iterator on nodes. See `ordered_nodes()`. + using node_iterator = node_vector::iterator; + + /// The iterator range. + using node_range = boost::iterator_range; + + /// The descriptor type for edges. + using edge_descriptor = boost::graph_traits::edge_descriptor; + /// The (user) iterator on edges. See `ordered_edges()`. + using edge_iterator = std::vector::iterator; + using edge_range = boost::iterator_range; + + + /** Provide a hash of edge descriptors. + * + * This is needed for unordered containers and boost does not provide. + */ + struct EdgeDescriptorHash { + const Graph& g; + explicit EdgeDescriptorHash(const Graph& graph) : g(graph) {} + + size_t operator()(const edge_descriptor& ed) const { + // Get the two endpoints of the edge using the graph object. 
+ node_descriptor u = boost::source(ed, g); + node_descriptor v = boost::target(ed, g); + + // To ensure the hash is symmetrical for an undirected graph, + // we sort the descriptors before hashing. + if (u > v) { + std::swap(u, v); + } + + // Combine the hashes of the two vertex descriptors. + size_t seed = 0; + boost::hash_combine(seed, u); + boost::hash_combine(seed, v); + return seed; + } + }; + + /** Provide equality test of edge descriptors. + * + * This is needed for unordered containers and boost does not provide. + */ + struct EdgeDescriptorEqual { + const Graph& g; + explicit EdgeDescriptorEqual(const Graph& graph) : g(graph) {} + + bool operator()(const edge_descriptor& a, const edge_descriptor& b) const { + // Two edges are equal if their endpoints are equal. + // This is a robust check that is independent of the descriptor's internal value. + node_descriptor u1 = boost::source(a, g); + node_descriptor v1 = boost::target(a, g); + node_descriptor u2 = boost::source(b, g); + node_descriptor v2 = boost::target(b, g); + + return (u1 == u2 && v1 == v2) || (u1 == v2 && v1 == u2); + } + }; + + /// An unordered set of node descriptors. + using node_unordered_set = std::unordered_set; + + /// An unordered set of edge descriptors. + using edge_unordered_set = std::unordered_set; + + + /// Return a vector of node descriptors in native graph order. + /// + /// This order is based on pointer values. + /// + /// The vector may be conveniently used to iterate over nodes: + /// + /// @code{.cpp} + /// for (const auto& vd : nodes(my_graph)) { + /// std::cout << "Node index: " << my_graph[vd].index << std::endl; + /// } + /// @endcode + node_vector graph_nodes(Graph& g); + + /// Return a vector of node descriptors ordered by index. 
+ /// + /// The vector may be conveniently used to iterate over nodes: + /// + /// @code{.cpp} + /// for (const auto& vd : ordered_nodes(my_graph)) { + /// std::cout << "Node index: " << my_graph[vd].index << std::endl; + /// } + /// @endcode + node_vector ordered_nodes(Graph& g); + + + + /** A mixin class for Vertex/Segment to manage their stored descriptor. + */ + template + class Graphed { + public: + using descriptor_type = Descriptor; + + const descriptor_type invalid_descriptor{}; + + descriptor_type get_descriptor() const { return m_descriptor; } + void set_descriptor(descriptor_type descriptor) { m_descriptor = descriptor; } + + bool descriptor_valid() const { + return m_descriptor != invalid_descriptor; + } + void invalidate_descriptor() { + m_descriptor = invalid_descriptor; + } + + private: + descriptor_type m_descriptor{}; + }; + + +} +#endif diff --git a/clus/inc/WireCellClus/PRSegment.h b/clus/inc/WireCellClus/PRSegment.h new file mode 100644 index 000000000..7dc0a3104 --- /dev/null +++ b/clus/inc/WireCellClus/PRSegment.h @@ -0,0 +1,140 @@ +#ifndef WIRECELL_CLUS_PR_SEGMENT +#define WIRECELL_CLUS_PR_SEGMENT + +#include "WireCellClus/PRCommon.h" +#include "WireCellClus/PRGraphType.h" +#include "WireCellUtil/Flagged.h" +#include "WireCellAux/ParticleInfo.h" +#include "WireCellIface/IDetectorVolumes.h" +#include + + +namespace WireCell::Clus::PR { + + /** The flags used to categorize a segment. + * + * These are used by the Segment class via its Flagged base (from util/). + * + */ + enum class SegmentFlags { + /// The segment has no particular category. + kUndefined = 0, + /// The segment is part of a shower trajectory + kShowerTrajectory = 1<<1, + /// The segment is part of a shower topology + kShowerTopology = 1<<2, + /// The segment should not have a muon check. + kAvoidMuonCheck = 1<<3, + /// The fits are provided. + kFit = 1<<4, + }; + + + /** A segment represents a connection between vertices in a larger trajectory. 
+ * + * A segment has: + * + * - a vector of associated 3D point and corresponding "fit" information + * which includes a potentially different 3D point. + * + * - a set of possible FLAGS (see SegmentFlags and Flagged base class) + * + * - a bare pointer to a Facade::Cluster. This may be nullptr. And it can + * be invalid if the user does something to destroy the cluster while this + * object still lives. + * + * - a generic graph edge descriptor (see Graphed base class and PR::Vertex + * commentary for more information on the nature this descriptor). + * + * Caution, although the points held by the segment should be ordered + * "along" the segment, the graph edge representing the segment is NOT + * ORDERED. This means that the point in the vertex found at the `source()` + * node for the segments edge is not necessarily closest to the segment's + * first point. See `find_endpoints()` for one way to resolve this + * directional ambiguity. + * + * Note, a PR::Segment is essentially the ProtoSegment of WCP. + */ + class Segment + : public Flagged // can set flags + , public Graphed // may live in a graph + , public HasCluster // has an associated Cluster*. + , public HasDPCs // has associated DynamicPointClouds. + , public std::enable_shared_from_this // allows shared_from_this() + { + public: + + // Getters + const std::shared_ptr& particle_info() const { return m_particle_info; } + std::shared_ptr& particle_info() { return m_particle_info; } + + // Chainable setter + Segment& particle_info(std::shared_ptr pinfo) { + m_particle_info = pinfo; + return *this; + } + + // Convenience method to check if particle info is available + bool has_particle_info() const { return m_particle_info != nullptr; } + + + /// Get the const vector of fits. + const std::vector& fits() const { return m_fits; } + + /// Get the mutable vector of fits. + std::vector& fits() { return m_fits; } + + /// Get the const original points. 
+ const std::vector& wcpts() const { return m_wcpts; } + + /// Get the mutable original points. + std::vector& wcpts() { return m_wcpts; } + + /// Get the sign +1/0/-1 (was "flag_dir" in WCT). + int dirsign() const { return m_dirsign; } + bool dir_weak() const { return m_dir_weak; } + + // Chainable setters + + /// Chainable setter of fits vector. + Segment& fits(const std::vector& f) { m_fits = f; return *this; }; + /// Chainable setter of original points vector. + Segment& wcpts(const std::vector& pts) { m_wcpts = pts; return *this; } + /// Chainable setter of dirsign. + Segment& dirsign(int dirsign) { + if (dirsign == 0) m_dirsign = 0; + else m_dirsign = dirsign > 0 ? 1 : -1; + return *this; + } + Segment& dir_weak(bool weak) { m_dir_weak = weak; return *this; } + + // reset fit properties ... + void reset_fit_prop(); + int fit_index(int i); + void fit_index(int i, int idx); + bool fit_flag_skip(int i); + void fit_flag_skip(int i, bool flag); + + void set_fit_associate_vec(std::vector& tmp_fit_pt_vec, std::vector& tmp_fit_index, std::vector& tmp_fit_skip, const IDetectorVolumes::pointer& dv,const std::string& cloud_name="fit"); + + + private: + + std::vector m_wcpts; + std::vector m_fits; + + int m_dirsign{0}; + bool m_dir_weak{false}; + + std::shared_ptr m_particle_info{nullptr}; + + + + // Still must consider adding: + // + pcloud_fit + // + pcloud_associated + // - but NOT pcloud_associated_steiner as it is never used + }; + +} +#endif diff --git a/clus/inc/WireCellClus/PRSegmentFunctions.h b/clus/inc/WireCellClus/PRSegmentFunctions.h new file mode 100644 index 000000000..631cb6a73 --- /dev/null +++ b/clus/inc/WireCellClus/PRSegmentFunctions.h @@ -0,0 +1,116 @@ +#ifndef WIRECELL_CLUS_PRSEGMENTFUNCTIONS +#define WIRECELL_CLUS_PRSEGMENTFUNCTIONS + +#include "WireCellClus/PRGraph.h" +#include "WireCellUtil/Point.h" +#include "WireCellUtil/Units.h" +#include "WireCellUtil/D4Vector.h" +#include "WireCellIface/IDetectorVolumes.h" +#include 
"WireCellIface/IRecombinationModel.h" +#include "WireCellClus/ParticleDataSet.h" + +namespace WireCell::Clus::PR { + + using geo_point_t = WireCell::Point; + + /// Replace the segment in the graph with two new segments that meet at a + /// new vertex nearest to the point. + /// + /// The input segment is removed from the graph. + /// + /// The point must be withing max_dist of the segment. + /// + /// Returns true if the graph was modified. + bool break_segment(Graph& graph, SegmentPtr seg, Point point, + double max_dist=1e9*units::cm); + // patter recognition + std::tuple segment_search_kink(SegmentPtr seg, WireCell::Point& start_p, const std::string& cloud_name = "fit", double dQ_dx_threshold = 43000/units::cm ); + + /// Calculate track length from segment + /// + /// If flag == 1 and segment has fitted dx values, sum the dx values from fits. + /// If flag == 0, calculate geometric length from wcpts. + /// + /// @param seg The segment to calculate length for + /// @param flag Calculation method: 0=geometric from points, 1=from fit dx values + /// @return Track length + double segment_track_length(SegmentPtr seg, int flag = 0, int n1 = -1, int n2 = -1, WireCell::Vector dir_perp = WireCell::Vector(0,0,0)); + double segment_track_direct_length(SegmentPtr seg, int n1 = -1, int n2 = -1, WireCell::Vector dir_perp = WireCell::Vector(0,0,0)); + double segment_track_max_deviation(SegmentPtr seg, int n1 = -1, int n2 = -1); + /// Calculate track length above dQ/dx threshold + /// + /// Extracts dQ and dx from segment's fits and calculates length above threshold. 
+ /// + /// @param seg The segment containing fit data + /// @param threshold dQ/dx threshold value + /// @return Length of track segments above threshold + double segment_track_length_threshold(SegmentPtr seg, double threshold = 75000./units::cm); + /// Calculate track length from segment using geometric distance between points + /// + /// This is a convenience function that always uses geometric calculation + /// regardless of available dx data. + /// + /// @param seg The segment to calculate length for + /// @return Geometric track length + double segment_geometric_length(SegmentPtr seg, int n1 = -1, int n2 = -1, WireCell::Vector dir_perp = WireCell::Vector(0,0,0)); + + + /// Calculate median dQ/dx for a segment + /// + /// Extracts dQ and dx from segment's fits and calculates median dQ/dx. + /// + /// @param seg The segment containing fit data + /// @return Median dQ/dx value (0 if no valid fits) + double segment_median_dQ_dx(SegmentPtr seg); + double segment_rms_dQ_dx(SegmentPtr seg); + + + /// Create and associate a DynamicPointCloud with a segment from path points + /// + /// @param segment The segment to associate the DynamicPointCloud with + /// @param path_points Vector of 3D points to process + /// @param dv Detector volume for wire plane ID determination + /// @param cloud_name Name for the DynamicPointCloud (default: "main") + void create_segment_point_cloud(SegmentPtr segment, + const std::vector& path_points, + const IDetectorVolumes::pointer& dv, + const std::string& cloud_name = "main"); + + void create_segment_fit_point_cloud(SegmentPtr segment, + const IDetectorVolumes::pointer& dv, + const std::string& cloud_name = "fit"); + + std::pair segment_get_closest_point(SegmentPtr seg, const WireCell::Point& point, const std::string& cloud_name = "fit"); + std::tuple segment_get_closest_2d_distances(SegmentPtr seg, const WireCell::Point& point, int apa, int face, const std::string& cloud_name = "fit"); + + + // PID related + bool eval_ks_ratio(double 
ks1, double ks2, double ratio1, double ratio2); + std::vector do_track_comp(std::vector& L , std::vector& dQ_dx, double compare_range, double offset_length, const Clus::ParticleDataSet::pointer& particle_data, double MIP_dQdx = 50000/units::cm); + // success, flag_dir, pdg_code, particle_score + std::tuple segment_do_track_pid(SegmentPtr segment, std::vector& L , std::vector& dQ_dx, double compare_range , double offset_length, bool flag_force, const Clus::ParticleDataSet::pointer& particle_data, double MIP_dQdx = 50000/units::cm); + + // direction calculation ... + WireCell::Vector segment_cal_dir_3vector(SegmentPtr seg); + WireCell::Vector segment_cal_dir_3vector(SegmentPtr seg, WireCell::Point& p, double dis_cut); + WireCell::Vector segment_cal_dir_3vector(SegmentPtr seg, int direction, int num_points, int start); + void segment_determine_dir_track(SegmentPtr segment, int start_n, int end_n, const Clus::ParticleDataSet::pointer& particle_data, const IRecombinationModel::pointer& recomb_model, double MIP_dQdx = 43000/units::cm, bool flag_print = false); + + // kinemiatics calculations ... 
+ double segment_cal_kine_dQdx(SegmentPtr seg, const IRecombinationModel::pointer& recomb_model); + double cal_kine_dQdx(std::vector& vec_dQ, std::vector& vec_dx, const IRecombinationModel::pointer& recomb_model); + double cal_kine_range(double L, int pdg_code, const Clus::ParticleDataSet::pointer& particle_data); + // 4-momentum: E, px, py, pz + WireCell::D4Vector segment_cal_4mom(SegmentPtr segment, int pdg_code, const Clus::ParticleDataSet::pointer& particle_data, const IRecombinationModel::pointer& recomb_model, double MIP_dQdx = 50000/units::cm); + + // EMshower PID + void clustering_points_segments(std::set segments, const IDetectorVolumes::pointer& dv, const std::string& cloud_name = "associate_points", double search_range = 1.2*units::cm, double scaling_2d = 0.7); + + bool segment_is_shower_trajectory(SegmentPtr seg, double step_size = 10*units::cm, double mip_dQ_dx = 50000 / units::cm); + void segment_determine_shower_direction_trajectory(SegmentPtr segment, int start_n, int end_n, const Clus::ParticleDataSet::pointer& particle_data, const IRecombinationModel::pointer& recomb_model, double MIP_dQdx = 43000/units::cm, bool flag_print = false); + + bool segment_determine_shower_direction(SegmentPtr segment, const Clus::ParticleDataSet::pointer& particle_data, const IRecombinationModel::pointer& recomb_model, const std::string& cloud_name = "associate_points", double MIP_dQdx = 43000/units::cm, double rms_cut= 0.4*units::cm); + bool segment_is_shower_topology(SegmentPtr seg, bool tmp_val=false, double MIP_dQ_dx = 43000/units::cm); +} + +#endif diff --git a/clus/inc/WireCellClus/PRShower.h b/clus/inc/WireCellClus/PRShower.h new file mode 100644 index 000000000..edaa9f524 --- /dev/null +++ b/clus/inc/WireCellClus/PRShower.h @@ -0,0 +1,116 @@ +#ifndef WIRECELL_CLUS_PR_SHOWER +#define WIRECELL_CLUS_PR_SHOWER + +#include "WireCellClus/PRCommon.h" +#include "WireCellClus/PRTrajectoryView.h" + +#include "WireCellUtil/Flagged.h" +#include "WireCellUtil/Point.h" + 
+namespace WireCell::Clus::PR { + + /** The "flags" that may be set on a shower. + */ + enum class ShowerFlags { + /// The segment has no particular category. + kUndefined = 0, + /// The shower flag + kShower = 1<<1, + /// The kinematics flag + kKinematics = 1<<2, + }; + + /** The data attributes of a PR::Shower + + The original WCShower has a huge number of attributes that are merely + carried by the shower. This struct factors out that data to facilitate + writing stand-alone functions. + + Anything that is not part of the core shower-as-graph-view concept gets + lumped into this ShowerData struct. + + Note, "flags" are set via Shower's Flagged base class. + + */ + struct ShowerData + { + int particle_type; + double kenergy_range; + double kenergy_dQdx; + double kenergy_charge; + double kenergy_best; + + WireCell::Point start_point; + WireCell::Point end_point; + WireCell::Vector init_dir; + + /// 1 for direct connection, 2 for indirection connection with a gap, 3 + /// for associations (not clear if this should be connected or not + int start_connection_type; + }; + + + /** Model a shower-like view of a trajectory. + + This is the WCT equivalent to a WCT WCShower. + */ + class Shower + : public TrajectoryView + , public Flagged // can set flags + , public HasDPCs // has associated DynamicPointClouds. + { + public: + + Shower(Graph& graph); + virtual ~Shower(); + + // The bag of attributes is directly exposed to user. + ShowerData data; + + + // Getters + + /** Get the vertex that is considered the start of the shower. + */ + VertexPtr start_vertex(); + + /** Get the segment that is considered the start of the shower. + */ + SegmentPtr start_segment(); + + // Chainable setters + + /** Chainable setter of start vertex. + + The vertex must already be added to the underlying graph that this + shower views. + + The vertex will be added to the view. + + The vertex will replace any existing start vertex and not remove the + prior vertex from the shower's view. 
Use `Shower::remove_vertex()` + to explicitly remove from the view and `PR::remove_vertex()` to + remove it from the underlying graph, if either are required. + + If the vertex lacks a valid descriptor, eg has yet to be added to + the underlying graph, this function is a no-op and the stored + start_vertex is nullified. + */ + Shower& start_vertex(VertexPtr vtx); + + /** Chainable setter of start segment. + + This has the same semantics and caveats as the chainable setter: + `start_vertex(VertexPtr)`. + */ + Shower& start_segment(SegmentPtr seg); + + private: + + VertexPtr m_start_vertex; + SegmentPtr m_start_segment; + + }; + +} +#endif diff --git a/clus/inc/WireCellClus/PRShowerFunctions.h b/clus/inc/WireCellClus/PRShowerFunctions.h new file mode 100644 index 000000000..ea5a4432c --- /dev/null +++ b/clus/inc/WireCellClus/PRShowerFunctions.h @@ -0,0 +1,18 @@ +#ifndef WIRECELL_CLUS_PR_SHOWER_FUNCTIONS +#define WIRECELL_CLUS_PR_SHOWER_FUNCTIONS + +namespace WireCell::Clus::PR { + + /** Modify shower assuming shower kinematics. + * + * This free function is is equivalent to the method of WCP's + * WCShower::calculate_kinematics(). + */ + void shower_kinematics(ShowerPtr shower); + + void update_particle_type(ShowerPtr shower); + + void longmuon_kinematics(ShowerPtr shower); +} + +#endif diff --git a/clus/inc/WireCellClus/PRTrajectory.h b/clus/inc/WireCellClus/PRTrajectory.h new file mode 100644 index 000000000..513970913 --- /dev/null +++ b/clus/inc/WireCellClus/PRTrajectory.h @@ -0,0 +1,77 @@ +/** Basic classes and functions to build and work with trajectory graphs + */ +#ifndef WIRECELL_CLUS_PR_TRAJECTORY +#define WIRECELL_CLUS_PR_TRAJECTORY + +#include "WireCellClus/PRTrajectoryView.h" + +namespace WireCell::Clus::PR { + + class TrajectoryView; + using TrajectoryViewPtr = std::shared_ptr; + + /** Manage a trajectory graph + + A PR::Trajectory provides a collection of PR::TrajectoryView which help + to construct and query a user's trajectory graphs. 
+ + A trajectory graph is owned by the user and is borrowed by reference by + the Trajectory (and TrajectoryView). + + A PR::Trajectory (and views) provide a mechanism to associate additional + methods and state to trajectory graph (or subgraph). + + Application code may create one or more PR::Trajectory (or view) + instances on the user's graph. An application class is recommended to + keep a base Trajectory as a data member through the application class + may extend PR::Trajectory (and PR::TrajectoryView) through inheritance. + + */ + class Trajectory { + + public: + + Trajectory(Graph& graph); + + /** Make and store a trajectory view of a given type. + * + * See PR::TrajectoryView. + */ + template + std::shared_ptr make_view() = { + auto ptr = PR::make_view(m_graph); + m_views.push_back(ptr); + return ptr; + }; + + using ViewVector = std::vector; + + /** Access all known views */ + const ViewVector& views() const { return m_views; } + + /** Number of views */ + size_t size() const { return m_views.size(); } + + /** Get an existing view as a particular type. + * + * Unlike STL at(), no exception is thrown and instead a nullptr is + * returned on out-of-bounds index or type mismatch error. + */ + template + std::shared_ptr at(size_t index) = { + if (index >= m_views.size()) return nullptr; + auto base = m_views.at(index); + return dynamic_pointer_cast(base); + } + + + private: + Graph& m_graph; + + std::vector m_views; + }; + +} + +#endif + diff --git a/clus/inc/WireCellClus/PRTrajectoryView.h b/clus/inc/WireCellClus/PRTrajectoryView.h new file mode 100644 index 000000000..5d337eda9 --- /dev/null +++ b/clus/inc/WireCellClus/PRTrajectoryView.h @@ -0,0 +1,123 @@ +/** Classes and functions related to a "view" of a trajectory graph. + * + */ + +#ifndef WIRECELL_CLUS_PR_TRAJECTORYVIEW +#define WIRECELL_CLUS_PR_TRAJECTORYVIEW + +#include "WireCellClus/PRGraphType.h" + +#include + + +namespace WireCell::Clus::PR { + + // Forward declare. 
+ class TrajectoryView; + + // Internal, used to select nodes in a view + struct TrajectoryViewNodePredicate { + const TrajectoryView& view; + TrajectoryViewNodePredicate(const TrajectoryView& v) : view(v) {}; + bool operator()(const node_descriptor& desc) const; + }; + struct TrajectoryViewEdgePredicate { + const TrajectoryView& view; + TrajectoryViewEdgePredicate(const TrajectoryView& v) : view(v) {}; + bool operator()(const edge_descriptor& desc) const; + }; + + + /** Provide a view of a trajectory graph. + * + * A PR::TrajectoryView manages a filtered (not necessarily induced) + * subgraph on the given trajectory graph. The TrajectoryView is + * essentially a wrapper over `boost::filtered_graph` with an API that is in + * terms of PR::Vertex and PR::segment instead of bare descriptors. + * + * A PR::TrajectoryView is given and holds a borrowed reference to a user's + * graph instance and provides methods to manage a subset of the graph's + * nodes and edges that are considered "in view". + * + * It is recommended that the base PR::TrajectoryView be used by application + * code. However, it is allowed to extend PR::TrajectoryView via inheritance. + * + * A PR::TrajectoryView may be created in isolation. See + * `make_trajectoryview()` to produce one as a shared pointer and + * PR::Trajectory for a convenient way to construct and maintain a + * collection of (potentially heterotypical) TrajectoryView instances. + */ + class TrajectoryView { + public: + + using full_graph_type = Graph; + + using view_graph_type = boost::filtered_graph; + using view_node_descriptor = boost::graph_traits::vertex_descriptor; + using view_edge_descriptor = boost::graph_traits::edge_descriptor; + + TrajectoryView() = delete; + virtual ~TrajectoryView(); + + /** Construct a view on a borrowed reference a trajectory graph. + * + * Caller must assure graph object lifetime exceeds that of the view. 
+ */ + TrajectoryView(full_graph_type& graph); + + /** Access to the underlying filtered graph. */ + const view_graph_type& view_graph() const; + + /// Return true if node descriptor is in the filter. + bool has_node(node_descriptor desc) const; + + /// Return true if edge descriptor is in the filter. + bool has_edge(edge_descriptor desc) const; + + /** Add the vertex to the filter. + + To be added to the filter the vertex must be already in the + underlying graph as determined by it having a valid descriptor. + + Return true if view is modified. + */ + bool add_vertex(VertexPtr vtx); + + /** Add the segment to the filter. + + To be added to the filter the segment must be already in the + underlying graph as determined by it having a valid descriptor. + + Return true if view is modified. + */ + bool add_segment(SegmentPtr seg); + + /** Remove the vertex from the filter. + + Return true if view is modified. + */ + bool remove_vertex(VertexPtr vtx); + + /** Remove the segment from the filter. + + Return true if view is modified. + */ + bool remove_segment(SegmentPtr seg); + + private: + view_graph_type m_graph; + node_unordered_set m_nodes; + edge_unordered_set m_edges; + }; + + /** Make a view of a particular type as a shared pointer. + * + */ + template + std::shared_ptr make_trajectoryview(Graph& graph, Args&&... 
args) { + return std::shared_ptr(graph, std::forward(args)...); + }; + +} + +#endif diff --git a/clus/inc/WireCellClus/PRVertex.h b/clus/inc/WireCellClus/PRVertex.h new file mode 100644 index 000000000..ffb2e2794 --- /dev/null +++ b/clus/inc/WireCellClus/PRVertex.h @@ -0,0 +1,102 @@ +#ifndef WIRECELL_CLUS_PR_VERTEX +#define WIRECELL_CLUS_PR_VERTEX + +#include "WireCellClus/PRCommon.h" +#include "WireCellUtil/Flagged.h" +#include "WireCellClus/PRGraphType.h" + +namespace WireCell::Clus::Facade { + class Cluster; +} + +namespace WireCell::Clus::PR { + + /** The flags used to categorize a Vertex + * + * These are used in Vertex via the "Flagged" base class (from util). + */ + enum class VertexFlags { + /// The vertex has no particular category + kUndefined = 0, + /// The vertex is determined to location of neutrino interaction. + kNeutrinoVertex = 1<<1, + }; + + /** A PR::Vertex instance represents a connection with one or more PR::Segment intances. + + A PR::Vertex has: + + - an associated 3D point as well as scalar "fit" information which + includes a potentially different 3D point. + + - a set of possible FLAGS (see VertexFlags and Flagged base class) + + - a bare pointer to a Facade::Cluster. This may be nullptr. And it can + be invalid if the user does something to destroy the cluster while + this object still lives. + + - an optional graph node descriptor (can be "graph_type::null_vertex()") + + A PR::Vertex must be constructed through `make_vertex()`. + + Note, a PR::Vertex is analogous to the ProtoVertex of WCP. + */ + class Vertex + : public Flagged // can set flags + , public Graphed // may live in a graph + , public HasCluster // has an associated Cluster*. + { + public: + + /// Getters + + /// The "initial" point information. + const WCPoint& wcpt() const { return m_wcpt; } + WCPoint& wcpt() { return m_wcpt; } + + /// Information about this vertex provided by some "fit". 
+ const Fit& fit() const { return m_fit; } + Fit& fit() { return m_fit; } + + + + /// Chainable setters + Vertex& wcpt(const WCPoint& wcpt) { m_wcpt = wcpt; return *this; } + Vertex& fit(const Fit& fit) { m_fit = fit; return *this; } + + // index and range ... + void fit_index(int idx) { m_fit.index = idx; } + int fit_index() const { return m_fit.index; } + void fit_range(double range) { m_fit.range = range; } + double fit_range() const { return m_fit.range; } + void flag_fix(bool flag){m_fit.flag_fix = flag;} + bool flag_fix() const {return m_fit.flag_fix; }; + + // + void reset_fit_prop(){ + // Clear the kFitFix flag using keep_flags with inverted mask + // this->keep_flags(static_cast(~static_cast(VertexFlags::kFitFix))); + m_fit.reset(); + } + + // Distance from "initial point" to fit point. + double fit_distance() { + return m_fit.distance(m_wcpt.point); + }; + + double get_dis(WireCell::Point point){ + return m_fit.distance(point); + }; + + private: + + WCPoint m_wcpt; // "initial" point. + Fit m_fit; + + + }; + + /// See PRGraph.h for related functions. 
+} +#endif + diff --git a/clus/inc/WireCellClus/ParticleDataSet.h b/clus/inc/WireCellClus/ParticleDataSet.h new file mode 100644 index 000000000..580ab3f13 --- /dev/null +++ b/clus/inc/WireCellClus/ParticleDataSet.h @@ -0,0 +1,40 @@ +#ifndef WIRECELLCLUS_PARTICLEDATASET +#define WIRECELLCLUS_PARTICLEDATASET + +#include "WireCellIface/IConfigurable.h" +#include "WireCellIface/IScalarFunction.h" +#include "WireCellUtil/NamedFactory.h" +#include +#include + +namespace WireCell { + namespace Clus { + + class ParticleDataSet : public IConfigurable { + public: + typedef std::shared_ptr pointer; + + ParticleDataSet(); + virtual ~ParticleDataSet(); + + virtual void configure(const WireCell::Configuration& config); + virtual WireCell::Configuration default_configuration() const; + + // Access functions + IScalarFunction::pointer get_dEdx_function(const std::string& particle) const; + IScalarFunction::pointer get_range_function(const std::string& particle) const; + double get_particle_mass(int pdg_code) const; + std::string pdg_to_name(int pdg_code) const; + + // Get available particles + std::vector get_particles() const; + + private: + std::map m_dedx_functions; + std::map m_range_functions; + }; + + } +} + +#endif \ No newline at end of file diff --git a/clus/inc/WireCellClus/PointTreeBuilding.h b/clus/inc/WireCellClus/PointTreeBuilding.h index 7c563c0a9..a30fc3107 100644 --- a/clus/inc/WireCellClus/PointTreeBuilding.h +++ b/clus/inc/WireCellClus/PointTreeBuilding.h @@ -8,6 +8,7 @@ #include "WireCellIface/IBlobSampler.h" #include "WireCellIface/IConfigurable.h" #include "WireCellIface/IAnodePlane.h" +#include "WireCellIface/IDetectorVolumes.h" #include "WireCellAux/Logger.h" #include "WireCellUtil/PointTree.h" #include "WireCellUtil/Units.h" @@ -34,34 +35,41 @@ namespace WireCell::Clus { private: // sampling for live/dead using node_ptr = WireCell::PointCloud::Tree::Points::node_ptr; - node_ptr sample_live(const WireCell::ICluster::pointer cluster, const double tick, 
const double angle_u, const double angle_v, const double angle_w) const; + node_ptr sample_live(const WireCell::ICluster::pointer cluster, const double tick, const std::vector& angles) const; node_ptr sample_dead(const WireCell::ICluster::pointer cluster, const double tick) const; // add CT point cloud to the root/Grouping void add_ctpc(node_ptr& root, const WireCell::ICluster::pointer cluster) const; // wind -> xbeg, xend void add_dead_winds(node_ptr& root, const WireCell::ICluster::pointer cluster) const; + double get_time_offset(const WirePlaneId& wpid) const; + double get_drift_speed(const WirePlaneId& wpid) const; + double get_tick(const WirePlaneId& wpid) const; + size_t m_multiplicity {2}; std::vector m_tags; size_t m_count{0}; + // cache ... + mutable std::map cache_map_tick; + mutable std::map cache_map_drift_speed; + mutable std::map cache_map_time_offset; // double m_tick {0.5*units::us}; // double m_drift_speed {1.101*units::millimeter/units::us}; // double m_time_offset {-1600 * units::us}; double m_dead_threshold {1e10}; - // double m_angle_u {1.0472}; // 60 degrees - // double m_angle_v {-1.0472}; // -60 degrees - // double m_angle_w {0}; // 0 degrees // the anode to be processed IAnodePlane::pointer m_anode; + IDetectorVolumes::pointer m_dv; + // the face to be processed int m_face{0}; // the geometry helper - IClusGeomHelper::pointer m_geomhelper; + // IClusGeomHelper::pointer m_geomhelper; /** Configuration: "samplers" diff --git a/clus/inc/WireCellClus/PointTreeMerging.h b/clus/inc/WireCellClus/PointTreeMerging.h new file mode 100644 index 000000000..d5435aa4c --- /dev/null +++ b/clus/inc/WireCellClus/PointTreeMerging.h @@ -0,0 +1,62 @@ +#ifndef WIRECELL_CLUS_POINTTREEMERGING +#define WIRECELL_CLUS_POINTTREEMERGING + + +#include "WireCellAux/Logger.h" +#include "WireCellIface/ITensorSetFanin.h" +#include "WireCellIface/IConfigurable.h" +#include "WireCellIface/ITerminal.h" + +namespace WireCell::Clus { + + class PointTreeMerging + : public 
Aux::Logger, public ITensorSetFanin, public IConfigurable, public ITerminal + { + public: + PointTreeMerging(); + virtual ~PointTreeMerging() = default; + + virtual void configure(const WireCell::Configuration& cfg); + virtual WireCell::Configuration default_configuration() const; + + // INode, override because we get multiplicity at run time. + virtual std::vector input_types(); + + // ITensorSetFanin, a PCTree for each input ITensorSet + virtual bool operator()(const input_vector& invec, output_pointer& out); + + virtual void finalize(); + + private: + + /** Config: "inpath" + * + * The datapath for the input point graph data. This may be a + * regular expression which will be applied in a first-match + * basis against the input tensor datapaths. If the matched + * tensor is a pcdataset it is interpreted as providing the + * nodes dataset. Otherwise the matched tensor must be a + * pcgraph. + */ + std::string m_inpath{".*"}; + + /** Config: "outpath" + * + * The datapath for the resulting pcdataset. A "%d" will be + * interpolated with the ident number of the input tensor set. + */ + std::string m_outpath{""}; + + /** Config: "perf" + * + * If true, emit time/memory performance measures. Default is false. + */ + //bool m_perf{true}; + + // Count how many times we are called + size_t m_count{0}; + size_t m_multiplicity{0}; + }; +} + +#endif diff --git a/clus/inc/WireCellClus/TrackFitting.h b/clus/inc/WireCellClus/TrackFitting.h new file mode 100644 index 000000000..8c1b6a932 --- /dev/null +++ b/clus/inc/WireCellClus/TrackFitting.h @@ -0,0 +1,530 @@ +#ifndef WIRECELLCLUS_TRACKFITTING_H +#define WIRECELLCLUS_TRACKFITTING_H + +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellUtil/Logging.h" +#include "WireCellClus/PRGraph.h" + +#include + + +namespace WireCell::Clus { + + /** + * Dedicated TrackFitting class that can be instantiated and used by + * other ensemble visitors without needing to be configured as a component. 
+ * + * This class encapsulates track fitting algorithms that can work on + * individual clusters or collections of clusters. + */ + class TrackFitting { + public: + + enum class FittingType { + Single, + Multiple + }; + + /** + * Structure to hold all track fitting parameters in one place + */ + struct Parameters { + // Diffusion coefficients (LArTPC standard values) + double DL = 6.4* pow(units::cm,2)/units::second; // m²/s, longitudinal diffusion + double DT = 9.8* pow(units::cm,2)/units::second; // m²/s, transverse diffusion + + // Software filter effects (wire dimension broadening) + double col_sigma_w_T = 0.188060 * 3*units::mm * 0.2; // Collection plane, units: wire pitch + double ind_sigma_u_T = 0.402993 * 3*units::mm * 0.3; // U induction plane + double ind_sigma_v_T = 0.402993 * 3*units::mm * 0.5; // V induction plane + + // Uncertainty parameters + double rel_uncer_ind = 0.075; // Relative uncertainty for induction planes + double rel_uncer_col = 0.05; // Relative uncertainty for collection plane + double add_uncer_ind = 0.0; // Additional uncertainty for induction + double add_uncer_col = 300.0; // Additional uncertainty for collection + + // Longitudinal filter effects (time dimension) + double add_sigma_L = 1.428249 *0.5505*units::mm /0.5; + + // Additional useful parameters for charge err estimation ... 
+ double rel_charge_uncer = 0.1; // 10% + double add_charge_uncer = 600; // electrons + + double default_charge_th = 100; + double default_charge_err = 1000; + + double scaling_quality_th = 0.5; + double scaling_ratio = 0.05; + + double area_ratio1 = 1.8*units::mm; + double area_ratio2 = 1.7; + + double skip_default_ratio_1 = 0.25; + double skip_ratio_cut = 0.97; + double skip_ratio_1_cut = 0.75; + + double skip_angle_cut_1 = 160; + double skip_angle_cut_2 = 90; + double skip_angle_cut_3 = 45; + double skip_dis_cut = 0.5*units::cm; + + double default_dQ_dx = 5000; + + double end_point_factor=0.6; + double mid_point_factor=0.9; + int nlevel=3; + double charge_cut=2000; + + double low_dis_limit = 1.2*units::cm; // cm, lower distance limit for point organization + double end_point_limit = 0.6*units::cm; // cm, extension distance for end points + double time_tick_cut = 20; // // tick cut for point association + + // addition parameters + double share_charge_err = 8000; + double min_drift_time = 50*units::us; + double search_range = 10; // wires, or time slices (not ticks) + + double dead_ind_weight = 0.3; + double dead_col_weight = 0.9; + double close_ind_weight = 0.15; + double close_col_weight = 0.45; + double overlap_th = 0.5; + double dx_norm_length = 0.6*units::cm; + double lambda= 0.0005; + + double div_sigma = 0.6*units::cm; + }; + + /** + * Constructor + * @param fitting_type The type of fitting to perform (single or multiple tracks) + */ + explicit TrackFitting(FittingType fitting_type = FittingType::Single); + virtual ~TrackFitting() = default; + + /** + * Set the fitting type + * @param fitting_type The new fitting type to use + */ + void set_fitting_type(FittingType fitting_type) { m_fitting_type = fitting_type; } + + /** + * Get the current fitting type + * @return The current fitting type + */ + FittingType get_fitting_type() const { return m_fitting_type; } + + // Parameter management methods + + /** + * Get read-only access to current parameters + */ + 
const Parameters& get_parameters() const { return m_params; } + + /** + * Set new parameters (replaces all current parameters) + */ + void set_parameters(const Parameters& params) { m_params = params; } + + /** + * Set specific parameter by name + */ + void set_parameter(const std::string& name, double value); + + /** + * Get specific parameter by name + */ + double get_parameter(const std::string& name) const; + + // single track fitting utilizes the segments ... + void add_segment(std::shared_ptr segment); + /** + * Get the set of segments currently stored in this TrackFitting instance. + * @return Set of shared pointers to PR::Segment + */ + std::set> get_segments() const { return m_segments; } + void clear_segments(); + + // multi-track fitting utilizes the Graph ... + void add_graph(std::shared_ptr graph); + std::shared_ptr get_graph() const { return m_graph; } + void clear_graph(); + + + + // collect charge + void prepare_data(); + + // Fill the global readout map + void fill_global_rb_map(); + + /** + * Organize original path from segment points with distance limits + * @param segment Pointer to PR::Segment containing the path points + * @param low_dis_limit Lower distance limit for point organization + * @param end_point_limit Extension distance for end points + * @return Vector of organized 3D points + */ + std::vector organize_orig_path(std::shared_ptr segment, double low_dis_limit=1.2*units::cm, double end_point_limit=0.6*units::cm); + + std::vector examine_end_ps_vec(std::shared_ptr segment, const std::vector& pts, bool flag_start, bool flag_end); + + void organize_ps_path(std::shared_ptr segment, std::vector& pts, double low_dis_limit, double end_point_limit); + + // use the m_graph to organize ... 
+ void organize_segments_path(double low_dis_limit, double end_point_limit); + // use m_graph, after first round of fitting + void organize_segments_path_2nd(double low_dis_limit, double end_point_limit); + // use m_graph, after second round of fitting + void organize_segments_path_3rd(double step_size); + + private: + // Helper functions for organize_segments_path methods + + /** + * Check and reset vertices that are too close together + * @param edge_range The range of edges (segments) to check + */ + void check_and_reset_close_vertices(); + + /** + * Get segment vertices in correct order (start, end) + * @param segment The segment to process + * @param ed The edge descriptor + * @param start_v Output: pointer to start vertex + * @param end_v Output: pointer to end vertex + * @param vd1 Output: descriptor for vertex 1 + * @param vd2 Output: descriptor for vertex 2 + * @return true if successful, false otherwise + */ + bool get_ordered_segment_vertices( + std::shared_ptr segment, + const PR::edge_descriptor& ed, + std::shared_ptr& start_v, + std::shared_ptr& end_v, + PR::node_descriptor& vd1, + PR::node_descriptor& vd2 + ); + + /** + * Generate 2D projections and create fit vector from 3D points + * @param segment The segment for which to generate fits + * @param pts The 3D points + * @return Vector of Fit objects with 3D points and 2D projections + */ + std::vector generate_fits_with_projections( + std::shared_ptr segment, + const std::vector& pts + ); + + public: + + + + /// Internal coordinate (can be more complex) + struct Coord2D { + int apa, face, time, wire, channel; + WirePlaneLayer_t plane; // Additional internal information + + Coord2D(int a, int f, int t, int w, int c, WirePlaneLayer_t p) + : apa(a), face(f), time(t), wire(w), channel(c), plane(p) {} + + bool operator<(const Coord2D& other) const { + if (apa != other.apa) return apa < other.apa; + if (face != other.face) return face < other.face; + if (time != other.time) return time < other.time; + if 
(wire != other.wire) return wire < other.wire; + if (channel != other.channel) return channel < other.channel; + return plane < other.plane; + } + }; + + /// Per-plane data for 3D points (exactly matches prototype) + struct PlaneData { + std::set associated_2d_points; + double quantity; + + PlaneData() : quantity(0.0) {} + }; + + /// 3D point with per-plane associations (corrected structure) + struct Point3DInfo { + std::map plane_data; + + const PlaneData& get_plane_data(WirePlaneLayer_t plane) const { + static PlaneData empty; + auto it = plane_data.find(plane); + return (it != plane_data.end()) ? it->second : empty; + } + + void set_plane_data(WirePlaneLayer_t plane, const PlaneData& data) { + plane_data[plane] = data; + } + }; + + struct CoordReadout { + int apa, time, channel; + + CoordReadout(int a, int t, int c) + : apa(a), time(t), channel(c) {} + + bool operator<(const CoordReadout& other) const { + if (apa != other.apa) return apa < other.apa; + if (time != other.time) return time < other.time; + return channel < other.channel; + } + }; + + + /// Simple charge measurement (internal interface) + struct ChargeMeasurement { + double charge, charge_err; + int flag; + + ChargeMeasurement(double q = 0.0, double qe = 0.0, int f = 0) + : charge(q), charge_err(qe), flag(f) {} + }; + + + + // point associations + void form_point_association(std::shared_ptr segment, WireCell::Point &p, PlaneData& temp_2dut, PlaneData& temp_2dvt, PlaneData& temp_2dwt, double dis_cut, int nlevel, double time_tick_cut ); + + void examine_point_association(std::shared_ptr segment, WireCell::Point &p, PlaneData& temp_2dut, PlaneData& temp_2dvt, PlaneData& temp_2dwt, bool flag_end_point = false, double charge_cut = 2000); + void update_association(std::shared_ptr segment, PlaneData& temp_2dut, PlaneData& temp_2dvt, PlaneData& temp_2dwt); + + void form_map(std::vector>>& ptss, double end_point_factor=0.6, double mid_point_factor=0.9, int nlevel=3, double time_tick_cut=20, double 
charge_cut=2000); + void form_map_graph(bool flag_exclusion, double end_point_factor=0.6, double mid_point_factor=0.9, int nlevel=3, double time_tick_cut=20, double charge_cut=2000); + + // track trajectory fitting // should fit all APA ... + void trajectory_fit(std::vector>>& pss_vec, int charge_div_method = 1, double div_sigma = 0.6*units::cm); + WireCell::Point fit_point(WireCell::Point& init_p, int i, std::shared_ptr segment,std::map, std::map, double>>& map_Udiv_fac, std::map, std::map, double>>& map_Vdiv_fac, std::map, std::map, double>>& map_Wdiv_fac, double offset_t, double slope_x, double offset_u, double slope_yu, double slope_zu, double offset_v, double slope_yv, double slope_zv, double offset_w, double slope_yw, double slope_zw); + void multi_trajectory_fit(int charge_div_method = 1, double div_sigma = 0.6*units::cm); + + // examine trajectory ... + std::vector examine_segment_trajectory(std::shared_ptr segment, std::vector& final_ps_vec, std::vector& init_ps_vec); + bool skip_trajectory_point(WireCell::Point& p, std::pair& apa_face, int i, std::vector>>& pss_vec, std::vector>>& fine_tracking_path); + + // prepare for dQ/dx fitting + double cal_gaus_integral(int tbin, int wbin, double t_center, double t_sigma, + double w_center, double w_sigma, int flag, double nsigma, int cur_ntime_ticks); + + double cal_gaus_integral_seg(int tbin, int wbin, std::vector& t_centers, std::vector& t_sigmas, std::vector& w_centers, std::vector& w_sigmas, std::vector& weights, int flag, double nsigma, int cur_ntime_ticks); + + void update_dQ_dx_data(); + void recover_original_charge_data(); + + /** + * Calculate compact matrix analysis for wire plane sharing + * + * This function analyzes the sharing patterns between 2D measurements and 3D positions + * to compute overlap ratios and adjust weight matrix coefficients. It processes sparse + * matrices representing the relationship between 2D wire measurements and 3D positions. 
+ * + * @param weight_matrix Reference to sparse weight matrix (MW, MV, or MU) to be modified + * @param response_matrix_transpose Transposed response matrix (RWT, RVT, or RUT) + * @param n_2d_measurements Number of 2D measurements (wire/time points) + * @param n_3d_positions Number of 3D positions + * @param cut_position Threshold for wire sharing cut (default 2.0) + * @return Vector of pairs containing overlap ratios for each 3D position + * Each pair contains (previous_neighbor_ratio, next_neighbor_ratio) + */ + std::vector> calculate_compact_matrix(Eigen::SparseMatrix& weight_matrix, const Eigen::SparseMatrix& response_matrix_transpose, int n_2d_measurements, int n_3d_positions, double cut_position = 2.0); + std::vector> calculate_compact_matrix_multi(std::vector >& connected_vec,Eigen::SparseMatrix& weight_matrix, const Eigen::SparseMatrix& response_matrix_transpose, int n_2d_measurements, int n_3d_positions, double cut_position = 2.0); + + void dQ_dx_fill(double dis_end_point_ext=0.45*units::cm); + + void dQ_dx_fit(double dis_end_point_ext=0.45*units::cm, bool flag_dQ_dx_fit_reg=true); + void dQ_dx_multi_fit(double dis_end_point_ext=0.45*units::cm, bool flag_dQ_dx_fit_reg=true); + + void do_single_tracking(std::shared_ptr segment, bool flag_dQ_dx_fit_reg= true, bool flag_dQ_dx_fit= true, bool flag_force_load_data = false, bool flag_hack = false); + void do_multi_tracking(bool flag_dQ_dx_fit_reg= true, bool flag_dQ_dx_fit= true, bool flag_force_load_data = false, bool flag_exclusion =false, bool flag_hack = false); + + + + + /** + * Get anode for a specific APA identifier + * @param apa_ident APA identifier (typically same as APA number) + * @return Pointer to IAnodePlane, or nullptr if not found + */ + IAnodePlane::pointer get_anode(int apa_ident = 0) const; + + /** + * Get all available anodes from the grouping + * @return Map of APA identifier to anode pointer + */ + std::map get_all_anodes() const; + + /** + * Get channel number for a specific wire 
location + * Uses hybrid caching for optimal performance + * @param apa APA number + * @param face Face number (0 or 1) + * @param plane Plane index (0=U, 1=V, 2=W typically) + * @param wire Wire index within the plane + * @return Channel number, or -1 if invalid + */ + int get_channel_for_wire(int apa, int face, int plane, int wire) const; + + /** + * Get all wires that belong to a specific channel + * @param apa APA number + * @param channel_number Channel identifier + * @return Vector of wire information (face, plane, wire_index) + */ + std::vector> get_wires_for_channel(int apa, int channel_number) const; + + /** + * Clear all caches (useful for memory management) + */ + void clear_cache() const; + + /** + * Get cache statistics for monitoring/debugging + */ + struct CacheStats { + size_t hot_planes_count; + size_t cold_entries_count; + size_t total_lookups; + size_t hot_hits; + size_t cold_hits; + double hit_rate() const { + return total_lookups > 0 ? (double)(hot_hits + cold_hits) / total_lookups : 0.0; + } + }; + CacheStats get_cache_stats() const; + + /** + * Set the detector volume for this TrackFitting instance + * @param dv Pointer to IDetectorVolumes + */ + void set_detector_volume(IDetectorVolumes::pointer dv) { m_dv = dv; } + + /** + * Set the PCTransformSet for coordinate transformations + * @param pcts Pointer to PCTransformSet interface + */ + void set_pc_transforms(IPCTransformSet::pointer pcts) { m_pcts = pcts; } + + /** + * Get the current detector volumes + * @return Pointer to detector volumes interface + */ + IDetectorVolumes::pointer get_detector_volume() const { return m_dv; } + + /** + * Get the current PCTransformSet + * @return Pointer to PCTransformSet interface + */ + IPCTransformSet::pointer get_pc_transforms() const { return m_pcts; } + + std::vector>> get_fine_tracking_path() const { return fine_tracking_path; } + std::vector get_dQ() const { return dQ; } + std::vector get_dx() const { return dx; } + std::vector get_pu() const { 
return pu; } + std::vector get_pv() const { return pv; } + std::vector get_pw() const { return pw; } + std::vector get_pt() const { return pt; } + std::vector> get_paf() const {return paf;} + std::vector get_reduced_chi2() const { return reduced_chi2; } + + private: + // Core parameters - centralized storage + Parameters m_params; + + // Helper method to get parameter value or default + double get_param_or_default(double param_value, double default_value) const { + return (param_value < 0) ? default_value : param_value; + } + + FittingType m_fitting_type; + IDetectorVolumes::pointer m_dv{nullptr}; + IPCTransformSet::pointer m_pcts{nullptr}; // PC Transform Set + + // cluster and grouping, CTPC is from m_grouping ... + Facade::Grouping* m_grouping{nullptr}; + std::set m_clusters; + + std::set m_blobs; + + // input segment + std::set > m_segments; + + // input graph + std::shared_ptr m_graph{nullptr}; + + // ===================================================================== + // HYBRID CACHE IMPLEMENTATION + // ===================================================================== + + // Key types for caching + using PlaneKey = std::tuple; // (apa, face, plane) + using WireKey = std::tuple; // (apa, face, plane, wire) + + // Hot cache: frequently accessed plane mappings (full plane cached) + mutable std::map> m_hot_cache; + + // Cold cache: individual wire lookups + mutable std::map m_cold_cache; + + // Access frequency tracking + mutable std::map m_access_count; + + // Cache statistics + mutable CacheStats m_cache_stats = {0, 0, 0, 0, 0}; + + // Configuration + static constexpr int HOT_THRESHOLD = 50; // Access count to promote to hot cache + + // Helper methods + void cache_entire_plane(int apa, int face, int plane) const; + int fetch_channel_from_anode(int apa, int face, int plane, int wire) const; + + + // ---------------------------------------- + // Internal Storage + // ---------------------------------------- + std::map m_charge_data; ///< Internal charge 
data storage using ChargeMeasurement struct + std::map m_orig_charge_data; // saved original charge measurement, if modified + + std::map> m_2d_to_3d; ///< Internal 2D→3D mapping + std::map m_3d_to_2d; ///< Internal 3D→2D mapping + + // Global (apa, time, channel) to blobs + std::map > global_rb_map; + + // global geometry + + void BuildGeometry(); + + std::map> wpid_params; + std::map > wpid_U_dir; + std::map > wpid_V_dir; + std::map > wpid_W_dir; + std::set apas; + + // Time_width, Pitch_u, pitch_v, pitch_w, for each apa/face + std::map> wpid_geoms; + + // geometry information T, U, V, W for each apa/face + std::map> wpid_offsets; + // T, slope_yu slope_zu, slope_yv slope_zv, slope_yw slope_zw + std::map, std::pair, std::pair >> wpid_slopes; + + // result + std::vector>> fine_tracking_path; + std::vector dQ; + std::vector dx; + std::vector pu; + std::vector pv; + std::vector pw; + std::vector pt; + std::vector> paf; + std::vector reduced_chi2; + }; + +} // namespace WireCell::Clus + +#endif // WIRECELLCLUS_TRACKFITTING_H \ No newline at end of file diff --git a/clus/inc/WireCellClus/TrackFittingPresets.h b/clus/inc/WireCellClus/TrackFittingPresets.h new file mode 100644 index 000000000..a22380fdd --- /dev/null +++ b/clus/inc/WireCellClus/TrackFittingPresets.h @@ -0,0 +1,101 @@ +// TrackFittingPresets.h - Create this as a new header file + +#ifndef WIRECELLCLUS_TRACKFITTING_PRESETS_H +#define WIRECELLCLUS_TRACKFITTING_PRESETS_H + +#include "WireCellClus/TrackFitting.h" + +namespace WireCell::Clus { + + /** + * Factory class for creating TrackFitting instances with preset configurations + */ + class TrackFittingPresets { + public: + + /** + * Create TrackFitting with your current hard-coded values + * This gives you exactly the same behavior as before, but centralized + */ + static TrackFitting create_with_current_values() { + TrackFitting fitter; + + TrackFitting::Parameters params; + + // Set to exactly your current hard-coded values + params.DL = 6.4* 
pow(units::cm,2)/units::second; // cm²/s, longitudinal diffusion + params.DT = 9.8* pow(units::cm,2)/units::second; // cm²/s, transverse diffusion + params.col_sigma_w_T = 0.188060 * 3*units::mm * 0.2; // Collection plane + params.ind_sigma_u_T = 0.402993 * 3*units::mm * 0.3; // U induction plane + params.ind_sigma_v_T = 0.402993 * 3*units::mm * 0.5; // V induction plane + params.rel_uncer_ind = 0.075; // Relative uncertainty for induction + params.rel_uncer_col = 0.05; // Relative uncertainty for collection + params.add_uncer_ind = 0.0; // Additional uncertainty for induction + params.add_uncer_col = 300.0; // Additional uncertainty for collection + params.add_sigma_L = 1.428249 *0.5505*units::mm/ 0.5; // Longitudinal filter effects + + // Additional useful parameters for charge err estimation ... + params.rel_charge_uncer = 0.1; // 10% + params.add_charge_uncer = 600; // electrons + + params.default_charge_th = 100; + params.default_charge_err = 1000; + + params.scaling_quality_th = 0.5; + params.scaling_ratio = 0.05; + + params.area_ratio1 = 1.8*units::mm; + params.area_ratio2 = 1.7; + + params.skip_default_ratio_1 = 0.25; + params.skip_ratio_cut = 0.97; + params.skip_ratio_1_cut = 0.75; + + params.skip_angle_cut_1 = 160; + params.skip_angle_cut_2 = 90; + params.skip_angle_cut_3 = 45; + params.skip_dis_cut = 0.5*units::cm; + + params.default_dQ_dx = 5000; // electrons + + params.end_point_factor = 0.6; + params.mid_point_factor = 0.9; + params.nlevel = 3; + params.charge_cut = 2000; + + // Distance parameters (add these if you use them in your methods) + params.low_dis_limit = 1.2*units::cm; // cm + params.end_point_limit = 0.6*units::cm; // cm + params.time_tick_cut = 20; // time tick + + // additional parameters + params.share_charge_err = 8000; + params.min_drift_time = 50*units::us; + params.search_range = 10; // wires, or time slices (not ticks) + + params.dead_ind_weight = 0.3; + params.dead_col_weight = 0.9; + params.close_ind_weight = 0.15; + 
params.close_col_weight = 0.45; + params.overlap_th = 0.5; + params.dx_norm_length = 0.6*units::cm; + params.lambda = 0.0005; + + params.div_sigma = 0.6*units::cm; + + fitter.set_parameters(params); + return fitter; + } + + /** + * Create TrackFitting with MicroBooNE-like parameters + */ + static TrackFitting create_microboone() { + return create_with_current_values(); // Same as your current values for now + } + + }; + +} // namespace WireCell::Clus + +#endif // WIRECELLCLUS_TRACKFITTING_PRESETS_H \ No newline at end of file diff --git a/clus/inc/WireCellClus/TrackFitting_Util.h b/clus/inc/WireCellClus/TrackFitting_Util.h new file mode 100644 index 000000000..f62c2c3a4 --- /dev/null +++ b/clus/inc/WireCellClus/TrackFitting_Util.h @@ -0,0 +1,35 @@ +#ifndef WIRECELLCLUS_TRACKFITTING_UTIL_H +#define WIRECELLCLUS_TRACKFITTING_UTIL_H + +namespace WireCell::Clus::TrackFittingUtil { + + /** Calculate ranges for track fitting using simplified coupling coefficients. + * + * This function computes available ranges for each wire plane (U, V, W) based on + * geometric coupling between planes and minimum distance constraints. 
+ * + * @param angle_u Wire angle for U plane + * @param angle_v Wire angle for V plane + * @param angle_w Wire angle for W plane + * @param rem_dis_cut_u Remaining distance cut for U plane + * @param rem_dis_cut_v Remaining distance cut for V plane + * @param rem_dis_cut_w Remaining distance cut for W plane + * @param min_u_dis Minimum distance for U plane + * @param min_v_dis Minimum distance for V plane + * @param min_w_dis Minimum distance for W plane + * @param pitch_u Wire pitch for U plane + * @param pitch_v Wire pitch for V plane + * @param pitch_w Wire pitch for W plane + * @param range_u [out] Calculated range for U plane + * @param range_v [out] Calculated range for V plane + * @param range_w [out] Calculated range for W plane + */ + void calculate_ranges_simplified(double angle_u, double angle_v, double angle_w, + double rem_dis_cut_u, double rem_dis_cut_v, double rem_dis_cut_w, + double min_u_dis, double min_v_dis, double min_w_dis, + double pitch_u, double pitch_v, double pitch_w, + float& range_u, float& range_v, float& range_w); + +} // namespace WireCell::Clus::TrackFittingUtil + +#endif \ No newline at end of file diff --git a/clus/src/BlobSampler.cxx b/clus/src/BlobSampler.cxx index fa4cf3a15..a48c61fb8 100644 --- a/clus/src/BlobSampler.cxx +++ b/clus/src/BlobSampler.cxx @@ -110,6 +110,7 @@ struct BlobSampler::Sampler : public Aux::Logger std::string lsuffix = letter + suffix; if (samp.is_extra(lsuffix)) { ds.add(samp.cc.prefix+lsuffix, Array(vals)); + // std::cout << "test: " << samp.cc.prefix << " " << lsuffix << " " << vals.size() << std::endl; } } }; @@ -172,11 +173,11 @@ struct BlobSampler::Sampler : public Aux::Logger // coordinates. double time2drift(double time) const { - const Pimpos* colpimpos = pimpos(2); + // const Pimpos* colpimpos = pimpos(2); const double drift = (time + cc.time_offset)*cc.drift_speed; double xorig = plane_x(2); // colpimpos->origin()[0]; /// TODO: how to determine xsign? 
- double xsign = colpimpos->axis(0)[0]; + double xsign = anodeface->dirx(); return xorig + xsign*drift; } @@ -269,7 +270,7 @@ struct BlobSampler::Sampler : public Aux::Logger } // Per point arrays - + WirePlaneId wpid_blob{0}; // 0 is invalid, assign when we get it. duplicate over all pts later. const auto& activity = islice->activity(); auto iface = iblob->face(); for (const auto& iplane : iface->planes()) { @@ -310,7 +311,11 @@ struct BlobSampler::Sampler : public Aux::Logger wire_coord[ipt] = xwp[1]; const double pitch = xwp[2]; pitch_coord[ipt] = pitch; - int wind = pimpos->closest(pitch).first; + + // auto temp = pimpos->closest(pitch); + int wind = pimpos->closest(pitch + 0.1*units::mm).first; // shift to the higher wires in case of a tie ... + // std::cout << temp.second << " " << wind << " " << temp1.second << " " << temp1.first << std::endl; + if (wind < 0) { log->debug("sampler={}, point={} cartesian={} pimpos={}", my_ident, ipt, pts[ipt], xwp); log->error("Negative wire index: {}, will segfault soon", wind); @@ -319,16 +324,29 @@ struct BlobSampler::Sampler : public Aux::Logger wire_index[ipt] = wind; IWire::pointer iwire = iwires[wire_index[ipt]]; + const auto& wpid_wire = iwire->planeid(); + wpid_blob = WireCell::WirePlaneId(kAllLayers, wpid_wire.face(), wpid_wire.apa()); channel_ident[ipt] = iwire->channel(); channel_attach[ipt] = p_chi2i[channel_ident[ipt]]; auto ich = channels[channel_attach[ipt]]; + + auto ait = activity.find(ich); if (ait != activity.end()) { auto act = ait->second; charge_val[ipt] = act.value(); charge_unc[ipt] = act.uncertainty(); } + + // std::cout << "Test: wire_index " << wire_index[ipt] + // << " pitch_coord " << pitch_coord[ipt] + // << " wire_coord " << wire_coord[ipt] + // << " channel_ident " << channel_ident[ipt] + // << " channel_attach " << channel_attach[ipt] + // << " charge_val " << charge_val[ipt] + // << " charge_unc " << charge_unc[ipt] + // << std::endl; } nv("wire_index", wire_index); @@ -341,6 +359,11 @@ 
struct BlobSampler::Sampler : public Aux::Logger } // over planes + { + npts_dup nd{*this, ds, npts}; + nd("wpid", wpid_blob.ident()); + } + return ds; } @@ -381,10 +404,6 @@ struct BlobSampler::Sampler : public Aux::Logger std::tuple BlobSampler::sample_blob(const IBlob::pointer& iblob, int blob_index) { - if (!iblob) { - THROW(ValueError() << errmsg{"can not sample null blob"}); - } - PointCloud::Dataset ret_main; PointCloud::Dataset ret_aux; // size_t points_added = 0; @@ -705,60 +724,92 @@ struct Stepped : public BlobSampler::Sampler void sample(Dataset& ds, Dataset& aux) { const auto& coords = anodeface->raygrid(); auto strips = iblob->shape().strips(); - + const int ndummy_index = strips.size() == 5 ? 2 : 0; // use this to skip dummy planes + const int li[3] = {ndummy_index+0, ndummy_index+1, ndummy_index+2}; // layer index + // std::cout << "DEBUG strips.size() " << strips.size() << std::endl; + // for (const auto& strip : strips) { + // std::cout << "DEBUG strip " << strip.layer << " " << strip.bounds.first << " " << strip.bounds.second << std::endl; + // } + + // This returns the number of wire regions covered by the strip s. auto swidth = [](const Strip& s) -> int { return s.bounds.second - s.bounds.first; }; - // std::sort(strips.begin()+2, strips.end(), - // [&](const Strip& a, const Strip& b) -> bool { - // return swidth(a) < swidth(b); - // }); - // const Strip& smin = strips[2]; - // const Strip& smid = strips[3]; - // const Strip& smax = strips[4]; - - // XQ update this part of code to match WCP - Strip smax = strips[2]; int max_id = 2; - Strip smin = strips[3]; int min_id = 3; - Strip smid = strips[4]; /*int mid_id = 4;*/ - if (swidth(strips[3]) > swidth(smax)){ - smax = strips[3]; max_id = 3; + // Find the strip with largest coverage. 
+ Strip smax = strips[li[0]]; int max_id = li[0]; + if (swidth(strips[li[1]]) > swidth(smax)){ + smax = strips[li[1]]; max_id = li[1]; } - if(swidth(strips[4]) > swidth(smax)){ - smax = strips[4]; max_id = 4; + if(swidth(strips[li[2]]) > swidth(smax)){ + smax = strips[li[2]]; max_id = li[2]; } - if (swidth(strips[2]) < swidth(smin)){ - smin = strips[2]; min_id = 2; + + // Find the strip with least coverage. + Strip smin = strips[li[1]]; int min_id = li[1]; + if (swidth(strips[li[0]]) < swidth(smin)){ + smin = strips[li[0]]; min_id = li[0]; } - if(swidth(strips[4]) < swidth(smin)){ - smin = strips[4]; min_id = 4; + if(swidth(strips[li[2]]) < swidth(smin)){ + smin = strips[li[2]]; min_id = li[2]; } - for (int i = 2;i!=5;i++){ + // Find the other strip. + Strip smid = strips[li[2]]; /*int mid_id = li[2];*/ + for (int i = li[0];i!=li[2]+1;i++){ if (i != max_id && i != min_id){ smid = strips[i]; // mid_id = i; } } + // Step sizes for the min/max directions. int nmin = std::max(min_step_size, max_step_fraction*swidth(smin)); int nmax = std::max(min_step_size, max_step_fraction*swidth(smax)); std::vector points; //XQ: is the order of 0 vs. 1 correct for the wire center??? + // + // BV: this gives a diagonal vector from the crossing point of the + // 0-rays to the crossing point of the 0-wires (0-wire = half-way from + // 0-ray to 1-ray, half-way assuming offset is default 0.5). + // + // /(0-ray) + // / /(0-wire) + // +-+-+ b + // / / + // / c +-------(0-wire) + // / / + // +---+----(0-ray) + // a + // + // if "a" is the crossing of two 0-rays and "b" is the crossing of two + // 1-rays, "adjust is the vector "ac" which is coincident with the + // crossing of the 0-wires. 
const Vector adjust = offset * ( coords.ray_crossing({smin.layer, 1}, {smax.layer, 1}) - coords.ray_crossing({smin.layer, 0}, {smax.layer, 0})); - const double pitch_adjust = offset * (coords.pitch_location({smin.layer, 1}, {smax.layer, 1}, smid.layer) - coords.pitch_location({smin.layer, 0}, {smax.layer, 0}, smid.layer) ); + // This gives a relative pitch distance measured in the "mid" view that + // is half the distance between crossing point of the 0-rays and the + // 1-rays in the other two views. In general, this is NOT the same as + // the magnitude of "adjust" / "ac" vector above as that diagonal of the + // min/max parallelogram is not necessarily parallel to the pitch + // direction in the third, "mid" view. The two directions are + // accidentally coincident for symmetric wire patterns like in + // MicroBooNE. + const double pitch_adjust = offset * ( + coords.pitch_location({smin.layer, 1}, {smax.layer, 1}, smid.layer) - + coords.pitch_location({smin.layer, 0}, {smax.layer, 0}, smid.layer) ); + // log->debug("offset={} adjust={},{},{}", offset, adjust.x(), adjust.y(), adjust.z()); std::set min_wires_set; std::set max_wires_set; - //XQ: is this the right way of adding the last wire? + // Load up wires for the min/max dimensions including first, along each + // step size and last wire. 
for (auto gmin=smin.bounds.first; gmin < smin.bounds.second; gmin += nmin) { min_wires_set.insert(gmin); } @@ -768,42 +819,650 @@ struct Stepped : public BlobSampler::Sampler } max_wires_set.insert(smax.bounds.second-1); - // std::cout << min_wires_set.size() << " " << max_wires_set.size() << " " << smin.bounds.first << " " << smin.bounds.second << " " << smax.bounds.first << " " << smax.bounds.second << " " << smid.bounds.first << " " << smid.bounds.second << " " << smin.layer << " " << smax.layer << " " << smid.layer << std::endl; - + // size_t npre_missed=0; + // size_t nrel_missed=0; for (auto it_gmin = min_wires_set.begin(); it_gmin != min_wires_set.end(); it_gmin++){ -// for (auto gmin=smin.bounds.first; gmin < smin.bounds.second; gmin += nmin) { coordinate_t cmin{smin.layer, *it_gmin}; - for (auto it_gmax = max_wires_set.begin(); it_gmax != max_wires_set.end(); it_gmax++){ - // for (auto gmax=smax.bounds.first; gmax < smax.bounds.second; gmax += nmax) { + for (auto it_gmax = max_wires_set.begin(); it_gmax != max_wires_set.end(); it_gmax++){ coordinate_t cmax{smax.layer, *it_gmax}; - // adjust wire center ... + // Added by hyu, dunno why + const double ploc0 = coords.pitch_location(cmin, cmax, 0); + const double prel0 = coords.pitch_relative(ploc0, 0); + const double ploc1 = coords.pitch_location(cmin, cmax, 1); + const double prel1 = coords.pitch_relative(ploc1, 1); + if (prel0 > 1 or prel0 < 0 or prel1 > 1 or prel1 < 0) { + // ++npre_missed; + continue; + } + + // This is the mid-view pitch of the crossings of the two *wires* associated with cmin/cmax *rays*. const double pitch = coords.pitch_location(cmin, cmax, smid.layer) + pitch_adjust; - // XQ: how was the closest wire is found, if the pitch is exactly at the middle between two wires? + + // This is the location from the mid view 0-ray to the point of + // the wire-crossing measured in units of mid view pitches. 
const double pitch_relative = coords.pitch_relative(pitch, smid.layer); -// auto gmid = coords.pitch_index(pitch, smid.layer); - // if (smid.in(gmid)) { - // if (smax.bounds.first==1006 && smax.bounds.second==1011) - // std::cout << smax.bounds.first << " " << smax.bounds.second << " " << smin.bounds.first << " " << smin.bounds.second << " " << smid.bounds.first << " " << smid.bounds.second - // << " " << *it_gmax << " " << *it_gmin << " " << pitch << " " << pitch_relative << " " << pitch_adjust << " " << max_id << " " << min_id << " " << mid_id << std::endl; - // if (smid.bounds.first == 707) std::cout << pitch_relative << std::endl; + if (pitch_relative > smid.bounds.first - tolerance && pitch_relative < smid.bounds.second + tolerance){ const auto pt = coords.ray_crossing(cmin, cmax); points.push_back(pt + adjust); + // log->warn("Blob {} adding point {} prel={}, bounds=[{},{}], tol={}, off={} padj={} adj={}", + // iblob->ident(), pt, pitch_relative, smid.bounds.first, smid.bounds.second, tolerance, offset, pitch_adjust, adjust); + // } + // else { + // log->warn("Blob {} not adding point prel={}, bounds=[{},{}], tol={}, off={} padj={} adj={}", + // iblob->ident(), pitch_relative, smid.bounds.first, smid.bounds.second, tolerance, offset, pitch_adjust, adjust); + // ++nrel_missed; } } } + + // if (points.empty()) { + // int ident = iblob->ident(); + // log->warn("Blob {} unsampled: minsiz={} maxsiz={} nwiresmin={} nwiresmax={} nrel={} npre={}.", + // ident, nmin, nmax, min_wires_set.size(), max_wires_set.size(), nrel_missed, npre_missed); + // for (const auto& strip : strips) { + // log->warn("Blob {} strip: {}", ident, strip); + // } + // } + intern(ds, points); // make aux dataset /// TODO: hard coded for 5 planes, i.e., wire_type is id - "2" aux.add("max_wire_interval", Array({(int)nmax})); aux.add("min_wire_interval", Array({(int)nmin})); - aux.add("max_wire_type", Array({(int)(max_id-2)})); - aux.add("min_wire_type", Array({(int)(min_id-2)})); + 
aux.add("max_wire_type", Array({(int)(max_id-ndummy_index)})); + aux.add("min_wire_type", Array({(int)(min_id-ndummy_index)})); + } +}; + +// =========================================================================== +// ChargeStepped Strategy Implementation for BlobSampler +// =========================================================================== + +// Implement the "charge_stepped" sampling. +// +// This is an enhanced version of "stepped" sampling that includes: +// 1. Charge-based filtering of sampling points +// 2. Bad plane handling with configurable thresholds +// 3. Conditional use of all wires vs stepped sets based on wire product +// 4. Enhanced validation logic for charge requirements +// 5. Runtime configuration override capability +// +// Based on WCPPID::calc_sampling_points() from wire-cell-pid +struct ChargeStepped : public BlobSampler::Sampler +{ + using BlobSampler::Sampler::Sampler; + ChargeStepped(const ChargeStepped&) = default; + ChargeStepped& operator=(const ChargeStepped&) = default; + virtual ~ChargeStepped() {} + + // Configuration parameters + double min_step_size{3}; + double max_step_fraction{1.0/12.0}; + double offset{0.5}; + double tolerance{0.03}; + + // Charge threshold parameters + double charge_threshold_max{4000}; + double charge_threshold_min{4000}; + double charge_threshold_other{4000}; + + // Control parameters + int max_wire_product_threshold{2500}; + bool disable_mix_dead_cell{true}; + + // Dead plane detection threshold (same as PointTreeBuilding default) + double dead_threshold{1e10}; + + virtual void configure(const Configuration& cfg) + { + min_step_size = get(cfg, "min_step_size", min_step_size); + max_step_fraction = get(cfg, "max_step_fraction", max_step_fraction); + offset = get(cfg, "offset", offset); + tolerance = get(cfg, "tolerance", tolerance); + + charge_threshold_max = get(cfg, "charge_threshold_max", charge_threshold_max); + charge_threshold_min = get(cfg, "charge_threshold_min", charge_threshold_min); 
+ charge_threshold_other = get(cfg, "charge_threshold_other", charge_threshold_other); + + max_wire_product_threshold = get(cfg, "max_wire_product_threshold", max_wire_product_threshold); + disable_mix_dead_cell = get(cfg, "disable_mix_dead_cell", disable_mix_dead_cell); + + dead_threshold = get(cfg, "dead_threshold", dead_threshold); + } + + // Runtime configuration override + void apply_runtime_config(const Configuration& runtime_cfg) + { + if (runtime_cfg.isMember("charge_threshold_max")) { + charge_threshold_max = runtime_cfg["charge_threshold_max"].asDouble(); + } + if (runtime_cfg.isMember("charge_threshold_min")) { + charge_threshold_min = runtime_cfg["charge_threshold_min"].asDouble(); + } + if (runtime_cfg.isMember("charge_threshold_other")) { + charge_threshold_other = runtime_cfg["charge_threshold_other"].asDouble(); + } + if (runtime_cfg.isMember("disable_mix_dead_cell")) { + disable_mix_dead_cell = runtime_cfg["disable_mix_dead_cell"].asBool(); + } + if (runtime_cfg.isMember("dead_threshold")) { + dead_threshold = runtime_cfg["dead_threshold"].asDouble(); + } + } + + void sample(Dataset& ds, Dataset& aux) { + sample_with_config(ds, aux, Configuration()); + } + + void sample_with_config(Dataset& ds, Dataset& aux, const Configuration& runtime_cfg) { + // Apply runtime configuration if provided + if (!runtime_cfg.isNull()) { + apply_runtime_config(runtime_cfg); + } + + const auto& coords = anodeface->raygrid(); + auto strips = iblob->shape().strips(); + const int ndummy_index = strips.size() == 5 ? 
2 : 0; + const int li[3] = {ndummy_index+0, ndummy_index+1, ndummy_index+2}; + + auto swidth = [](const Strip& s) -> int { + return s.bounds.second - s.bounds.first; + }; + + // Find strips with max, min, and middle coverage + Strip smax = strips[li[0]]; int max_id = li[0]; + if (swidth(strips[li[1]]) > swidth(smax)){ + smax = strips[li[1]]; max_id = li[1]; + } + if(swidth(strips[li[2]]) > swidth(smax)){ + smax = strips[li[2]]; max_id = li[2]; + } + + Strip smin = strips[li[1]]; int min_id = li[1]; + if (swidth(strips[li[0]]) < swidth(smin)){ + smin = strips[li[0]]; min_id = li[0]; + } + if(swidth(strips[li[2]]) < swidth(smin)){ + smin = strips[li[2]]; min_id = li[2]; + } + + Strip smid = strips[li[2]]; int mid_id = li[2]; + for (int i = li[0]; i <= li[2]; i++){ + if (i != max_id && i != min_id){ + smid = strips[i]; + mid_id = i; + } + } + + // Step sizes for the min/max directions + int nmin = std::max(min_step_size, max_step_fraction*swidth(smin)); + int nmax = std::max(min_step_size, max_step_fraction*swidth(smax)); + + // Pre-cache activity data for charge lookup + auto islice = iblob->slice(); + const auto& activity = islice->activity(); + auto iface = anodeface; + + // Detect bad planes dynamically based on charge uncertainty + std::vector plane_is_bad(3, false); + if (disable_mix_dead_cell){ + plane_is_bad[max_id - ndummy_index] = is_plane_bad(max_id, activity, iface); + plane_is_bad[min_id - ndummy_index] = is_plane_bad(min_id, activity, iface); + plane_is_bad[mid_id - ndummy_index] = is_plane_bad(mid_id, activity, iface); + } + + + + // Adjust charge thresholds based on detected bad planes + double thresh_max = plane_is_bad[max_id - ndummy_index] ? 0.0 : charge_threshold_max; + double thresh_min = plane_is_bad[min_id - ndummy_index] ? 0.0 : charge_threshold_min; + double thresh_other = plane_is_bad[mid_id - ndummy_index] ? 
0.0 : charge_threshold_other; + + // if (!disable_mix_dead_cell) + // std::cout << islice->start()/islice->span()*4<< " " << (islice->start() + islice->span())/islice->span()*4 << " " + // << strips[2].bounds.first << " " << strips[2].bounds.second << " " + // << strips[3].bounds.first << " " << strips[3].bounds.second << " " + // << strips[4].bounds.first << " " << strips[4].bounds.second << " " + // << thresh_max << " " << thresh_min << " " << thresh_other << " ";// << std::endl; + + // Create stepped wire sets (mandatory wires) + std::set min_wires_set; + std::set max_wires_set; + + for (auto gmin = smin.bounds.first; gmin < smin.bounds.second; gmin += nmin) { + min_wires_set.insert(gmin); + } + min_wires_set.insert(smin.bounds.second-1); + + for (auto gmax = smax.bounds.first; gmax < smax.bounds.second; gmax += nmax) { + max_wires_set.insert(gmax); + } + max_wires_set.insert(smax.bounds.second-1); + + // Determine which wire sets to use + bool use_all_wires = (swidth(smax) * swidth(smin) <= max_wire_product_threshold); + + std::set actual_min_wires; + std::set actual_max_wires; + + if (use_all_wires) { + // Use all wires + for (auto i = smin.bounds.first; i < smin.bounds.second; i++) { + actual_min_wires.insert(i); + } + for (auto i = smax.bounds.first; i < smax.bounds.second; i++) { + actual_max_wires.insert(i); + } + } else { + // Use stepped sets + actual_min_wires = min_wires_set; + actual_max_wires = max_wires_set; + } + + // Offset adjustment for wire crossing points + const Vector adjust = offset * ( + coords.ray_crossing({smin.layer, 1}, {smax.layer, 1}) - + coords.ray_crossing({smin.layer, 0}, {smax.layer, 0})); + + const double pitch_adjust = offset * ( + coords.pitch_location({smin.layer, 1}, {smax.layer, 1}, smid.layer) - + coords.pitch_location({smin.layer, 0}, {smax.layer, 0}, smid.layer)); + + std::vector points; + + + // // Collect wire indices for each plane //debug ... 
+ // std::vector wires_u, wires_v, wires_w; + // for (auto idx : actual_min_wires) { + // if (smin.layer - ndummy_index == 0) wires_u.push_back(idx); + // if (smin.layer - ndummy_index == 1) wires_v.push_back(idx); + // if (smin.layer - ndummy_index == 2) wires_w.push_back(idx); + // } + // for (auto idx : actual_max_wires) { + // if (smax.layer - ndummy_index == 0) wires_u.push_back(idx); + // if (smax.layer - ndummy_index == 1) wires_v.push_back(idx); + // if (smax.layer - ndummy_index == 2) wires_w.push_back(idx); + // } + // // Add actual_mid_wires collection + // std::set actual_mid_wires; + // if (use_all_wires) { + // for (auto i = smid.bounds.first; i < smid.bounds.second; i++) { + // actual_mid_wires.insert(i); + // } + // } else { + // // Only use the bounds (start and end-1) + // actual_mid_wires.insert(smid.bounds.first); + // actual_mid_wires.insert(smid.bounds.second-1); + // } + // for (auto idx : actual_mid_wires) { + // if (smid.layer - ndummy_index == 0) wires_u.push_back(idx); + // if (smid.layer - ndummy_index == 1) {wires_v.push_back(idx); } + // if (smid.layer - ndummy_index == 2) {wires_w.push_back(idx); } + // } + // // Remove duplicates + // std::sort(wires_u.begin(), wires_u.end()); + // wires_u.erase(std::unique(wires_u.begin(), wires_u.end()), wires_u.end()); + // std::sort(wires_v.begin(), wires_v.end()); + // wires_v.erase(std::unique(wires_v.begin(), wires_v.end()), wires_v.end()); + // std::sort(wires_w.begin(), wires_w.end()); + // wires_w.erase(std::unique(wires_w.begin(), wires_w.end()), wires_w.end()); + bool flag_print = false; + // Print debug info if specific sizes are matched + // if (wires_u.size() == 10 && wires_v.size() == 10 && wires_w.size() == 3) { + // std::cout << "Xin1: " << points.size() << " " << wires_u.size() << " " << wires_v.size() << " " << wires_w.size() + // << " " << actual_max_wires.size() << " " << actual_min_wires.size() << " " << disable_mix_dead_cell << " " << use_all_wires + // << " bad_plane_max=" << 
plane_is_bad[max_id - ndummy_index] << " " << max_id + // << " bad_plane_min=" << plane_is_bad[min_id - ndummy_index] << " " << min_id + // << " bad_plane_other=" << plane_is_bad[mid_id - ndummy_index] << " " << mid_id + // << std::endl; + // flag_print = true; + // } + // plane_is_bad[mid_id - ndummy_index] = is_plane_bad(mid_id, activity, iface, flag_print); + // debug code + + + for (auto it_gmax = actual_max_wires.begin(); it_gmax != actual_max_wires.end(); it_gmax++) { + coordinate_t cmax{smax.layer, *it_gmax}; + + bool flag_must2 = max_wires_set.find(*it_gmax) != max_wires_set.end(); + double charge2 = get_wire_charge(cmax, activity, iface, ndummy_index, flag_print); + + // if (!disable_mix_dead_cell){ + // std::cout << "max: " << charge2 << " " << *it_gmax << std::endl; + // } + + if ((!flag_must2) && (charge2 < thresh_max) && (charge2 != 0 || disable_mix_dead_cell)) { + continue; + } + // if (flag_print) { + // std::cout << "wire: " << *it_gmax << " " << charge2 << " " << flag_must2 << " " << (charge2 < thresh_max) << " " << (charge2 != 0 || disable_mix_dead_cell) << " " << thresh_max << std::endl; + // } + + + for (auto it_gmin = actual_min_wires.begin(); it_gmin != actual_min_wires.end(); it_gmin++) { + coordinate_t cmin{smin.layer, *it_gmin}; + + // Check if this is a "must" wire (from stepped set) + bool flag_must1 = min_wires_set.find(*it_gmin) != min_wires_set.end(); + + // Get charge for this wire + double charge1 = get_wire_charge(cmin, activity, iface, ndummy_index); + + // if(!disable_mix_dead_cell){ + // std::cout << "min: " << charge1 << " " << *it_gmin << std::endl; + // } + + // Apply charge filtering for non-must wires + if ((!flag_must1) && (charge1 < thresh_min) && (charge1 != 0 || disable_mix_dead_cell)) { + continue; + } + // if (flag_print) { + // std::cout << "min wire: " << *it_gmin << " " << charge1 << " " << flag_must1 << " " << (charge1 < thresh_min) << " " << (charge1 != 0 || disable_mix_dead_cell) << " " << thresh_min << 
std::endl; + // } + + + // Check basic bounds + const double ploc0 = coords.pitch_location(cmin, cmax, 0); + const double prel0 = coords.pitch_relative(ploc0, 0); + const double ploc1 = coords.pitch_location(cmin, cmax, 1); + const double prel1 = coords.pitch_relative(ploc1, 1); + if (prel0 > 1 || prel0 < 0 || prel1 > 1 || prel1 < 0) { + continue; + } + + // Check third plane bounds + const double pitch = coords.pitch_location(cmin, cmax, smid.layer)+ pitch_adjust; + const double pitch_relative = coords.pitch_relative(pitch, smid.layer); + + if (pitch_relative > smid.bounds.first - tolerance && + pitch_relative < smid.bounds.second + tolerance) { + + // Get charge for third plane + coordinate_t cother{smid.layer, static_cast(std::round(pitch_relative))}; + double charge3 = get_wire_charge(cother, activity, iface, ndummy_index); + + // Apply charge validation logic + if (flag_must1 && flag_must2) { + // Both wires are mandatory, no additional charge filtering + } else { + // At least one wire is not mandatory, apply charge filtering + if ((charge2 < thresh_max && (charge2 != 0 || disable_mix_dead_cell)) || // 2 is max ... + (charge1 < thresh_min && (charge1 != 0 || disable_mix_dead_cell)) || // 1 is min ... + (charge3 < thresh_other && (charge3 != 0 || disable_mix_dead_cell))) { + + + continue; + } + + // Skip if all charges are zero + if (charge1 == 0 && charge2 == 0 && charge3 == 0) { + continue; + } + } + + // if (flag_print) { + // std::cout << cmax << " " << cmin + // << " " << cother + // << " (" << charge2 << ", " << charge1 << ", " << charge3 << ")" << " " << pitch << " " << pitch_relative << " " + // << " " << thresh_max << " " << thresh_min << " " << thresh_other << " " << std::endl; + // } + + const auto pt = coords.ray_crossing(cmin, cmax); + points.push_back(pt + adjust); + } + } + } + + // if (!disable_mix_dead_cell) std::cout << points.size() << std::endl; + + //de bug ... 
+ // if (wires_u.size() == 10 && wires_v.size() == 10 && wires_w.size() == 3) { + // std::cout << "Xin2: " << points.size() << " " << wires_u.front() << " " << wires_v.front() << " " << wires_w.front() << " " << wires_u.size() << " " << wires_v.size() << " " << wires_w.size() + // << " " << actual_max_wires.size() << " " << actual_min_wires.size() << " " << disable_mix_dead_cell << " " << use_all_wires << std::endl; + // } + // debug ... + + intern(ds, points); + + // Add auxiliary data + aux.add("max_wire_interval", Array({(int)nmax})); + aux.add("min_wire_interval", Array({(int)nmin})); + aux.add("max_wire_type", Array({(int)(max_id-ndummy_index)})); + aux.add("min_wire_type", Array({(int)(min_id-ndummy_index)})); + // aux.add("charge_threshold_max", Array({thresh_max})); + // aux.add("charge_threshold_min", Array({thresh_min})); + // aux.add("charge_threshold_other", Array({thresh_other})); + // aux.add("use_all_wires", Array({use_all_wires})); + // aux.add("bad_plane_max", Array({plane_is_bad[max_id - ndummy_index]})); + // aux.add("bad_plane_min", Array({plane_is_bad[min_id - ndummy_index]})); + // aux.add("bad_plane_other", Array({plane_is_bad[mid_id - ndummy_index]})); + } + +private: + // Helper function to detect if a plane is bad based on charge uncertainty + // Check the first wire of the blob for each plane + bool is_plane_bad(int plane_layer, + const ISlice::map_t& activity, + IAnodeFace::pointer iface, bool flag_print = false) { + + // Get the blob strips to find the first wire index for this plane + auto strips = iblob->shape().strips(); + const int ndummy_index = strips.size() == 5 ? 
2 : 0; + + + if (plane_layer -ndummy_index < 0 || plane_layer - ndummy_index >= (int)iface->planes().size()) { + return false; + } + + // Find the strip for this plane layer + const Strip* target_strip = nullptr; + for (const auto& strip : strips) { + if (strip.layer == plane_layer) { + target_strip = &strip; + break; + } + } + + if (!target_strip) { + return false; + } + + // if (flag_print) std::cout << plane_layer << " " << (int)iface->planes().size() << target_strip << std::endl; + + + // Get the first and last wire indices from the strip bounds + int first_wire_index = target_strip->bounds.first; + int last_wire_index = target_strip->bounds.second - 1; + + // Lambda to check if a wire is bad based on uncertainty + auto is_wire_bad = [&](int wire_index) -> bool { + // Create coordinate for the wire + // coordinate_t coord{plane_layer, wire_index}; + + + + // Get the appropriate plane + int plane_index = plane_layer - ndummy_index; + + // if (flag_print) { + // std::cout << "Plane check: " << plane_index << " " << (int)iface->planes().size() << std::endl; + // } + + if (plane_index < 0 || plane_index >= (int)iface->planes().size()) { + return false; + } + + auto iplane = iface->planes()[plane_index]; + const IWire::vector& iwires = iplane->wires(); + const IChannel::vector& channels = iplane->channels(); + + // if (flag_print) { + // std::cout << "Wire check: " << wire_index << " " << (int)iwires.size() << std::endl; + // } + + // Bounds check for wire index + if (wire_index < 0 || wire_index >= (int)iwires.size()) { + return false; + } + + // Get the wire and its channel + IWire::pointer iwire = iwires[wire_index]; + int channel_ident = iwire->channel(); + + // Build/use cache for channel ident to index mapping (same as get_wire_charge) + auto& p_chi2i = plane_ident2index[iplane]; + if (p_chi2i.empty()) { + const size_t nchannels = channels.size(); + for (size_t chind=0; chindident()] = chind; + } + } + + // if (flag_print){ + // std::cout << "Channel ident: " 
<< channel_ident << " " << p_chi2i.size() << std::endl; + // } + + // Look up channel index using the cache + auto chi2i_it = p_chi2i.find(channel_ident); + if (chi2i_it == p_chi2i.end()) { + return false; + } + + // if (flag_print) { + // std::cout << "Found channel index: " << chi2i_it->second << " " << (int)channels.size() << std::endl; + // } + + int channel_attach = chi2i_it->second; + if (channel_attach < 0 || channel_attach >= (int)channels.size()) { + return false; + } + + auto ich = channels[channel_attach]; + + // if (flag_print){ + // std::cout << "Checking channel: " << ich << " " << activity.size() << std::endl; + // } + + // Look up charge in activity map and check uncertainty + auto ait = activity.find(ich); + if (ait != activity.end()) { + auto act = ait->second; + double uncertainty = act.uncertainty(); + + // if (flag_print) { + // std::cout << "Checking wire " << wire_index << " in plane " << plane_layer + // << ": charge=" << act.value() << ", uncertainty=" << uncertainty + // << ", threshold=" << dead_threshold << std::endl; + // } + + // Plane is considered bad if uncertainty exceeds threshold + return uncertainty > dead_threshold; + } + + return false; + }; + + // Check both first and last wire + return is_wire_bad(first_wire_index) || is_wire_bad(last_wire_index); + } + + + // Helper function to get wire charge using the same pattern as make_dataset + double get_wire_charge(const coordinate_t& coord, + const ISlice::map_t& activity, + IAnodeFace::pointer iface, int ndummy_index = 2, bool flag_print = false) { + + // Get the appropriate plane + int plane_index = coord.layer-ndummy_index; + if (plane_index < 0 || plane_index >= (int)iface->planes().size()) { + return 0.0; + } + + auto iplane = iface->planes()[plane_index]; + const IWire::vector& iwires = iplane->wires(); + const IChannel::vector& channels = iplane->channels(); + + // Bounds check for wire index + if (coord.grid < 0 || coord.grid >= (int)iwires.size()) { + return 0.0; + } + + 
// Get the wire and its channel + IWire::pointer iwire = iwires[coord.grid]; + int channel_ident = iwire->channel(); + + // Build/use cache for channel ident to index mapping (same as make_dataset) + auto& p_chi2i = plane_ident2index[iplane]; + if (p_chi2i.empty()) { + const size_t nchannels = channels.size(); + for (size_t chind=0; chindident()] = chind; + } + } + + // Look up channel index using the cache + auto chi2i_it = p_chi2i.find(channel_ident); + if (chi2i_it == p_chi2i.end()) { + return 0.0; + } + + int channel_attach = chi2i_it->second; + if (channel_attach < 0 || channel_attach >= (int)channels.size()) { + return 0.0; + } + + auto ich = channels[channel_attach]; + + // Look up charge in activity map + auto ait = activity.find(ich); + + if (ait != activity.end()) { + auto act = ait->second; + return act.value(); + } + + return 0.0; } }; +// =========================================================================== +// BlobSampler Extension for Runtime Configuration +// =========================================================================== + +std::tuple +BlobSampler::sample_blob_with_config(const IBlob::pointer& iblob, + int blob_index, + const Configuration& runtime_config) { + + PointCloud::Dataset ret_main; + PointCloud::Dataset ret_aux; + + for (auto& sampler : m_samplers) { + sampler->begin_sample(blob_index, iblob); + + // Check if this is a ChargeStepped sampler + auto charge_stepped = dynamic_cast(sampler.get()); + if (charge_stepped) { + // Use the configuration-aware sampling + charge_stepped->sample_with_config(ret_main, ret_aux, runtime_config); + } else { + // Use regular sampling + sampler->sample(ret_main, ret_aux); + } + + sampler->end_sample(); + } + + return {ret_main, ret_aux}; +} + + + void BlobSampler::add_strategy(Configuration strategy) { if (strategy.isNull()) { @@ -869,8 +1528,13 @@ void BlobSampler::add_strategy(Configuration strategy) m_samplers.back()->configure(full); return; } + if (startswith(name, "charge_stepped")) { 
+ m_samplers.push_back(std::make_unique(full, m_samplers.size())); + m_samplers.back()->configure(full); + return; + } THROW(ValueError() << errmsg{"unknown strategy: " + name}); } - + diff --git a/clus/src/ClusteringFuncs.cxx b/clus/src/ClusteringFuncs.cxx new file mode 100644 index 000000000..e6b7fdb0c --- /dev/null +++ b/clus/src/ClusteringFuncs.cxx @@ -0,0 +1,178 @@ +#include +#include "WireCellUtil/Array.h" + + +#include // temp debug + +using namespace WireCell::Clus::Facade; + + +// Add this to your clustering_util.cxx file + +std::tuple +WireCell::Clus::Facade::extract_geometry_params( + const Grouping& grouping, + const IDetectorVolumes::pointer dv) +{ + geo_point_t drift_dir(1, 0, 0); // initialize drift direction + double angle_u = 0, angle_v = 0, angle_w = 0; // initialize angles + + // Find the first valid WirePlaneId in the grouping + for (const auto& gwpid : grouping.wpids()) { + // Update drift direction based on face orientation + int face_dirx = dv->face_dirx(gwpid); + drift_dir.x(face_dirx); + + // Create wpids for all three planes with the same APA and face + WirePlaneId wpid_u(kUlayer, gwpid.face(), gwpid.apa()); + WirePlaneId wpid_v(kVlayer, gwpid.face(), gwpid.apa()); + WirePlaneId wpid_w(kWlayer, gwpid.face(), gwpid.apa()); + + // Get wire directions for all planes + Vector wire_dir_u = dv->wire_direction(wpid_u); + Vector wire_dir_v = dv->wire_direction(wpid_v); + Vector wire_dir_w = dv->wire_direction(wpid_w); + + // Calculate angles + angle_u = std::atan2(wire_dir_u.z(), wire_dir_u.y()); + angle_v = std::atan2(wire_dir_v.z(), wire_dir_v.y()); + angle_w = std::atan2(wire_dir_w.z(), wire_dir_w.y()); + + // Only need to process the first valid WirePlaneId + break; + } + + return std::make_tuple(drift_dir, angle_u, angle_v, angle_w); +} + +std::vector WireCell::Clus::Facade::merge_clusters( + cluster_connectivity_graph_t& g, + Grouping& grouping, + const std::string& aname, const std::string& pcname) +{ + std::unordered_map desc2id; + 
std::unordered_map > id2desc; + /*int num_components =*/ boost::connected_components(g, boost::make_assoc_property_map(desc2id)); + for (const auto& [desc, id] : desc2id) { + id2desc[id].insert(desc); + } + + std::vector fresh; + + // Note, here we do an unusual thing and COPY the vector of children + // facades. In most simple access we would get the reference to the child + // vector to save a little copy time. We explicitly copy here as we must + // preserve the original order of children facades even as we remove them + // from the grouping. As each child facade is removed, it's + // unique_ptr is returned which we ignore/drop and thus the child + // facade dies along with its node. This leaves the orig_clusters element + // that was just holding the pointer to the doomed facade now holding + // invalid memory. But, it is okay as we never revisit the same cluster in + // the grouping. All that to explain a missing "&"! :) + auto orig_clusters = grouping.children(); + + const bool savecc = aname.size() > 0 && pcname.size() > 0; + + for (const auto& [id, descs] : id2desc) { + if (descs.size() < 2) { + continue; + } + + // it starts with no cluster facade + Cluster& fresh_cluster = grouping.make_child(); + + std::vector cc; + int parent_id = 0; + + for (const auto& desc : descs) { + const int idx = g[desc]; + if (idx < 0) { // no need anymore ... + continue; + } + + auto live = orig_clusters[idx]; + fresh_cluster.from(*live); + fresh_cluster.take_children(*live, true); + + if (fresh_cluster.ident() < 0) { + fresh_cluster.set_ident(live->ident()); + } + + if (savecc) { + cc.resize(fresh_cluster.nchildren(), parent_id); + ++parent_id; + } + + grouping.destroy_child(live); + assert(live == nullptr); + } + if (savecc) { + fresh_cluster.put_pcarray(cc, aname, pcname); + } + + // Normally, it would be weird/wrong to store an address of a reference. + // But, we know the Cluster facade is held by the pc tree node that we + // just added to the grouping node. 
+ fresh.push_back(&fresh_cluster); + } + + return fresh; +} + + +geo_vector_t WireCell::Clus::Facade::calc_pca_dir(const geo_point_t& center, const std::vector& points) +{ + // Create covariance matrix + Eigen::MatrixXd cov_matrix(3, 3); + + // Calculate covariance matrix elements + for (int i = 0; i != 3; i++) { + for (int j = i; j != 3; j++) { + cov_matrix(i, j) = 0; + for (const auto& p : points) { + if (i == 0 && j == 0) { + cov_matrix(i, j) += (p.x() - center.x()) * (p.x() - center.x()); + } + else if (i == 0 && j == 1) { + cov_matrix(i, j) += (p.x() - center.x()) * (p.y() - center.y()); + } + else if (i == 0 && j == 2) { + cov_matrix(i, j) += (p.x() - center.x()) * (p.z() - center.z()); + } + else if (i == 1 && j == 1) { + cov_matrix(i, j) += (p.y() - center.y()) * (p.y() - center.y()); + } + else if (i == 1 && j == 2) { + cov_matrix(i, j) += (p.y() - center.y()) * (p.z() - center.z()); + } + else if (i == 2 && j == 2) { + cov_matrix(i, j) += (p.z() - center.z()) * (p.z() - center.z()); + } + } + } + } + + // std::cout << "Test: " << center << " " << points.at(0) << std::endl; + // std::cout << "Test: " << center << " " << points.at(1) << std::endl; + // std::cout << "Test: " << center << " " << points.at(2) << std::endl; + + // Fill symmetric parts + cov_matrix(1, 0) = cov_matrix(0, 1); + cov_matrix(2, 0) = cov_matrix(0, 2); + cov_matrix(2, 1) = cov_matrix(1, 2); + + // Calculate eigenvalues/eigenvectors using Eigen + Eigen::SelfAdjointEigenSolver eigenSolver(cov_matrix); + auto eigen_vectors = eigenSolver.eigenvectors(); + + // std::cout << "Test: " << eigen_vectors(0,0) << " " << eigen_vectors(1,0) << " " << eigen_vectors(2,0) << std::endl; + + // Get primary direction (first eigenvector) + double norm = sqrt(eigen_vectors(0, 2) * eigen_vectors(0, 2) + + eigen_vectors(1, 2) * eigen_vectors(1, 2) + + eigen_vectors(2, 2) * eigen_vectors(2, 2)); + + return geo_vector_t(eigen_vectors(0, 2) / norm, + eigen_vectors(1, 2) / norm, + eigen_vectors(2, 2) / norm); +} 
diff --git a/clus/src/ClusteringFuncsMixins.cxx b/clus/src/ClusteringFuncsMixins.cxx new file mode 100644 index 000000000..8af8652c7 --- /dev/null +++ b/clus/src/ClusteringFuncsMixins.cxx @@ -0,0 +1,42 @@ + +#include "WireCellClus/ClusteringFuncsMixins.h" +#include "WireCellUtil/NamedFactory.h" + +using namespace WireCell; +using namespace WireCell::Clus; + +void NeedDV:: configure(const WireCell::Configuration &cfg) +{ + auto tn = get(cfg, "detector_volumes", "DetectorVolumes"); + m_dv = Factory::find_tn(tn); +} +void NeedRecombModel:: configure(const WireCell::Configuration &cfg) +{ + auto tn = get(cfg, "recombination_model", "BoxRecombination"); + m_recomb_model = Factory::find_tn(tn); +} + +void NeedFiducial:: configure(const WireCell::Configuration &cfg) +{ + auto tn = get(cfg, "fiducial", "DetectorVolumes"); + m_fiducial = Factory::find_tn(tn); +} + +void NeedPCTS::configure(const WireCell::Configuration &cfg) +{ + auto tn = get(cfg, "pc_transforms", "PCTransformSet"); + m_pcts = Factory::find_tn(tn); +} + +NeedScope::NeedScope(const std::string &pcname, + const std::vector &coords, + size_t depth) + : m_scope{pcname, coords, depth} +{ +} + +void NeedScope::configure(const WireCell::Configuration &cfg) +{ + m_scope.pcname = get(cfg, "pc_name", m_scope.pcname); + m_scope.coords = get(cfg, "coords", m_scope.coords); +} diff --git a/clus/src/ClusteringRecoveringBundle.cxx b/clus/src/ClusteringRecoveringBundle.cxx new file mode 100644 index 000000000..8367e3f3b --- /dev/null +++ b/clus/src/ClusteringRecoveringBundle.cxx @@ -0,0 +1,104 @@ +#include "WireCellClus/IEnsembleVisitor.h" +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellIface/IConfigurable.h" +#include "WireCellUtil/NamedFactory.h" + + +class ClusteringRecoveringBundle; +WIRECELL_FACTORY(ClusteringRecoveringBundle, ClusteringRecoveringBundle, + WireCell::IConfigurable, WireCell::Clus::IEnsembleVisitor) + +using namespace WireCell; +using namespace WireCell::Clus; +using namespace 
WireCell::Clus::Facade; + +/** + * Clustering function that processes beam-flash flagged clusters and separates them + * into individual bundles based on isolated blob components. + * This function recovers separated clusters from over-clustered beam-flash events. + */ +class ClusteringRecoveringBundle : public IConfigurable, public Clus::IEnsembleVisitor { +public: + ClusteringRecoveringBundle() {} + virtual ~ClusteringRecoveringBundle() {} + + virtual void configure(const WireCell::Configuration& config) { + m_grouping_name = get(config, "grouping", "live"); + m_array_name = get(config, "array_name", "isolated"); + m_pcarray_name = get(config, "pcarray_name", "perblob"); + } + + virtual Configuration default_configuration() const { + Configuration cfg; + cfg["grouping"] = m_grouping_name; + cfg["array_name"] = m_array_name; + cfg["pcarray_name"] = m_pcarray_name; + return cfg; + } + + virtual void visit(Ensemble& ensemble) const { + using spdlog::debug; + + // Get the specified grouping (default: "live") + auto groupings = ensemble.with_name(m_grouping_name); + if (groupings.empty()) { + debug("ClusteringRecoveringBundle: No '{}' grouping found", m_grouping_name); + return; + } + + auto& grouping = *groupings.at(0); + + // Container to hold clusters after the initial filter + std::vector filtered_clusters; + + for (auto* cluster : grouping.children()) { + if (cluster->get_flag(Flags::beam_flash)){ + filtered_clusters.push_back(cluster); + } + } + + debug("ClusteringRecoveringBundle: Found {} beam-flash flagged clusters", + filtered_clusters.size()); + + // Process each filtered cluster + for (auto* cluster : filtered_clusters) { + process_cluster(grouping, cluster); + } + + debug("ClusteringRecoveringBundle: Processing complete"); + } + +private: + std::string m_grouping_name{"live"}; + std::string m_array_name{"isolated"}; + std::string m_pcarray_name{"perblob"}; + + void process_cluster(Grouping& grouping, Cluster* cluster) const { + + + // std::cout << 
grouping.children().size() << " clusters before separation." << std::endl; + + // Separate the clusters into separated pieces + auto cc = cluster->get_pcarray(m_array_name, m_pcarray_name); + // Convert span to vector + std::vector cc_vec(cc.begin(), cc.end()); + + // Skip if there are fewer than 2 components to separate + if (cc_vec.size() < 2) return; + + // Perform the separation + auto splits = grouping.separate(cluster, cc_vec); + cluster->set_flag(Flags::main_cluster); + + // Apply the scope filter settings to all new clusters + for (auto& [id, new_cluster] : splits) { + // Store the split/group ID as a scalar + // new_cluster->set_scalar("split_id", id); + // // Optionally also store the original parent's ident + // new_cluster->set_scalar("parent_ident", cluster->ident()); + new_cluster->set_flag(Flags::associated_cluster); + } + + // std::cout << grouping.children().size() << " clusters after separation." << std::endl; + } +}; \ No newline at end of file diff --git a/clus/src/ClusteringRetile.cxx b/clus/src/ClusteringRetile.cxx deleted file mode 100644 index 804a23475..000000000 --- a/clus/src/ClusteringRetile.cxx +++ /dev/null @@ -1,613 +0,0 @@ -// Developers: see important comments in header file. -// -// A "FIXME" really must be fixed if you expect anything to work. Lower case -// "fixme" are "normal" fixmes that everyone ignores. - - -#include "WireCellClus/ClusteringRetile.h" - -#include "WireCellAux/SimpleBlob.h" -#include "WireCellAux/SamplingHelpers.h" - -#include "WireCellUtil/NamedFactory.h" -#include "WireCellUtil/PointTree.h" - -#include "WireCellAux/SimpleSlice.h" -#include "WireCellClus/GroupingHelper.h" - - -using namespace WireCell; - -// Segregate this weird choice for namespace. -namespace WCC = WireCell::PointCloud::Facade; - -// Nick name for less typing. 
-namespace WRG = WireCell::RayGrid; - - -WCC::ClusteringRetile::ClusteringRetile(const WireCell::Configuration& cfg) -{ - auto sampler = get(cfg, "sampler",""); - if (sampler.empty()) { - raise("ClusteringRetile requires an IBlobSampler type/name in 'sampler' parameter"); - } - m_sampler = Factory::find_tn(sampler); - - auto anode_tn = get(cfg, "anode",""); - if (anode_tn.empty()) { - raise("ClusteringRetile requires an IAnodePlane type/name in 'anode' parameter"); - } - int face_index = get(cfg, "face", 0); - - auto anode = Factory::find_tn(anode_tn); - m_face = anode->faces()[face_index]; - if (!m_face) { - raise("ClusteringRetile got null IAnodeFace at index=%d from %s", face_index, anode_tn); - } - - const auto& coords = m_face->raygrid(); - if (coords.nlayers() != 5) { - raise("unexpected number of ray grid layers: %d", coords.nlayers()); - } - - // Add time cut configuration - m_cut_time_low = get(cfg, "cut_time_low", -1e9); - m_cut_time_high = get(cfg, "cut_time_high", 1e9); - - // Get wire info for each plane - m_plane_infos.clear(); - m_plane_infos.push_back(Aux::get_wire_plane_info(m_face, kUlayer)); - m_plane_infos.push_back(Aux::get_wire_plane_info(m_face, kVlayer)); - m_plane_infos.push_back(Aux::get_wire_plane_info(m_face, kWlayer)); -} - - -// Step 1. Build activities from blobs in a cluster. -void WCC::ClusteringRetile::get_activity(const Cluster& cluster, std::map, std::vector >& map_slices_measures) const -{ - const int nlayers = 2+3; - - // checkme: this assumes "iend" is the usual one-past-last aka [ibeg,iend) - // forms a half-open range. I'm not sure if PointTreeBuilding is following - // this or not. 
- - - - // for (auto& info : plane_infos) { - // std::cout << "test1: " << info.start_index << " " << info.end_index << " " << info.total_wires << std::endl; - // } - - - int (WCC::Blob::*wmin[])(void) const = { - &WCC::Blob::u_wire_index_min, - &WCC::Blob::v_wire_index_min, - &WCC::Blob::w_wire_index_min - }; - - int (WCC::Blob::*wmax[])(void) const = { - &WCC::Blob::u_wire_index_max, - &WCC::Blob::v_wire_index_max, - &WCC::Blob::w_wire_index_max - }; - - const double hit=1.0; // actual charge value does not matter to tiling. - - for (const auto* fblob : cluster.children()) { - int tslice_beg = fblob->slice_index_min(); - int tslice_end = fblob->slice_index_max(); - - auto& measures = map_slices_measures[std::make_pair(tslice_beg, tslice_end)]; - - if (measures.size()==0){ - measures.resize(nlayers); - // what to do the first two views??? - measures[0].push_back(1); - measures[1].push_back(1); - measures[2].resize(m_plane_infos[0].total_wires, 0); - measures[3].resize(m_plane_infos[1].total_wires, 0); - measures[4].resize(m_plane_infos[2].total_wires, 0); - // std::cout << measures[2].size() << " " << measures[3].size() << " " << measures[4].size() << std::endl; - } - - // the three views ... - for (int index=0; index<3; ++index) { - const int layer = index + 2; - WRG::measure_t& m = measures[layer]; - // Make each "wire" in each blob's bounds of this plane "hit". - int ibeg = (fblob->*wmin[index])(); - int iend = (fblob->*wmax[index])(); - while (ibeg < iend) { - m[ibeg++] = hit; - } - //std::cout << ibeg << " " << iend << " " << index << " " << hit << std::endl; - } - } - - // std::cout << "Test: Org: " << map_slices_measures.size() << " " << cluster.children().size() << std::endl; - -} - - -// Step 2. Modify activity to suit. 
-void WCC::ClusteringRetile::hack_activity(const Cluster& cluster, std::map, std::vector >& map_slices_measures) const -{ - const double low_dis_limit = 0.3 * units::cm; - // Get path points - auto path_wcps = cluster.get_path_wcps(); - std::vector path_pts; - - // Convert list points to vector with interpolation - for (const auto& wcp : path_wcps) { - geo_point_t p= cluster.point3d(wcp); - if (path_pts.empty()) { - path_pts.push_back(p); - } else { - double dis = (p - path_pts.back()).magnitude(); - if (dis < low_dis_limit) { - path_pts.push_back(p); - } else { - int ncount = int(dis/low_dis_limit) + 1; - for (int i=0; i < ncount; i++) { - Point p1; - p1 = path_pts.back() + (p - path_pts.back()) * (i+1)/ncount; - path_pts.push_back(p1); - } - } - } - } - - std::vector> wire_limits; - for (int i=0; i!=3; i++){ - wire_limits.push_back(std::make_pair(m_plane_infos[i].start_index, m_plane_infos[i].end_index)); - } - - // this is to get the end of the time tick range = start_tick + tick_span - int tick_span = map_slices_measures.begin()->first.second - map_slices_measures.begin()->first.first; - - // Flag points that have sufficient activity around them - std::vector path_pts_flag(path_pts.size(), false); - for (size_t i = 0; i < path_pts.size(); i++) { - auto [time_tick_u, u_wire] = cluster.grouping()->convert_3Dpoint_time_ch(path_pts[i], m_face->which(), 0); - auto [time_tick_v, v_wire] = cluster.grouping()->convert_3Dpoint_time_ch(path_pts[i], m_face->which(), 1); - auto [time_tick_w, w_wire] = cluster.grouping()->convert_3Dpoint_time_ch(path_pts[i], m_face->which(), 2); - //std::cout << time_tick_u << " " << u_wire << " " << v_wire << " " << w_wire << std::endl; - - int aligned_tick = std::round(time_tick_u *1.0/ tick_span) * tick_span; - std::pair tick_range = std::make_pair(aligned_tick, aligned_tick + tick_span); - - // Check for activity in neighboring wires/time - // For each plane (U,V,W), count activity in current and adjacent wires - std::vector wire_hits = 
{0,0,0}; // counts for U,V,W planes - std::vector wires = {u_wire, v_wire, w_wire}; - - for (size_t plane = 0; plane < 3; plane++) { - // Check activity in current and adjacent wires - for (int delta : {-1, 0, 1}) { - int wire = wires[plane] + delta; - if (wire < wire_limits[plane].first || wire > wire_limits[plane].second) - continue; - - int layer = plane + 2; - if (map_slices_measures.find(tick_range) != map_slices_measures.end()) { - if (map_slices_measures[tick_range][layer][wire] > 0) { - wire_hits[plane] += (delta == 0) ? 1 : (delta == -1) ? 2 : 1; - } - } - } - } - - // Set flag if sufficient activity found - if (wire_hits[0] > 0 && wire_hits[1] > 0 && wire_hits[2] > 0 && - (wire_hits[0] + wire_hits[1] + wire_hits[2] >= 6)) { - path_pts_flag[i] = true; - } - // std::cout << path_pts[i] << " " << wire_hits[0] << " " << wire_hits[1] << " " << wire_hits[2] << " " << path_pts_flag[i] << " " << aligned_tick/tick_span << " " << u_wire << " " << v_wire << " " << w_wire << " " << time_tick_u << " " << std::round(time_tick_u / tick_span) << std::endl; - // std::cout << wire_hits[0] << " " << wire_hits[1] << " " << wire_hits[2] << " " << path_pts_flag[i] << std::endl; - } - - // Add missing activity based on path points - for (size_t i = 0; i < path_pts.size(); i++) { - // Skip if point is well-covered by existing activity - if (i == 0) { - if (path_pts_flag[i] && path_pts_flag[i+1]) continue; - } else if (i+1 == path_pts.size()) { - if (path_pts_flag[i] && path_pts_flag[i-1]) continue; - } else { - if (path_pts_flag[i-1] && path_pts_flag[i] && path_pts_flag[i+1]) continue; - } - - auto [time_tick_u, u_wire] = cluster.grouping()->convert_3Dpoint_time_ch(path_pts[i], m_face->which(), 0); - auto [time_tick_v, v_wire] = cluster.grouping()->convert_3Dpoint_time_ch(path_pts[i], m_face->which(), 1); - auto [time_tick_w, w_wire] = cluster.grouping()->convert_3Dpoint_time_ch(path_pts[i], m_face->which(), 2); - - int aligned_tick = std::round(time_tick_u *1.0/ tick_span) * 
tick_span; - - // Add activity around this point - for (int dt = -3; dt <= 3; dt++) { - int time_slice = aligned_tick + dt * tick_span; - if (time_slice < 0) continue; - - // Find or create time slice in measures map - auto slice_key = std::make_pair(time_slice, time_slice+tick_span); - if (map_slices_measures.find(slice_key) == map_slices_measures.end()) { - auto& measures = map_slices_measures[slice_key]; - measures = std::vector(5); // 2+3 layers - measures[0].push_back(1); // First layer measurement - measures[1].push_back(1); // Second layer measurement - measures[2].resize(m_plane_infos[0].total_wires, 0); - measures[3].resize(m_plane_infos[1].total_wires, 0); - measures[4].resize(m_plane_infos[2].total_wires, 0); - } - - // Add activity for each plane - std::vector wires = {u_wire, v_wire, w_wire}; - for (size_t plane = 0; plane < 3; plane++) { - auto& measures = map_slices_measures[slice_key][plane+2]; // +2 to skip first two layers - - for (int dw = -3; dw <= 3; dw++) { - int wire = wires[plane] + dw; - if (wire < wire_limits[plane].first || wire > wire_limits[plane].second || - std::abs(dw) + std::abs(dt) > 3) - continue; - measures[wire] = 1.0; // Set activity - } - } - } - } - - - // std::cout << "Test: Alt: " << map_slices_measures.size() << " " << cluster.children().size() << std::endl; - - // for (auto it = map_slices_measures.begin(); it!= map_slices_measures.end(); it++){ - // std::cout << it->first.first << " " << it->first.second << " " << it->second.size() << std::endl; - // // for (int i=0; i!=5; i++){ - // // std::cout << it->second[i].size() << " "; - // // } - // // std::cout << std::endl; - // } - -} - - - -// Step 3. Form IBlobs from activities. 
-std::vector WCC::ClusteringRetile::make_iblobs(std::map, std::vector >& map_slices_measures) const -{ - std::vector ret; - - const auto& coords = m_face->raygrid(); - int blob_ident=0; - int slice_ident = 0; - for (auto it = map_slices_measures.begin(); it != map_slices_measures.end(); it++){ - // Do the actual tiling. - WRG::activities_t activities = RayGrid::make_activities(m_face->raygrid(), it->second); - auto bshapes = WRG::make_blobs(coords, activities); - - //std::cout << "abc: " << bshapes.size() << " " << activities.size() << " " << std::endl; - // for (const auto& activity : activities) { - // std::cout << activity.as_string() << std::endl; - // } - - // Convert RayGrid blob shapes into IBlobs - const float blob_value = 0.0; // tiling doesn't consider particular charge - const float blob_error = 0.0; // tiling doesn't consider particular charge - - for (const auto& bshape : bshapes) { - IFrame::pointer sframe = nullptr; - - // 500 ns should be passed from outside? - ISlice::pointer slice = std::make_shared(sframe, slice_ident++, it->first.first*500*units::ns, (it->first.second - it->first.first)*500*units::ns); - - // ISlice::pointer slice = nullptr; // fixme: okay? - IBlob::pointer iblob = std::make_shared(blob_ident++, blob_value, - blob_error, bshape, slice, m_face); - // std::cout << "Test: " << iblob << std::endl; - // FIXME: (maybe okay?) GridTiling produces an IBlobSet here which holds - // ISlice info. Are we losing anything important not including that - // info? 
- ret.push_back(iblob); - } - } - - // std::cout << "Test: Blobs: " << ret.size() << std::endl; - - return ret; -} - -std::set -WireCell::PointCloud::Facade::ClusteringRetile::remove_bad_blobs(const Cluster& cluster, Cluster& shad_cluster, int tick_span) const -{ - // Implementation here - // Get time-organized map of original blobs - const auto& orig_time_blob_map = cluster.time_blob_map(); - - // Get time-organized map of newly created blobs - const auto& new_time_blob_map = shad_cluster.time_blob_map(); - - // Track blobs that need to be removed - std::set blobs_to_remove; - - // Examine each new blob - for (const auto& [time_slice, new_blobs] : new_time_blob_map) { - // std::cout << time_slice << " " << new_blobs.size() << std::endl; - - for (const Blob* new_blob : new_blobs) { - bool flag_good = false; - - // Check overlap with blobs in previous time slice - if (orig_time_blob_map.find(time_slice - tick_span) != orig_time_blob_map.end()) { - for (const Blob* orig_blob : orig_time_blob_map.at(time_slice - tick_span)) { - if (new_blob->overlap_fast(*orig_blob, 1)) { - flag_good = true; - break; - } - } - } - - // Check overlap with blobs in same time slice - if (!flag_good && orig_time_blob_map.find(time_slice) != orig_time_blob_map.end()) { - for (const Blob* orig_blob : orig_time_blob_map.at(time_slice)) { - if (new_blob->overlap_fast(*orig_blob, 1)) { - flag_good = true; - break; - } - } - } - - // Check overlap with blobs in next time slice - if (!flag_good && orig_time_blob_map.find(time_slice + tick_span) != orig_time_blob_map.end()) { - for (const Blob* orig_blob : orig_time_blob_map.at(time_slice + tick_span)) { - if (new_blob->overlap_fast(*orig_blob, 1)) { - flag_good = true; - break; - } - } - } - - // If no overlap found with original blobs in nearby time slices, mark for removal - if (!flag_good) { - blobs_to_remove.insert(new_blob); - } - } - } - - // Remove the bad blobs - return blobs_to_remove; - - -} - - - -void 
WCC::ClusteringRetile::operator()(WCC::Grouping& original, WCC::Grouping& shadow, cluster_set_t&) const -{ - // FIXME: With #377 fixed, we would make the shadow grouping here from - // scratch and add it to the input map by name, eg "shadow". Instead, we - // smash whatever is in the 2nd grouping to fill with "shadow" clusters. In - // other cluster functions this second grouping is interpreted as holding - // "dead" clusters. - - - // reset the shadown clusters' content ... - // std::cout << shadow.children().size() << std::endl; - shadow.local_pcs().clear(); - for (auto* fcluster : shadow.children()) { - shadow.remove_child(*fcluster); - } - shadow.clear_cache(); - // std::cout << shadow.children().size() << std::endl; - - - const auto [angle_u,angle_v,angle_w] = original.wire_angles(); - - for (auto* orig_cluster : original.children()) { - - // find the flash time: - auto flash = orig_cluster->get_flash(); - // int nblobs = - // orig_cluster->kd_blobs().size(); - - // Apply time cut - if (flash) { - double flash_time = flash.time(); - if (flash_time >= m_cut_time_low && flash_time <= m_cut_time_high) { - // std::cout << "Tests: " << nblobs << " at time " << flash_time/units::us << " " << m_cut_time_low/units::us << " " << m_cut_time_high/units::us << "\n"; - - // get the span of indices - auto cc = orig_cluster->get_pcarray("isolated", "perblob"); - // convert span to vector - std::vector cc_vec(cc.begin(), cc.end()); - // for (const auto& val : cc_vec) { - // std::cout << val << " "; - // } - // std::cout << std::endl; - - // use the vector for separate() - // origi_cluster still have the original main cluster ... - auto splits = original.separate(orig_cluster, cc_vec); - - std::map map_id_cluster = splits; - map_id_cluster[-1] = orig_cluster; - - Cluster *shadow_orig_cluster; - std::map shadow_splits; - - for (auto& [id, cluster] : map_id_cluster) { - - // make a shadow cluster, insert ID ... 
- auto& shad_cluster = shadow.make_child(); - shad_cluster.set_ident(cluster->ident()); - - // std::cout <<"bcd: " << cluster->ident() << " " << shad_cluster.ident() << std::endl; - - if (id==-1) shadow_orig_cluster = &shad_cluster; - else shadow_splits[id] = &shad_cluster; - - // find the highest and lowest points - std::pair pair_points = cluster->get_highest_lowest_points(); - //std::cout << pair_points.first << " " << pair_points.second << std::endl; - int high_idx = cluster->get_closest_point_index(pair_points.first); - int low_idx = cluster->get_closest_point_index(pair_points.second); - cluster->dijkstra_shortest_paths(high_idx, false); - cluster->cal_shortest_path(low_idx); - - // Step 1. - std::map, std::vector > map_slices_measures; - get_activity(*cluster, map_slices_measures); - - // Step 2. - hack_activity(*cluster, map_slices_measures); // may need more args - - // Step 3. Must make IBlobs for this is what the sampler takes. - auto shad_iblobs = make_iblobs(map_slices_measures); // may need more args - - // Steps 4-6. - auto niblobs = shad_iblobs.size(); - // Forgive me (and small-f fixme), but this is now the 3rd generation of - // copy-paste. Gen 2 is in UbooneClusterSource. OG is in - // PointTreeBuilding. The reason for the copy-pastes is insufficient - // factoring of the de-factor standard sampling code in PointTreeBuilding. - // Over time, it is almost guaranteed these copy-pastes become out-of-sync. - for (size_t bind=0; bindsample_blob(iblob, bind); - - // how to sample points ... 
- // std::cout << pc3d.size() << " " << aux.size() << " " << pc3d.get("x")->size_major() << " " << pc3d.get("y")->size_major() << " " << pc3d.get("z")->size_major() << std::endl; - // const auto& arr_x1 = pc3d.get("x")->elements(); - - pcs.emplace("3d", pc3d); - /// These seem unused and bring in yet more copy-paste code - pcs.emplace("2dp0", WireCell::Aux::make2dds(pc3d, angle_u)); - pcs.emplace("2dp1", WireCell::Aux::make2dds(pc3d, angle_v)); - pcs.emplace("2dp2", WireCell::Aux::make2dds(pc3d, angle_w)); - // std::cout << pcs["3d"].get("x")->size_major() << " " << pcs["3d"].get("y")->size_major() << " " << pcs["3d"].get("z")->size_major() << std::endl; - // const auto& arr_x = pcs["3d"].get("x")->elements(); - // std::cout << arr_x.size() << " " << arr_x1.size() << std::endl; - // std::cout << iblob->shape() << std::endl; - if (pc3d.get("x")->size_major() > 0){ - const Point center = WireCell::Aux::calc_blob_center(pcs["3d"]); - auto scalar_ds = WireCell::Aux::make_scalar_dataset(iblob, center, pcs["3d"].get("x")->size_major(), 500*units::ns); - int max_wire_interval = aux.get("max_wire_interval")->elements()[0]; - int min_wire_interval = aux.get("min_wire_interval")->elements()[0]; - int max_wire_type = aux.get("max_wire_type")->elements()[0]; - int min_wire_type = aux.get("min_wire_type")->elements()[0]; - scalar_ds.add("max_wire_interval", Array({(int)max_wire_interval})); - scalar_ds.add("min_wire_interval", Array({(int)min_wire_interval})); - scalar_ds.add("max_wire_type", Array({(int)max_wire_type})); - scalar_ds.add("min_wire_type", Array({(int)min_wire_type})); - pcs.emplace("scalar", std::move(scalar_ds)); - - shad_cluster.node()->insert(Tree::Points(std::move(pcs))); - }else{ - SPDLOG_WARN("blob {} has no points", iblob->ident()); - } - } - - int tick_span = map_slices_measures.begin()->first.second - map_slices_measures.begin()->first.first; - - // std::cout << shad_cluster.npoints() << " " << shad_cluster.nbpoints() << " " << shad_cluster.nchildren() 
<< std::endl; - // remove blobs after creating facade_blobs ... - auto blobs_to_remove = remove_bad_blobs(*cluster, shad_cluster, tick_span); - for (const Blob* blob : blobs_to_remove) { - Blob& b = const_cast(*blob); - - shad_cluster.remove_child(b); - } - shad_cluster.clear_cache(); - - // // Reset cached data that depends on cluster contents - // shad_cluster.reset_pca(); // Reset PCA calculations - // // Force rebuild of time blob map by accessing it - // shad_cluster.time_blob_map(); - // shad_cluster.point3d(0); // This will trigger PC tree rebuild - // std::cout << shad_cluster.npoints() << " " << shad_cluster.nbpoints() << " " << shad_cluster.nchildren() << std::endl; - - // How to call overlap_fast ??? - // for (auto* fblob : shad_cluster.children()) { - // shad_cluster.remove_child(*fblob); - // } - - // std::cout << "Test: remove: " << m_sampler << " " << cluster->kd_blobs().size() << " " << shad_cluster.kd_blobs().size() << " " << niblobs << std::endl; - - // Example code to access shadown cluster information ... - // // shad cluster getting highest and lowest points and then do shortest path ... - // // find the highest and lowest points - // std::pair shad_pair_points = shad_cluster.get_highest_lowest_points(); - // //std::cout << pair_points.first << " " << pair_points.second << std::endl; - // int shad_high_idx = shad_cluster.get_closest_point_index(shad_pair_points.first); - // int shad_low_idx = shad_cluster.get_closest_point_index(shad_pair_points.second); - // shad_cluster.dijkstra_shortest_paths(shad_high_idx, false); - // shad_cluster.cal_shortest_path(shad_low_idx); - // { - // auto path_wcps = shad_cluster.get_path_wcps(); - // // Convert list points to vector with interpolation - // for (const auto& wcp : path_wcps) { - // geo_point_t p= shad_cluster.point3d(wcp); - // std::cout << p << std::endl; - // } - // } - - } - - // // FIXME: These two methods need to be added to the Cluster Facade. 
- // // They should set/get "cluster_id" from the "cluster_scalar" PC. - // // FIXME: above we add cluster_id to the "cluster_scalar" PC. Do we - // // want to also try to copy over the "light" index entry? - - auto cc2 = original.merge(splits,orig_cluster); - // for (const auto& val : cc2) { - // std::cout << val << " "; - // } - // std::cout << std::endl; - orig_cluster->put_pcarray(cc2, "isolated", "perblob"); - - auto cc3 = shadow.merge(shadow_splits,shadow_orig_cluster); - // for (const auto& val : cc3) { - // std::cout << val << " "; - // } - // std::cout << std::endl; - shadow_orig_cluster->put_pcarray(cc3, "isolated", "perblob"); - - - - } - // FIXME: do we need/want to copy over any PCs in the grouping? - // Specifically optical light/flash/flashlight PCs? - - } - } - - - // // Process groupings after all shadow clusters are created - // auto cluster_mapping = process_groupings(original, shadow); - // for (const auto& [orig_cluster, tuple] : cluster_mapping) { - // std::cout << orig_cluster << " " << std::get<0>(tuple) << " " << std::get<1>(tuple) << " " << std::get<2>(tuple) << std::endl; - // // auto* shad_cluster = pair.first; - // // auto* main_cluster = pair.second; - - // // // You can now use these mapped clusters for further processing - // // // For example, transfer any necessary properties or perform additional operations - // } - - - -} - -std::map> -WCC::ClusteringRetile::process_groupings( - WCC::Grouping& original, - WCC::Grouping& shadow, - const std::string& aname, - const std::string& pname) const -{ - return process_groupings_helper(original, shadow, aname, pname); -} diff --git a/clus/src/ClusteringTaggerFlagTransfer.cxx b/clus/src/ClusteringTaggerFlagTransfer.cxx new file mode 100644 index 000000000..688185aa0 --- /dev/null +++ b/clus/src/ClusteringTaggerFlagTransfer.cxx @@ -0,0 +1,188 @@ +#include "WireCellClus/IEnsembleVisitor.h" +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellIface/IConfigurable.h" +#include 
"WireCellUtil/NamedFactory.h" + +class ClusteringTaggerFlagTransfer; +WIRECELL_FACTORY(ClusteringTaggerFlagTransfer, ClusteringTaggerFlagTransfer, + WireCell::IConfigurable, WireCell::Clus::IEnsembleVisitor) + +using namespace WireCell; +using namespace WireCell::Clus; +using namespace WireCell::Clus::Facade; + +/** + * Lightweight visitor that transfers tagger information from point clouds to cluster flags + * This should run FIRST in the clustering pipeline, right after clusters are created + */ +class ClusteringTaggerFlagTransfer : public IConfigurable, public Clus::IEnsembleVisitor { +public: + ClusteringTaggerFlagTransfer() {} + virtual ~ClusteringTaggerFlagTransfer() {} + + virtual void configure(const WireCell::Configuration& config) { + // No configuration needed - this is a pure transfer operation + } + + virtual Configuration default_configuration() const { + return Configuration{}; + } + + virtual void visit(Ensemble& ensemble) const { + using spdlog::debug; + + // Process all groupings (live, dead, etc.) 
+ for (auto* grouping : ensemble.children()) { + auto clusters = grouping->children(); + + for (auto* cluster : clusters) { + transfer_tagger_flags(*cluster); + } + + debug("ClusteringTaggerFlagTransfer: Processed {} clusters in grouping '{}'", + clusters.size(), grouping->get_name()); + } + } + +private: + void transfer_tagger_flags(Cluster& cluster) const { + const auto& lpc = cluster.value().local_pcs(); + auto it = lpc.find("tagger_info"); + if (it == lpc.end()) { + return; // No tagger info available + } + + const auto& tagger_pc = it->second; + + // Debug: List all available keys in tagger_info + // std::cout << "Xin: Cluster " << cluster.ident() << " tagger_info keys: "; + // for (const auto& key : tagger_pc.keys()) { + // std::cout << key << " "; + // } + // std::cout << std::endl; + + // Helper lambda to check if a flag should be set + auto should_set_flag = [&](const std::string& flag_name) -> bool { + auto arr = tagger_pc.get(flag_name); + // std::cout << "Xin: " << cluster.ident() << " checking " << flag_name << " - arr ptr: " << arr; + if (arr) { + // std::cout << ", size: " << arr->size_major(); + if (arr->size_major() > 0) { + int value = arr->element(0); + // std::cout << ", value: " << value; + // std::cout << std::endl; + return value > 0; + } + } + // std::cout << " - NO DATA" << std::endl; + return false; + }; + + + + // Set flags based on stored tagger information + if (should_set_flag("has_beam_flash")) { + cluster.set_flag(Flags::beam_flash); + + // Try individual flags first, fall back to array method + // Only set other flags if beam flash is true (following prototype logic) + if (should_set_flag("has_tgm") ) { + cluster.set_flag(Flags::tgm); + } + if (should_set_flag("has_low_energy") ) { + cluster.set_flag(Flags::low_energy); + } + if (should_set_flag("has_light_mismatch") ) { + cluster.set_flag(Flags::light_mismatch); + } + if (should_set_flag("has_fully_contained") ) { + cluster.set_flag(Flags::fully_contained); + } + if 
(should_set_flag("has_short_track_muon") ) { + cluster.set_flag(Flags::short_track_muon); + } + if (should_set_flag("has_full_detector_dead") ) { + cluster.set_flag(Flags::full_detector_dead); + } + } + // If not beam flash coincident, no additional flags are set + // if (cluster.get_flag(Flags::beam_flash)) + // std::cout << "Xin: " << cluster.ident() << " has beam_flash: " + // << cluster.get_flag(Flags::beam_flash) << " " << should_set_flag("has_beam_flash") << ", tgm: " + // << cluster.get_flag(Flags::tgm) << " " << should_set_flag("has_tgm") << ", low_energy: " + // << cluster.get_flag(Flags::low_energy) << " " << should_set_flag("has_low_energy") << ", light_mismatch: " + // << cluster.get_flag(Flags::light_mismatch) << " " << should_set_flag("has_light_mismatch") << ", fully_contained: " + // << cluster.get_flag(Flags::fully_contained) << " " << should_set_flag("has_fully_contained") << ", short_track_muon: " + // << cluster.get_flag(Flags::short_track_muon) << " " << should_set_flag("has_short_track_muon") << ", full_detector_dead: " + // << cluster.get_flag(Flags::full_detector_dead) << " " << should_set_flag("has_full_detector_dead") << std::endl; + } +}; + +// Now in ClusteringFuncs.h, we can have simple flag checking utilities: + +namespace WireCell::Clus::Facade { + namespace TaggerUtils { + + // Simple flag checking using get_flag() accessor + inline bool is_beam_flash(const Cluster& cluster) { + return cluster.get_flag(Flags::beam_flash); + } + + inline bool is_tgm(const Cluster& cluster) { + return cluster.get_flag(Flags::tgm); + } + + inline bool is_low_energy(const Cluster& cluster) { + return cluster.get_flag(Flags::low_energy); + } + + inline bool is_light_mismatch(const Cluster& cluster) { + return cluster.get_flag(Flags::light_mismatch); + } + + inline bool is_fully_contained(const Cluster& cluster) { + return cluster.get_flag(Flags::fully_contained); + } + + inline bool is_short_track_muon(const Cluster& cluster) { + return 
cluster.get_flag(Flags::short_track_muon); + } + + inline bool is_full_detector_dead(const Cluster& cluster) { + return cluster.get_flag(Flags::full_detector_dead); + } + + /** + * Get event type from tagger metadata (still need point cloud for this) + */ + inline int get_event_type(const Cluster& cluster) { + const auto& lpc = cluster.value().local_pcs(); + auto it = lpc.find("tagger_info"); + if (it == lpc.end()) return -1; + + auto arr = it->second.get("event_type"); + return (arr && arr->size_major() > 0) ? arr->element(0) : -1; + } + + /** + * Get cluster length from tagger metadata + */ + inline double get_cluster_length(const Cluster& cluster) { + const auto& lpc = cluster.value().local_pcs(); + auto it = lpc.find("tagger_info"); + if (it == lpc.end()) return -1.0; + + auto arr = it->second.get("cluster_length"); + return (arr && arr->size_major() > 0) ? arr->element(0) : -1.0; + } + + /** + * Check if cluster has any tagger flags set + */ + inline bool has_any_tagger_flags(const Cluster& cluster) { + return is_beam_flash(cluster) || is_tgm(cluster) || is_low_energy(cluster) || + is_light_mismatch(cluster) || is_fully_contained(cluster) || + is_short_track_muon(cluster) || is_full_detector_dead(cluster); + } + } +} \ No newline at end of file diff --git a/clus/src/Clustering_Util.cxx b/clus/src/Clustering_Util.cxx new file mode 100644 index 000000000..63207956f --- /dev/null +++ b/clus/src/Clustering_Util.cxx @@ -0,0 +1,52 @@ +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellClus/Facade.h" +#include "WireCellClus/Facade_Cluster.h" + +using namespace WireCell; +using namespace WireCell::Clus; +using namespace WireCell::Clus::Facade; + +namespace WireCell::Clus::Facade { + +// Function to compute wire plane parameters +void compute_wireplane_params( + const std::set& wpids, + const IDetectorVolumes::pointer dv, + std::map>& wpid_params, + std::map>& wpid_U_dir, + std::map>& wpid_V_dir, + std::map>& wpid_W_dir, + std::set& apas) +{ + for (const 
auto& wpid : wpids) { + int apa = wpid.apa(); + int face = wpid.face(); + apas.insert(apa); + + // Create wpids for all three planes with this APA and face + WirePlaneId wpid_u(kUlayer, face, apa); + WirePlaneId wpid_v(kVlayer, face, apa); + WirePlaneId wpid_w(kWlayer, face, apa); + + // Get drift direction based on face orientation + int face_dirx = dv->face_dirx(wpid_u); + geo_point_t drift_dir(face_dirx, 0, 0); + + // Get wire directions for all planes + Vector wire_dir_u = dv->wire_direction(wpid_u); + Vector wire_dir_v = dv->wire_direction(wpid_v); + Vector wire_dir_w = dv->wire_direction(wpid_w); + + // Calculate angles + double angle_u = std::atan2(wire_dir_u.z(), wire_dir_u.y()); + double angle_v = std::atan2(wire_dir_v.z(), wire_dir_v.y()); + double angle_w = std::atan2(wire_dir_w.z(), wire_dir_w.y()); + + wpid_params[wpid] = std::make_tuple(drift_dir, angle_u, angle_v, angle_w); + wpid_U_dir[wpid] = std::make_pair(geo_point_t(0, std::cos(angle_u), std::sin(angle_u)), angle_u); + wpid_V_dir[wpid] = std::make_pair(geo_point_t(0, std::cos(angle_v), std::sin(angle_v)), angle_v); + wpid_W_dir[wpid] = std::make_pair(geo_point_t(0, std::cos(angle_w), std::sin(angle_w)), angle_w); + } +} + +} // namespace WireCell::Clus::Facade \ No newline at end of file diff --git a/clus/src/CreateSteinerGraph.cxx b/clus/src/CreateSteinerGraph.cxx new file mode 100644 index 000000000..a1d8c3ac2 --- /dev/null +++ b/clus/src/CreateSteinerGraph.cxx @@ -0,0 +1,240 @@ +#include "CreateSteinerGraph.h" +#include "SteinerGrapher.h" + +#include "WireCellClus/Graphs.h" +#include "WireCellUtil/PointTree.h" +#include "WireCellUtil/NamedFactory.h" +#include "WireCellClus/ClusteringFuncs.h" + + +WIRECELL_FACTORY(CreateSteinerGraph, WireCell::Clus::Steiner::CreateSteinerGraph, + WireCell::IConfigurable, WireCell::Clus::IEnsembleVisitor) + + +using namespace WireCell; +using namespace WireCell::Clus; +using namespace WireCell::Clus::Facade; + +Steiner::CreateSteinerGraph::CreateSteinerGraph() 
+ : Aux::Logger("CreateSteinerGraph", "clus") +{ +} +Steiner::CreateSteinerGraph::~CreateSteinerGraph() +{ +} + + +void Steiner::CreateSteinerGraph::configure(const WireCell::Configuration& cfg) +{ + m_grouping_name = get(cfg, "grouping", m_grouping_name); + m_graph_name = get(cfg, "graph", m_graph_name); + m_replace = get(cfg, "replace", m_replace); + + NeedDV::configure(cfg); + NeedPCTS::configure(cfg); + + m_grapher_config.dv = m_dv; + m_grapher_config.pcts = m_pcts; + const std::string retiler_tn = get(cfg, "retiler", "RetileCluster"); + m_grapher_config.retile = Factory::find_tn(retiler_tn); +} + +Configuration Steiner::CreateSteinerGraph::default_configuration() const +{ + Configuration cfg; + // Build the Steiner graph for clusters in this grouping. + cfg["grouping"] = m_grouping_name; + // Name of the resulting graph on the cluster + cfg["graph"] = m_graph_name; + // If true, replace any pre-existing graph with that name, else do + // nothing if one already exists. + cfg["replace"] = m_replace; + + return cfg; +} + + +void Steiner::CreateSteinerGraph::visit(Ensemble& ensemble) const +{ + auto& grouping = *ensemble.with_name(m_grouping_name).at(0); + + // Container to hold clusters after the initial filter + std::vector filtered_clusters; + + Cluster* main_cluster = nullptr; + + for (auto* cluster : grouping.children()) { + // check scope + auto& default_scope = cluster->get_default_scope(); + auto& raw_scope = cluster->get_raw_scope(); + // if scope is not raw, apply filter ... + if (default_scope.hash()!=raw_scope.hash() && (!cluster->get_scope_filter(default_scope)) ) continue; + + if (cluster->get_flag(Flags::beam_flash)){ + filtered_clusters.push_back(cluster); + if (cluster->get_flag(Flags::main_cluster)) { + main_cluster = cluster; + } + } + } + + std::cout << "CreateSteinerGraph: " << filtered_clusters.size() << " clusters with beam_flash flag." << " " << main_cluster->ident() << std::endl; + + if (main_cluster != nullptr){ + // // test steiner ... 
+ // { + // Steiner::Grapher sg(*main_cluster, m_grapher_config, log); + // auto& graph = sg.get_graph("basic_pid"); // ensure basic_pid graph exists + // std::cout << "CreateSteinerGraph: " << boost::num_vertices(graph) << " vertices " << boost::num_edges(graph) << " edges." << std::endl; + // std::vector path_point_indices ; + // sg.create_steiner_tree(main_cluster, path_point_indices, "basic_pid", "steiner_graph", false, "steiner_pc"); + // const auto& steiner_point_cloud = sg.get_point_cloud("steiner_pc"); + // const auto& steiner_graph = sg.get_graph("steiner_graph"); + // auto& flag_terminals = sg.get_flag_steiner_terminal(); + // size_t num_true_terminals = std::count(flag_terminals.begin(), flag_terminals.end(), true); + + // std::cout << "CreateSteinerGraph: " << "steiner_graph with " + // << boost::num_vertices(steiner_graph) << " vertices and " + // << boost::num_edges(steiner_graph) << " edges." << " " << flag_terminals.size() << " " << num_true_terminals << std::endl; + // } + + + if (m_grapher_config.retile ) { + // Call the mutate function with the appropriate configuration, create new cluster + auto new_node = m_grapher_config.retile->mutate(*main_cluster->node()); + auto new_cluster_1 = new_node->value.facade(); + auto& new_cluster = grouping.make_child(); + new_cluster.take_children(*new_cluster_1); // Move all blobs from improved cluster + new_cluster.from(*main_cluster); + + // create the new graph + new_cluster.find_graph("ctpc_ref_pid", *main_cluster, m_dv, m_pcts); + + + Steiner::Grapher sg(new_cluster, m_grapher_config, log); + auto& graph = sg.get_graph("ctpc_ref_pid"); + std::cout << "CreateSteinerGraph: " << "ctpc_ref_pid with " + << boost::num_vertices(graph) << " vertices and " + << boost::num_edges(graph) << " edges." 
<< std::endl; + + sg.establish_same_blob_steiner_edges("ctpc_ref_pid", false); + std::cout << "CreateSteinerGraph: " << "ctpc_ref_pid with " + << boost::num_vertices(graph) << " vertices and " + << boost::num_edges(graph) << " edges." << std::endl; + auto pair_points = new_cluster.get_two_boundary_wcps(); + std::cout << "CreateSteinerGraph: " << pair_points.first.x() << " " + << pair_points.first.y() << " " + << pair_points.first.z() << " | " + << pair_points.second.x() << " " + << pair_points.second.y() << " " + << pair_points.second.z() << std::endl; + auto first_index = new_cluster.get_closest_point_index(pair_points.first); + auto second_index = new_cluster.get_closest_point_index(pair_points.second); + std::vector path_point_indices = new_cluster.graph_algorithms("ctpc_ref_pid").shortest_path(first_index, second_index); + std::cout << "CreateSteinerGraph: " << first_index << " " << second_index << " # of points along path: " << path_point_indices.size() << std::endl; + + sg.remove_same_blob_steiner_edges("ctpc_ref_pid"); + std::cout << "CreateSteinerGraph: " << "ctpc_ref_pid with " + << boost::num_vertices(graph) << " vertices and " + << boost::num_edges(graph) << " edges." << std::endl; + + // path_point_indices belong to new_cluster, on which sg is based + // main_cluster is a reference to filter points ... + sg.create_steiner_tree(main_cluster, path_point_indices, "ctpc_ref_pid", "steiner_graph", false, "steiner_pc"); + const auto& steiner_point_cloud = sg.get_point_cloud("steiner_pc"); + const auto& steiner_graph = sg.get_graph("steiner_graph"); + auto& flag_terminals = sg.get_flag_steiner_terminal(); + size_t num_true_terminals = std::count(flag_terminals.begin(), flag_terminals.end(), true); + + std::cout << "CreateSteinerGraph: " << "steiner_graph with " + << boost::num_vertices(steiner_graph) << " vertices and " + << boost::num_edges(steiner_graph) << " edges." 
<< " " << steiner_point_cloud.size() << " " << flag_terminals.size() << " " << num_true_terminals << std::endl; + + // pass the new_cluster's steiner_graph and stener_pc to the main cluster + Steiner::Grapher main_sg(*main_cluster, m_grapher_config, log); + main_sg.transfer_pc(sg, "steiner_pc", "steiner_pc"); + main_sg.transfer_graph(sg, "steiner_graph", "steiner_graph"); + + // test ... + auto pair_idx = main_cluster->get_two_boundary_steiner_graph_idx("steiner_graph", "steiner_pc", false); + std::cout << "Xin3: " << pair_idx.first << " " << pair_idx.second << " " << pair_points.first << std::endl; + auto kd_results = main_cluster->kd_steiner_knn(1, pair_points.first); + auto kd_points = main_cluster->kd_steiner_points(kd_results); + std::cout << "Xin4: " << kd_points.size() << " " << (*kd_points.begin()).first << " " << (*kd_points.begin()).second.first << " " << (*kd_points.begin()).second.second << std::endl; + + // delete new cluster from grouping after usage ... + auto* new_cluster_ptr = &new_cluster; + grouping.destroy_child(new_cluster_ptr, true); + } + } + + // for (auto* cluster : filtered_clusters) { + // bool already = cluster->has_graph(m_graph_name); + // if (already || m_replace) { + + // const auto& new_to_old = sg.get_new_to_old_mapping(); + // std::cout << "Xin2: " << cell_points_map.size() << " Graph vertices: " << boost::num_vertices(steiner_graph) << ", edges: " << boost::num_edges(steiner_graph) << " " << steiner_point_cloud.size_major() << " " << num_true_terminals << std::endl; + + + // // auto edge_weight_map = get(boost::edge_weight, steiner_graph); + // // for (auto edge_it = boost::edges(steiner_graph); edge_it.first != edge_it.second; ++edge_it.first) { + // // auto edge = *edge_it.first; + // // auto src = boost::source(edge, steiner_graph); + // // auto tgt = boost::target(edge, steiner_graph); + // // // Use the new-to-old mapping to get original vertex IDs + // // auto src_id = new_to_old.at(src); + // // auto tgt_id = 
new_to_old.at(tgt); + + // // // Get the edge weight using the proper accessor + // // auto weight = edge_weight_map[edge]; + + // // std::cout << "Edge from vertex " << cluster->point3d(src_id) << " to " << cluster->point3d(tgt_id) << " with weight " << weight << " " << flag_terminals[src] << " " << flag_terminals[tgt] << std::endl; + // // } + + // // for (const auto& [cell, points] : cell_points_map) { + // // // std::cout << "Xin2 Cell: " << cell->slice_index_min() << " " << cell->u_wire_index_min() << " " << cell->v_wire_index_min() << " " << cell->w_wire_index_min() << " has " << points.size() << " points." << std::endl; + // // // for (const auto& point : points) { + // // // auto info = cluster->calc_charge_wcp(point); + // // // std::cout << "Xin2 Point: " << point << " " << info.first << " " << info.second << std::endl; + // // // } + // // std::vector single_blob = {cell}; + // // auto blob_peaks = sg.find_peak_point_indices(single_blob, "basic_pid", true); + // // std::cout << "Xin2: " << cell->slice_index_min() << " " << cell->u_wire_index_min() << " " << cell->v_wire_index_min() << " " << cell->w_wire_index_min() << " " << points.size() << " " << blob_peaks.size() <get_extreme_wcps(); + // // std::cout << "Xin4: " << extrem_points.size() << std::endl; + // // for (const auto& pts : extrem_points) { + // // for (const auto& pt : pts) { + // // std::cout << "Extreme point: (" + // // << pt.x() << ", " + // // << pt.y() << ", " + // // << pt.z() << ")" << std::endl; + // // } + // // } + + // // auto extreme_boundary_points = cluster->get_two_boundary_wcps(1); + // // std::cout << "Xin3: " << extreme_boundary_points.first.x() << " " + // // << extreme_boundary_points.first.y() << " " + // // << extreme_boundary_points.first.z() << " | " + // // << extreme_boundary_points.second.x() << " " + // // << extreme_boundary_points.second.y() << " " + // // << extreme_boundary_points.second.z() << std::endl; + + // // for (const auto& [cell, points] : 
cell_points_map) { + // // auto total_charge = cell->estimate_total_charge(); + // // auto min_charge = cell->estimate_minimum_charge(); + + // // std::cout << "Xin2 Cell: " << cell->slice_index_min() << " " << cell->u_wire_index_min() << " " << cell->v_wire_index_min() << " " << cell->w_wire_index_min() + // // << " has " << points.size() << " points, total charge: " << total_charge + // // << ", min charge: " << min_charge << " " << cell->get_wire_charge(0, cell->u_wire_index_min()) << " " << cell->get_wire_charge_error(0, cell->u_wire_index_min()) + // // << std::endl; + // // // cell->check_dead_wire_consistency(); + // // } + + // // std::cout << grouping.get_dead_winds1(0,0,0).size() << " " << grouping.get_dead_winds(0,0,0).size() << " " << grouping.get_dead_winds1(0,0,1).size() << " " << grouping.get_dead_winds(0,0,1).size() << " " << grouping.get_dead_winds1(0,0,2).size() << " " << grouping.get_dead_winds(0,0,2).size() << std::endl; + + // } +} diff --git a/clus/src/CreateSteinerGraph.h b/clus/src/CreateSteinerGraph.h new file mode 100644 index 000000000..15dc97893 --- /dev/null +++ b/clus/src/CreateSteinerGraph.h @@ -0,0 +1,65 @@ +/** This is a private header local to clus/src/. + + It defines an "ensemble visitor" that will add a "steiner graph" to certain + clusters in a grouping. + + The Steiner::Grapher class does the work for each cluster. 
+ */ + +#ifndef WIRECELLCLUS_CREATESTEINERGRAPH +#define WIRECELLCLUS_CREATESTEINERGRAPH + +#include "SteinerGrapher.h" + +#include "WireCellClus/IEnsembleVisitor.h" +#include "WireCellClus/Facade_Cluster.h" +#include "WireCellClus/Facade_Grouping.h" +#include "WireCellClus/Facade_Ensemble.h" +#include "WireCellClus/ClusteringFuncsMixins.h" + +#include "WireCellAux/Logger.h" + +#include "WireCellIface/IConfigurable.h" + +#include "WireCellUtil/NamedFactory.h" + + + +namespace WireCell::Clus::Steiner { + + class CreateSteinerGraph : public Aux::Logger, public IConfigurable, public Clus::IEnsembleVisitor, + private Clus::NeedDV, private Clus::NeedPCTS{ + std::string m_grouping_name{"live"}; + std::string m_graph_name{"steiner"}; + bool m_replace{true}; + + Grapher::Config m_grapher_config; + + + public: + CreateSteinerGraph(); + virtual ~CreateSteinerGraph(); + + // IConfigurable + virtual void configure(const WireCell::Configuration& cfg); + virtual Configuration default_configuration() const; + + // IEnsembleVisitor + /// Loops over each cluster in the chosen grouping. + /// See SteinerCluster per-cluster operations. + virtual void visit(Facade::Ensemble& ensemble) const; + + private: + + /* Xin, + + No per-cluster stuff here. 
See SteinerGrapher.h for that + + */ + + + + }; +} + +#endif diff --git a/clus/src/DetUtils.cxx b/clus/src/DetUtils.cxx new file mode 100644 index 000000000..2d3af6d79 --- /dev/null +++ b/clus/src/DetUtils.cxx @@ -0,0 +1,51 @@ +#include "WireCellClus/DetUtils.h" + +namespace WireCell::Clus { + + std::set apa_idents(IDetectorVolumes::pointer dv) + { + std::set apas; + for (auto& [wpid_ident, iface] : dv->wpident_faces()) { + const WirePlaneId wpid(wpid_ident); + apas.insert(wpid.apa()); + } + return apas; + } + + wpid_faceparams_map face_parameters(IDetectorVolumes::pointer dv) + { + wpid_faceparams_map ret; + for (auto& [wpid_ident, iface] : dv->wpident_faces()) { + const WirePlaneId wpid(wpid_ident); + int apa = wpid.apa(); + int face = wpid.face(); + + // Create wpids for all three planes with this APA and face + WirePlaneId wpid_u(kUlayer, face, apa); + WirePlaneId wpid_v(kVlayer, face, apa); + WirePlaneId wpid_w(kWlayer, face, apa); + + // Get drift direction based on face orientation + int face_dirx = dv->face_dirx(wpid_u); + + // Get wire directions for all planes + Vector wire_dir_u = dv->wire_direction(wpid_u); + Vector wire_dir_v = dv->wire_direction(wpid_v); + Vector wire_dir_w = dv->wire_direction(wpid_w); + + // Calculate angles + double angle_u = std::atan2(wire_dir_u.z(), wire_dir_u.y()); + double angle_v = std::atan2(wire_dir_v.z(), wire_dir_v.y()); + double angle_w = std::atan2(wire_dir_w.z(), wire_dir_w.y()); + + ret[wpid] = {Vector(face_dirx,0,0), angle_u, angle_v, angle_w}; + } + return ret; + } + + std::shared_ptr make_dynamicpointcloud(IDetectorVolumes::pointer dv) + { + return std::make_shared(face_parameters(dv)); + } + +} diff --git a/clus/src/DynamicPointCloud.cxx b/clus/src/DynamicPointCloud.cxx new file mode 100644 index 000000000..838b49a02 --- /dev/null +++ b/clus/src/DynamicPointCloud.cxx @@ -0,0 +1,852 @@ +#include "WireCellClus/DynamicPointCloud.h" +#include "WireCellClus/Facade.h" +#include "WireCellClus/Facade_Util.h" +#include 
"WireCellUtil/Logging.h" + +#include +#include + +#include + +using namespace WireCell; +using namespace WireCell::Clus::Facade; +using spdlog::debug; + +#ifdef __DEBUG__ +#define LogDebug(x) std::cout << "[DPC]: " << __LINE__ << " : " << x << std::endl +#else +#define LogDebug(x) +#endif + +const static std::vector wind_bogus = {INT_MIN, INT_MIN, INT_MIN}; +const static std::vector dist_cut_bogus = {-1e12, -1e12, -1e12}; + + +DynamicPointCloud::nfkd_t &DynamicPointCloud::kd3d() const +{ + if (!m_kd3d) { + m_kd3d = std::make_unique(3); + } + return *m_kd3d; +} + +DynamicPointCloud::nfkd_t &DynamicPointCloud::kd2d(const int plane, const int face, const int apa) const +{ + WirePlaneId wpid(iplane2layer[plane], face, apa); + // SPDLOG_DEBUG("DynamicPointCloud: kd2d {} {} {} wpid {}", plane, face, apa, wpid.name()); + auto iter = m_kd2d.find(wpid.ident()); + if (iter == m_kd2d.end()) { + m_kd2d[wpid.ident()] = std::make_unique(2); + } + return *m_kd2d[wpid.ident()]; +} + +const std::unordered_map &DynamicPointCloud::kd2d_l2g(const int plane, const int face, + const int apa) const +{ + WirePlaneId wpid(iplane2layer[plane], face, apa); + auto iter = m_kd2d_index_l2g.find(wpid.ident()); + + // Debug logging for the requested wpid information + // std::cout << "DynamicPointCloud: kd2d_l2g requested for wpid " << wpid.name() + // << " (ident: " << wpid.ident() << ", face: " << wpid.face() + // << ", apa: " << wpid.apa() << ")" << std::endl; + // for (auto it = m_kd2d_index_l2g.begin(); it != m_kd2d_index_l2g.end(); ++it) { + // std::cout << "DynamicPointCloud: kd2d_l2g available wpid " << WirePlaneId(it->first).name() + // << " (ident: " << it->first << ", face: " << WirePlaneId(it->first).face() + // << ", apa: " << WirePlaneId(it->first).apa() << ")" << std::endl; + // } + + if (iter == m_kd2d_index_l2g.end()) { + // Create empty mapping for this wpid instead of raising an error + m_kd2d_index_l2g[wpid.ident()] = std::unordered_map(); + iter = 
m_kd2d_index_l2g.find(wpid.ident()); + SPDLOG_DEBUG("DynamicPointCloud: created empty 2D index l2g for wpid {}", wpid.name()); + } + return iter->second; +} + +const std::unordered_map > &DynamicPointCloud::kd2d_g2l(const int plane, const int face, + const int apa) const +{ + WirePlaneId wpid(iplane2layer[plane], face, apa); + auto iter = m_kd2d_index_g2l.find(wpid.ident()); + if (iter == m_kd2d_index_g2l.end()) { + raise("DynamicPointCloud: missing 2D index g2l for wpid %s", wpid.name()); + } + return iter->second; +} + + +void DynamicPointCloud::add_points(const std::vector &points) { + if (points.empty()) { + return; + } + + // Preallocate memory and get original size + size_t original_size = m_points.size(); + m_points.reserve(original_size + points.size()); + + // Move the points instead of copying + m_points.insert(m_points.end(), + std::make_move_iterator(points.begin()), + std::make_move_iterator(points.end())); + + // Process 3D KD tree + auto &kd3d = this->kd3d(); + + // Prepare batch data for 3D KD tree + NFKDVec::Tree::points_type pts3d(3); + for (size_t i = 0; i < 3; ++i) { + pts3d[i].reserve(points.size()); + } + + // Prepare maps to store 2D points for each plane and track local-to-global mappings + std::map::points_type> planes_pts; + std::map> planes_global_indices; + + // Extract data and prepare for batch processing + for (size_t i = 0; i < points.size(); ++i) { + size_t global_idx = original_size + i; + const auto &pt = points[i]; + + // Add to 3D points + pts3d[0].push_back(pt.x); + pts3d[1].push_back(pt.y); + pts3d[2].push_back(pt.z); + + // Check 2D projection validity + if (pt.x_2d.size() != 3 || pt.y_2d.size() != 3) { + raise("DynamicPointCloud: unexpected 2D projection size x_2d %d y_2d %d", + pt.x_2d.size(), pt.y_2d.size()); + } + + // Skip 2D KD if wpid is not valid + WirePlaneId wpid_volume(pt.wpid); + if (wpid_volume.face() == -1 || wpid_volume.apa() == -1) { + continue; + } + + // Process 2D points for each plane + for (size_t pindex 
= 0; pindex < 3; ++pindex) { + + // std::cout << "Test: " << pindex << " " << pt.x_2d[pindex].size() << " " << pt.y_2d[pindex].size() << " " << pt.wpid_2d[pindex].size() << std::endl; + + // Add 2D point to plane data + for (size_t j = 0; j < pt.x_2d[pindex].size(); ++j) { + WirePlaneId wpid_2d(pt.wpid_2d[pindex].at(j)); + WirePlaneId wpid_plane(iplane2layer[pindex], wpid_2d.face(), wpid_2d.apa()); + int key = wpid_plane.ident(); + // Initialize plane data structures if not exists + if (planes_pts.find(key) == planes_pts.end()) { + planes_pts[key] = NFKDVec::Tree::points_type(2); + planes_pts[key][0].reserve(points.size()); + planes_pts[key][1].reserve(points.size()); + planes_global_indices[key].reserve(points.size()); + } + planes_pts[key][0].push_back(pt.x_2d[pindex][j]); + planes_pts[key][1].push_back(pt.y_2d[pindex][j]); + planes_global_indices[key].push_back(global_idx); + } + } + + } + + // Batch append 3D points + kd3d.append(pts3d); + + + // Create a reverse mapping from layer to iplane based on the existing iplane2layer array + std::unordered_map layer2iplane; + for (int i = 0; i < 3; ++i) { + layer2iplane[iplane2layer[i]] = i; + } + + + // Batch append 2D points for each plane + for (const auto& [key, pts] : planes_pts) { + WirePlaneId wpid_plane(key); + int pindex = layer2iplane[wpid_plane.layer()]; + auto& kd2d = this->kd2d(pindex, wpid_plane.face(), wpid_plane.apa()); + + // Get the starting index for the new points + size_t start_idx = kd2d.npoints(); + + // Batch append 2D points + kd2d.append(pts); + + // Update index mappings + const auto& indices = planes_global_indices[key]; + for (size_t i = 0; i < indices.size(); ++i) { + size_t local_idx = start_idx + i; + size_t global_idx = indices[i]; + m_kd2d_index_l2g[key][local_idx] = global_idx; + m_kd2d_index_g2l[key][global_idx].push_back(local_idx); // save things to a vector + } + } +} + + + + +std::vector> +DynamicPointCloud::get_2d_points_info(const geo_point_t &p, const double radius, const int 
plane, const int face, + const int apa) const +{ + // Create WirePlaneId once + WirePlaneId wpid_volume(kAllLayers, face, apa); + + // Get KD tree and mapping + auto &kd2d = this->kd2d(plane, face, apa); + auto &l2g = this->kd2d_l2g(plane, face, apa); + + // Get angle parameters - lookup once + if (m_wpid_params.find(wpid_volume) == m_wpid_params.end()) { + raise("DynamicPointCloud: missing wpid params for wpid %s", wpid_volume.name()); + } + const auto [_, angle_u, angle_v, angle_w] = m_wpid_params.at(wpid_volume); + + // Compute projected point + const double angle = (plane == 0) ? angle_u : ((plane == 1) ? angle_v : angle_w); + const double projected_y = cos(angle) * p.z() - sin(angle) * p.y(); + + // Prepare query point + std::vector query = {p.x(), projected_y}; + + // Perform radius search + auto results = kd2d.radius(radius * radius, query); + + // Optimize for empty results case + if (results.empty()) { + return {}; + } + + // Pre-allocate return vector + std::vector> return_results; + return_results.reserve(results.size()); + + // Process results + for (const auto &[local_idx, dist_squared] : results) { + const size_t global_idx = l2g.at(local_idx); + const auto &pt = m_points[global_idx]; + + // Option 1: Return squared distances if caller doesn't need actual distances + // return_results.emplace_back(dist_squared, pt.cluster, global_idx); + + // Option 2: Compute sqrt only when needed + return_results.emplace_back(sqrt(dist_squared), pt.cluster, global_idx); + } + + return return_results; +} + + + +std::tuple +DynamicPointCloud::get_closest_2d_point_info(const geo_point_t &p, const int plane, const int face, const int apa) const +{ + // Create WirePlaneId only once + const WirePlaneId wpid_volume(kAllLayers, face, apa); + + // Get KD tree and mapping - only need l2g here, g2l isn't used + auto &kd2d = this->kd2d(plane, face, apa); + auto &l2g = this->kd2d_l2g(plane, face, apa); + + // Check and get angle parameters + auto wpid_iter = 
m_wpid_params.find(wpid_volume); + if (wpid_iter == m_wpid_params.end()) { + raise("DynamicPointCloud: missing wpid params for wpid %s", wpid_volume.name()); + } + + // Calculate angle more directly based on plane parameter + const auto &[_, angle_u, angle_v, angle_w] = wpid_iter->second; + const double angle = (plane == 0) ? angle_u : ((plane == 1) ? angle_v : angle_w); + + // Prepare query point more efficiently + const double projected_y = cos(angle) * p.z() - sin(angle) * p.y(); + const std::vector query = {p.x(), projected_y}; + + // Perform nearest neighbor search + auto results = kd2d.knn(1, query); + + // Early return for empty results + if (results.empty()) { + return std::make_tuple(-1.0, nullptr, static_cast(-1)); + } + + // Process the single result + const size_t local_idx = results[0].first; + const double distance = sqrt(results[0].second); // Only compute sqrt once + const size_t global_idx = l2g.at(local_idx); + const auto &pt = m_points[global_idx]; + + return std::make_tuple(distance, pt.cluster, global_idx); +} + +std::pair DynamicPointCloud::hough_transform(const geo_point_t &origin, const double dis) const +{ + auto &kd3d = this->kd3d(); + + // Preallocate with reasonable capacity to reduce reallocations + std::vector pts; + std::vector blobs; + pts.reserve(100); // Reasonable starting size, adjust based on typical usage + blobs.reserve(100); + + // Create query point + std::vector query = {origin.x(), origin.y(), origin.z()}; + + // Perform radius search + auto results = kd3d.radius(dis * dis, query); + + // Preallocate exact size now that we know it + pts.reserve(results.size()); + blobs.reserve(results.size()); + + for (const auto &[idx, _] : results) { + const auto &pt = m_points[idx]; + pts.push_back({pt.x, pt.y, pt.z}); + blobs.push_back(pt.blob); + } + + namespace bh = boost::histogram; + namespace bha = boost::histogram::algorithm; + + constexpr double pi = 3.141592653589793; + const double ten_cm = 10.0 * units::cm; + + // Parameter 
axis 1 is theta angle + const int nbins1 = 180; + auto theta_param = [](const Vector &dir) { + const Vector Z(0, 0, 1); + return acos(Z.dot(dir)); + }; + double min1 = 0, max1 = pi; + + // Parameter axis 2 is phi angle + const int nbins2 = 360; + const double min2 = -pi; + const double max2 = +pi; + auto phi_param = [](const Vector &dir) { + const Vector X(1, 0, 0); + const Vector Y(0, 1, 0); + return atan2(Y.dot(dir), X.dot(dir)); + }; + + auto hist = bh::make_histogram(bh::axis::regular<>(nbins1, min1, max1), bh::axis::regular<>(nbins2, min2, max2)); + + // Early return if no points found + if (pts.empty()) { + return {0.0, 0.0}; + } + + for (size_t ind = 0; ind < blobs.size(); ++ind) { + const auto *blob = blobs[ind]; + auto charge = blob ? blob->charge() : 1.0; + + if (charge <= 0) continue; + + const auto npoints = blob ? blob->npoints() : 1; + const auto &pt = pts[ind]; + + // Use the original normalization method + const Vector dir = (pt - origin).norm(); + const double r = (pt - origin).magnitude(); + + const double p1 = theta_param(dir); + const double p2 = phi_param(dir); + + if (r < ten_cm) { + hist(p1, p2, bh::weight(charge / npoints)); + } + else { + // Use original formula + hist(p1, p2, bh::weight(charge / npoints * pow(ten_cm / r, 2))); + } + } + + // Use the original max finding approach + auto indexed = bh::indexed(hist); + auto it = std::max_element(indexed.begin(), indexed.end()); + const auto &cell = *it; + return {cell.bin(0).center(), cell.bin(1).center()}; +} + + + + +geo_point_t DynamicPointCloud::vhough_transform(const geo_point_t &origin, const double dis) const +{ + const auto [th, phi] = hough_transform(origin, dis); + return {sin(th) * cos(phi), sin(th) * sin(phi), cos(th)}; +} + + + +std::vector Clus::Facade::make_points_cluster( + const Cluster *cluster, const std::map> &wpid_params, bool flag_wrap) +{ + if (!cluster) { + SPDLOG_WARN("make_points_cluster: null cluster return empty points"); + return {}; + } + + const size_t 
num_points = cluster->npoints(); + std::vector dpc_points; + dpc_points.reserve(num_points); + + const auto &winds = cluster->wire_indices(); + + // Cache commonly referenced WPIDs and their params to avoid map lookups + std::unordered_map> cached_params; + + for (size_t ipt = 0; ipt < num_points; ++ipt) { + geo_point_t pt = cluster->point3d(ipt); + const auto wpid = cluster->wire_plane_id(ipt); + // std::cout << " DEBUG Clus::Facade::make_points_cluster wpid " << wpid.name() << std::endl; + int wpid_ident = wpid.ident(); + + // Check cache first, then populate if needed + auto param_it = cached_params.find(wpid_ident); + if (param_it == cached_params.end()) { + auto wpid_it = wpid_params.find(wpid); + if (wpid_it == wpid_params.end()) { + raise("make_points_cluster: missing wpid params for wpid %s", wpid.name()); + } + param_it = cached_params.emplace(wpid_ident, wpid_it->second).first; + } + + const auto &[drift_dir, angle_u, angle_v, angle_w] = param_it->second; + const double angle_uvw[3] = {angle_u, angle_v, angle_w}; + + DynamicPointCloud::DPCPoint point; + point.x = pt.x(); + point.y = pt.y(); + point.z = pt.z(); + point.wpid = wpid.ident(); + point.cluster = cluster; + point.blob = cluster->blob_with_point(ipt); + + // Pre-allocate vectors with correct size + point.x_2d.resize(3); + point.y_2d.resize(3); + point.wpid_2d.resize(3); + + if (flag_wrap){ + fill_wrap_points(cluster, pt, WirePlaneId(wpid), point.x_2d, point.y_2d, point.wpid_2d); + }else{ + for (size_t pindex = 0; pindex < 3; ++pindex) { + point.x_2d[pindex].push_back(point.x); + point.y_2d[pindex].push_back(cos(angle_uvw[pindex]) * point.z - sin(angle_uvw[pindex]) * point.y); + point.wpid_2d[pindex].push_back(wpid.ident()); + } + } + + + point.wind = {winds[0][ipt], winds[1][ipt], winds[2][ipt]}; + point.dist_cut = dist_cut_bogus; + + dpc_points.push_back(std::move(point)); + } + + return dpc_points; +} + + +std::vector Clus::Facade::make_points_direct(const Cluster *cluster, const 
IDetectorVolumes::pointer dv, const std::map> &wpid_params, std::vector>& points_info, bool flag_wrap){ + std::vector dpc_points; + + if (!cluster) { + SPDLOG_WARN("make_points_cluster_skeleton: null cluster return empty points"); + return dpc_points; + } + dpc_points.reserve(points_info.size()); + + // Cache for angle values per wpid to avoid repeated tuple unpacking + std::unordered_map> wpid_angles_cache; + + for (auto& [test_point, wpid_test_point] : points_info) { + // std::cout << test_point << " " << wpid_test_point << std::endl; + if (wpid_params.find(wpid_test_point) == wpid_params.end()) { + raise("make_points_cluster: missing wpid params for wpid %s", wpid_test_point.name()); + } + + // Get or compute angle values for this wpid + // std::array angle_uvw; + // auto cache_it = wpid_angles_cache.find(wpid_test_point.ident()); + // if (cache_it == wpid_angles_cache.end()) { + // const auto& [drift_dir, angle_u, angle_v, angle_w] = wpid_params.at(wpid_test_point); + // angle_uvw = {angle_u, angle_v, angle_w}; + // wpid_angles_cache[wpid_test_point.ident()] = angle_uvw; + // } else { + // angle_uvw = cache_it->second; + // } + + DynamicPointCloud::DPCPoint point; + point.wpid = wpid_test_point.ident(); // Direct assignment without recreation + point.cluster = cluster; + point.blob = nullptr; + + // Pre-allocate and initialize vectors + point.x_2d.resize(3); + point.y_2d.resize(3); + point.wpid_2d.resize(3); + point.wind = wind_bogus; + point.dist_cut = dist_cut_bogus; + + point.x = test_point.x(); + point.y = test_point.y(); + point.z = test_point.z(); + + if (wpid_test_point.apa() != -1) { + // Get cached angles if available + std::array temp_angle_uvw; + auto cache_it = wpid_angles_cache.find(wpid_test_point.ident()); + if (cache_it == wpid_angles_cache.end()) { + const auto& [drift_dir, angle_u, angle_v, angle_w] = wpid_params.at(wpid_test_point); + temp_angle_uvw = {angle_u, angle_v, angle_w}; + wpid_angles_cache[wpid_test_point.ident()] = temp_angle_uvw; 
+ } else { + temp_angle_uvw = cache_it->second; + } + + + if (flag_wrap){ + fill_wrap_points(cluster, test_point, wpid_test_point, point.x_2d, point.y_2d, point.wpid_2d); + }else{ + for (size_t pindex = 0; pindex < 3; ++pindex) { + point.x_2d[pindex].push_back(point.x); + point.y_2d[pindex].push_back(cos(temp_angle_uvw[pindex]) * point.z - + sin(temp_angle_uvw[pindex]) * point.y); + point.wpid_2d[pindex].push_back(wpid_test_point.ident()); + + } + } + // std::cout << flag_wrap << " " << point.x << " " << point.y << " " << point.z << std::endl; + // std::cout << temp_angle_uvw[0] << " " << temp_angle_uvw[1] << " " << temp_angle_uvw[2] << " " << point.x_2d[0].back() << " " << point.y_2d[0].back() << " " << point.y_2d[1].back() << " " << point.y_2d[2].back() << std::endl; + + } + // else { + // point.x_2d = {-1e12, -1e12, -1e12}; + // point.y_2d = {-1e12, -1e12, -1e12}; + // } + dpc_points.push_back(std::move(point)); + } + + + return dpc_points; + +} + + +std::vector +Clus::Facade::make_points_cluster_skeleton( + const Cluster *cluster, const IDetectorVolumes::pointer dv, + const std::map> &wpid_params, + const std::vector& path_wcps, + bool flag_wrap, + const double step) +{ + std::vector dpc_points; + + if (!cluster) { + SPDLOG_WARN("make_points_cluster_skeleton: null cluster return empty points"); + return dpc_points; + } + + // Estimate capacity to avoid reallocations + // const auto& path_wcps = cluster->get_path_wcps(); + size_t estimated_capacity = path_wcps.size() * 2; // Rough estimate + dpc_points.reserve(estimated_capacity); + + // Cache for angle values per wpid to avoid repeated tuple unpacking + std::unordered_map> wpid_angles_cache; + + geo_point_t prev_wcp = cluster->point3d(path_wcps.front()); + auto prev_wpid = cluster->wire_plane_id(path_wcps.front()); + + // Pre-computed constants + const double dist_cut_value = 2.4 * units::cm; + + for (auto it = path_wcps.begin(); it != path_wcps.end(); it++) { + geo_point_t test_point = cluster->point3d(*it); + 
double dis = (test_point - prev_wcp).magnitude(); + auto wpid_test_point = cluster->wire_plane_id(*it); + + if (wpid_params.find(wpid_test_point) == wpid_params.end()) { + raise("make_points_cluster: missing wpid params for wpid %s", wpid_test_point.name()); + } + + // Get or compute angle values for this wpid + std::array angle_uvw; + auto cache_it = wpid_angles_cache.find(wpid_test_point.ident()); + if (cache_it == wpid_angles_cache.end()) { + const auto& [drift_dir, angle_u, angle_v, angle_w] = wpid_params.at(wpid_test_point); + angle_uvw = {angle_u, angle_v, angle_w}; + wpid_angles_cache[wpid_test_point.ident()] = angle_uvw; + } else { + angle_uvw = cache_it->second; + } + + if (dis <= step) { + DynamicPointCloud::DPCPoint point; + point.wpid = wpid_test_point.ident(); // Direct assignment without recreation + point.cluster = cluster; + point.blob = nullptr; + + // Pre-allocate and initialize vectors + point.x_2d.resize(3); + point.y_2d.resize(3); + point.wpid_2d.resize(3); + point.wind = wind_bogus; + point.dist_cut = {dist_cut_value, dist_cut_value, dist_cut_value}; + + point.x = test_point.x(); + point.y = test_point.y(); + point.z = test_point.z(); + + if (flag_wrap){ + fill_wrap_points(cluster, test_point, WirePlaneId(wpid_test_point), point.x_2d, point.y_2d, point.wpid_2d); + }else{ + for (size_t pindex = 0; pindex < 3; ++pindex) { + point.x_2d[pindex].push_back(test_point.x()); + point.y_2d[pindex].push_back(cos(angle_uvw[pindex]) * test_point.z() - sin(angle_uvw[pindex]) * test_point.y()); + point.wpid_2d[pindex].push_back(wpid_test_point.ident()); + } + } + + dpc_points.push_back(std::move(point)); + } + else { + int num_points = int(dis / step) + 1; + + // Pre-compute direction vectors to avoid recalculation in loop + double dx = (test_point.x() - prev_wcp.x()) / num_points; + double dy = (test_point.y() - prev_wcp.y()) / num_points; + double dz = (test_point.z() - prev_wcp.z()) / num_points; + + for (int k = 0; k != num_points; k++) { + 
DynamicPointCloud::DPCPoint point; + + // Faster interpolation with pre-computed increments + double t = (k + 1.0); + point.x = prev_wcp.x() + t * dx; + point.y = prev_wcp.y() + t * dy; + point.z = prev_wcp.z() + t * dz; + + geo_point_t temp_point(point.x, point.y, point.z); + auto temp_wpid = WirePlaneId(get_wireplaneid(temp_point, prev_wpid, wpid_test_point, dv)); + + point.wpid = temp_wpid.ident(); // Direct assignment + point.cluster = cluster; + point.blob = nullptr; + point.wind = wind_bogus; + point.dist_cut = {dist_cut_value, dist_cut_value, dist_cut_value}; + point.x_2d.resize(3); + point.y_2d.resize(3); + point.wpid_2d.resize(3); + + + if (temp_wpid.apa() != -1) { + // Get cached angles if available + std::array temp_angle_uvw; + auto cache_it = wpid_angles_cache.find(temp_wpid.ident()); + if (cache_it == wpid_angles_cache.end()) { + const auto& [drift_dir, angle_u, angle_v, angle_w] = wpid_params.at(temp_wpid); + temp_angle_uvw = {angle_u, angle_v, angle_w}; + wpid_angles_cache[temp_wpid.ident()] = temp_angle_uvw; + } else { + temp_angle_uvw = cache_it->second; + } + + if (flag_wrap){ + fill_wrap_points(cluster, temp_point, temp_wpid, point.x_2d, point.y_2d, point.wpid_2d); + }else{ + for (size_t pindex = 0; pindex < 3; ++pindex) { + point.x_2d[pindex].push_back(point.x); + point.y_2d[pindex].push_back(cos(temp_angle_uvw[pindex]) * point.z - + sin(temp_angle_uvw[pindex]) * point.y); + point.wpid_2d[pindex].push_back(temp_wpid.ident()); + + } + } + } + // } else { + // // point.x_2d = {-1e12, -1e12, -1e12}; + // // point.y_2d = {-1e12, -1e12, -1e12}; + // } + + dpc_points.push_back(std::move(point)); + } + } + + prev_wcp = test_point; + prev_wpid = wpid_test_point; + } + + return dpc_points; +} + + + +std::vector Clus::Facade::make_points_linear_extrapolation( + const Cluster *cluster, const geo_point_t &p_test, const geo_point_t &dir_unmorm, const double range, + const double step, const double angle, const IDetectorVolumes::pointer dv, + const std::map> 
&wpid_params) +{ + std::vector dpc_points; + + if (!cluster) { + SPDLOG_WARN("make_points_linear_extrapolation: null cluster return empty points"); + return dpc_points; + } + + // Get wpid once from the grouping + const auto wpid = *(cluster->grouping()->wpids().begin()); + + // Check wpid early + if (wpid_params.find(wpid) == wpid_params.end()) { + raise("make_points_cluster: missing wpid params for wpid %s", wpid.name()); + } + + // Pre-compute constants + const double DEG_TO_RAD = 3.1415926/180.0; + const double sin_angle_rad = sin(angle * DEG_TO_RAD); + const double MIN_DIS_CUT = 2.4 * units::cm; + const double MAX_DIS_CUT = 13.0 * units::cm; + + // Normalize direction once + geo_point_t dir = dir_unmorm.norm(); + + // Calculate segment count and distances + int num_points = int(range / step) + 1; + double dis_seg = range / num_points; + + // Pre-compute direction scaling + double dx_step = dir.x() * dis_seg; + double dy_step = dir.y() * dis_seg; + double dz_step = dir.z() * dis_seg; + + // Get angle values once + const auto [drift_dir, angle_u, angle_v, angle_w] = wpid_params.at(wpid); + const double cos_angle_uvw[3] = {cos(angle_u), cos(angle_v), cos(angle_w)}; + const double sin_angle_uvw[3] = {sin(angle_u), sin(angle_v), sin(angle_w)}; + + // Resize once + dpc_points.resize(num_points); + + for (int k = 0; k < num_points; k++) { + // Calculate position once + double k_dis = k * dis_seg; + double x = p_test.x() + k * dx_step; + double y = p_test.y() + k * dy_step; + double z = p_test.z() + k * dz_step; + + // Calculate distance cut + const double dis_cut = std::floor(std::min(std::max(MIN_DIS_CUT, k_dis * sin_angle_rad), MAX_DIS_CUT)); + // int dis_cut_int = static_cast(dis_cut); + + // Set up point + DynamicPointCloud::DPCPoint& point = dpc_points[k]; + point.cluster = cluster; + point.blob = nullptr; + point.x = x; + point.y = y; + point.z = z; + point.wpid = wpid.ident(); + + // Initialize arrays + point.x_2d.resize(3); + point.y_2d.resize(3); + 
point.wpid_2d.resize(3); + point.wind = wind_bogus; + //point.dist_cut = {dis_cut_int, dis_cut_int, dis_cut_int}; + point.dist_cut = {dis_cut, dis_cut, dis_cut}; + + // Calculate 2D projections + for (size_t pindex = 0; pindex < 3; ++pindex) { + point.x_2d[pindex].push_back(x); + point.y_2d[pindex].push_back(cos_angle_uvw[pindex] * z - sin_angle_uvw[pindex] * y); + point.wpid_2d[pindex].push_back(wpid.ident()); + } + } + + return dpc_points; +} + + +void Clus::Facade::fill_wrap_points(const Cluster *cluster, const geo_point_t &point, const WirePlaneId& wpid, std::vector>& p_x, std::vector>& p_y, std::vector>& p_wpid){ + int apa = wpid.apa(); + int face = wpid.face(); + auto grouping = cluster->grouping(); + std::map> map_angles; // face -->angles + // std::cout << "fill_wrap_points: apa " << apa << " face " << face << std::endl; + const auto wire_angles = grouping->wire_angles(apa, face); + auto& angles = map_angles[face]; + angles.push_back(std::get<0>(wire_angles)); + angles.push_back(std::get<1>(wire_angles)); + angles.push_back(std::get<2>(wire_angles)); + + // find the drift time ... + const auto map_time_offset = grouping->get_time_offset().at(apa); + const auto map_drift_speed = grouping->get_drift_speed().at(apa); + double time_offset = map_time_offset.at(face); + double drift_speed = map_drift_speed.at(face); + + // std::cout << "Test: " << map_time_offset.size() << " " << map_time_offset.begin()->first << " " << std::endl; + + auto anode = grouping->get_anode(apa); + const auto iface = anode->faces()[face]; + const double time = drift2time(iface, time_offset, drift_speed, point.x()); + + const auto map_pitch_mags = grouping->pitch_mags().at(apa); + const auto map_proj_centers = grouping->proj_centers().at(apa); + + for (size_t pind = 0; pind < 3; ++pind) { + // find the wire index ... 
+ const double angle = map_angles.at(face)[pind]; + const double pitch = map_pitch_mags.at(face).at(pind); + const double center = map_proj_centers.at(face).at(pind); + int wind = point2wind(point, angle, pitch, center); + if (wind < 0) wind = 0; + auto plane_ptr =iface->plane(pind); + const auto& wires_all = plane_ptr->wires(); + size_t max_wind = wires_all.size(); + // size_t max_wind = grouping->get_plane_channels(apa, face, iplane2layer[pind]).size() - 1; + if ((size_t)wind > max_wind) wind = max_wind; + // get channel ... + auto wire = wires_all[wind]; + int channel_number = wire->channel(); + // auto channel = grouping->get_plane_channel_wind(apa, face, iplane2layer[pind], wind); + + // get all wires + // auto wires = anode->wires(channel->ident()); + auto wires = anode->wires(channel_number); + for (const auto &wire : wires) { + auto wire_wpid = wire->planeid(); + + // std::cout << "Test: " << map_time_offset.size() << " " << map_time_offset.begin()->first << " " << wire_wpid.face() << std::endl; + p_x[pind].push_back(time2drift(anode->faces()[wire_wpid.face()], map_time_offset.at(wire_wpid.face()), map_drift_speed.at(wire_wpid.face()), time)); + if (map_angles.find(wire_wpid.face()) == map_angles.end()) { + const auto wire_angles1 = grouping->wire_angles(apa, wire_wpid.face()); + auto& angles = map_angles[wire_wpid.face()]; + angles.push_back(std::get<0>(wire_angles1)); + angles.push_back(std::get<1>(wire_angles1)); + angles.push_back(std::get<2>(wire_angles1)); + } + + // Check if this wire is the same as the original wire (wire index, apa, face are all the same) + if (wire_wpid.apa() == wpid.apa() && wire_wpid.face() == wpid.face() && wire->index() == wind) { + // Use the original wire's angles to calculate p_y + p_y[pind].push_back(cos(angles[pind]) * point.z() - sin(angles[pind]) * point.y()); + } else { + // Use the current algorithm + p_y[pind].push_back(wind2point2dproj(wind, map_angles.at(wire_wpid.face()).at(pind), 
map_pitch_mags.at(wire_wpid.face()).at(pind), map_proj_centers.at(wire_wpid.face()).at(pind))); + } + p_wpid[pind].push_back(WirePlaneId(kAllLayers, wire_wpid.face(), wire_wpid.apa()).ident()); + + + } + } + +} diff --git a/clus/src/Facade_Blob.cxx b/clus/src/Facade_Blob.cxx index 10e986bdb..e30c150a7 100644 --- a/clus/src/Facade_Blob.cxx +++ b/clus/src/Facade_Blob.cxx @@ -1,13 +1,13 @@ #include "WireCellClus/Facade_Blob.h" #include "WireCellClus/Facade_Cluster.h" +#include "WireCellClus/Facade_Grouping.h" #include using namespace WireCell; +using namespace WireCell::Clus; +using namespace WireCell::Clus::Facade; using namespace WireCell::PointCloud; -using namespace WireCell::PointCloud::Facade; -// using WireCell::PointCloud::Dataset; using namespace WireCell::PointCloud::Tree; // for "Points" node value type -// using WireCell::PointCloud::Tree::named_pointclouds_t; #include "WireCellUtil/Logging.h" using spdlog::debug; @@ -40,7 +40,7 @@ size_t Blob::hash() const boost::hash_combine(h, center_x()); boost::hash_combine(h, center_y()); boost::hash_combine(h, center_z()); - boost::hash_combine(h, face()); + boost::hash_combine(h, wpid().ident()); boost::hash_combine(h, slice_index_min()); boost::hash_combine(h, slice_index_max()); @@ -61,33 +61,88 @@ void Blob::fill_cache(BlobCache& cache) const raise("scalar PC is not scalar but size %d", pc_scalar.size_major()); } + /// + /// MAKE SURE YOU UPDATE doctest_clustering_prototype.cxx if you change. + /// cache.charge = pc_scalar.get("charge")->elements()[0]; + /// + /// MAKE SURE YOU UPDATE doctest_clustering_prototype.cxx if you change. + /// cache.center_x = pc_scalar.get("center_x")->elements()[0]; + /// + /// MAKE SURE YOU UPDATE doctest_clustering_prototype.cxx if you change. + /// cache.center_y = pc_scalar.get("center_y")->elements()[0]; + /// + /// MAKE SURE YOU UPDATE doctest_clustering_prototype.cxx if you change. 
+ /// cache.center_z = pc_scalar.get("center_z")->elements()[0]; - cache.face = pc_scalar.get("face")->elements()[0]; + /// + /// MAKE SURE YOU UPDATE doctest_clustering_prototype.cxx if you change. + /// + cache.wpid = WirePlaneId(pc_scalar.get("wpid")->elements()[0]); + /// + /// MAKE SURE YOU UPDATE doctest_clustering_prototype.cxx if you change. + /// cache.npoints = pc_scalar.get("npoints")->elements()[0]; + /// + /// MAKE SURE YOU UPDATE doctest_clustering_prototype.cxx if you change. + /// cache.slice_index_min = pc_scalar.get("slice_index_min")->elements()[0]; + /// + /// MAKE SURE YOU UPDATE doctest_clustering_prototype.cxx if you change. + /// cache.slice_index_max = pc_scalar.get("slice_index_max")->elements()[0]; + /// + /// MAKE SURE YOU UPDATE doctest_clustering_prototype.cxx if you change. + /// cache.u_wire_index_min = pc_scalar.get("u_wire_index_min")->elements()[0]; + /// + /// MAKE SURE YOU UPDATE doctest_clustering_prototype.cxx if you change. + /// cache.u_wire_index_max = pc_scalar.get("u_wire_index_max")->elements()[0]; + /// + /// MAKE SURE YOU UPDATE doctest_clustering_prototype.cxx if you change. + /// cache.v_wire_index_min = pc_scalar.get("v_wire_index_min")->elements()[0]; + /// + /// MAKE SURE YOU UPDATE doctest_clustering_prototype.cxx if you change. + /// cache.v_wire_index_max = pc_scalar.get("v_wire_index_max")->elements()[0]; + /// + /// MAKE SURE YOU UPDATE doctest_clustering_prototype.cxx if you change. + /// cache.w_wire_index_min = pc_scalar.get("w_wire_index_min")->elements()[0]; + /// + /// MAKE SURE YOU UPDATE doctest_clustering_prototype.cxx if you change. + /// cache.w_wire_index_max = pc_scalar.get("w_wire_index_max")->elements()[0]; + /// + /// MAKE SURE YOU UPDATE doctest_clustering_prototype.cxx if you change. + /// cache.max_wire_interval = pc_scalar.get("max_wire_interval")->elements()[0]; + /// + /// MAKE SURE YOU UPDATE doctest_clustering_prototype.cxx if you change. 
+ /// cache.min_wire_interval = pc_scalar.get("min_wire_interval")->elements()[0]; + /// + /// MAKE SURE YOU UPDATE doctest_clustering_prototype.cxx if you change. + /// cache.max_wire_type = pc_scalar.get("max_wire_type")->elements()[0]; + /// + /// MAKE SURE YOU UPDATE doctest_clustering_prototype.cxx if you change. + /// cache.min_wire_type = pc_scalar.get("min_wire_type")->elements()[0]; /// - /// MAKE SURE YOU UPDATE doctest_clustering_prototype.cxx if you change the above. + /// MAKE SURE YOU UPDATE doctest_clustering_prototype.cxx if you change. /// } bool Blob::overlap_fast(const Blob& b, const int offset) const { - // check face ... - if (face() != b.face()) return false; + // check apa/face + if (wpid().apa() != b.wpid().apa()) return false; + if (wpid().face() != b.wpid().face()) return false; if (u_wire_index_min() > b.u_wire_index_max()-1 + offset) return false; if (b.u_wire_index_min() > u_wire_index_max()-1 + offset) return false; if (v_wire_index_min() > b.v_wire_index_max()-1 + offset) return false; @@ -97,6 +152,53 @@ bool Blob::overlap_fast(const Blob& b, const int offset) const return true; } + +// void Blob::check_dead_wire_consistency() const{ +// auto grouping = cluster()->grouping(); + +// // Find apa and face for this blob +// int apa = wpid().apa(); +// int face = wpid().face(); + + +// // Loop over time slices and wire indices +// // for (int slice = slice_index_min(); slice < slice_index_max(); slice += 4) { +// // for (int plane = 0; plane < 3; ++plane) { +// // int wire_min = 0, wire_max = 0; +// // if (plane == 0) { +// // wire_min = u_wire_index_min(); +// // wire_max = u_wire_index_max(); +// // } else if (plane == 1) { +// // wire_min = v_wire_index_min(); +// // wire_max = v_wire_index_max(); +// // } else if (plane == 2) { +// // wire_min = w_wire_index_min(); +// // wire_max = w_wire_index_max(); +// // } +// // for (int wire = wire_min; wire < wire_max; ++wire) { +// // bool is_dead = grouping->is_wire_dead(apa, face, plane, 
wire, slice); +// // auto charge_pair = grouping->get_wire_charge(apa, face, plane, wire, slice); + +// // // if (is_dead == 1) std::cout << "apa: " << apa << ", face: " << face +// // // << ", slice: " << slice << ", plane: " << plane +// // // << ", wire: " << wire +// // // << ", is_dead: " << is_dead +// // // << ", charge: (" << charge_pair.first +// // // << ", " << charge_pair.second << ")" << std::endl; +// // // std::cout << "APA: " << apa << ", Face: " << face +// // // << ", Slice: " << slice << ", Plane: " << plane +// // // << ", Wire: " << wire +// // // << ", Is Dead: " << is_dead +// // // << ", Charge: (" << charge_pair.first +// // // << ", " << charge_pair.second << ")" << std::endl; +// // // You can add your logic here for each (slice, plane, wire) +// // // Example: LogDebug("apa=" << apa << " face=" << face << " slice=" << slice << " plane=" << plane << " wire=" << wire); +// // } +// // } +// // } + +// } + geo_point_t Blob::center_pos() const { return {cache().center_x, cache().center_y, cache().center_z}; @@ -115,10 +217,11 @@ bool Blob::sanity(Log::logptr_t log) const return false; } -std::vector Blob::points() const +std::vector Blob::points(const std::string& pc_name, + const std::vector& coords) const { - const auto& pc = m_node->value.local_pcs()["3d"]; - auto sel = pc.selection({"x", "y", "z"}); + const auto& pc = m_node->value.local_pcs()[pc_name]; + auto sel = pc.selection(coords); const size_t npts = sel[0]->size_major(); std::vector ret(npts); @@ -135,8 +238,8 @@ bool Facade::blob_less(const Facade::Blob* a, const Facade::Blob* b) { if (a == b) return false; { - const auto naf = a->face(); - const auto nbf = b->face(); + const auto naf = a->wpid(); + const auto nbf = b->wpid(); if (naf < nbf) return true; if (nbf < naf) return false; } @@ -201,10 +304,219 @@ bool Facade::blob_less(const Facade::Blob* a, const Facade::Blob* b) if (na < nb) return true; if (nb < na) return false; } + // After exhausting all "content" comparison, we 
are left with the question, + // are these two blobs really different or not. We have two choices. We + // may compare on pointer value which will surely "break the tie" but will + // introduce randomness. We may return "false" which says "these are equal" + // in which case any unordered set/map will not hold both. Randomness is + // the better choice as we would have a better chance to detect that in some + // future bug. return a < b; } +double Blob::estimate_total_charge() const { + const Cluster* cluster_ptr = this->cluster(); + if (!cluster_ptr) { + return 0.0; + } + const Grouping* grouping = cluster_ptr->grouping(); + if (!grouping) { + return 0.0; + } + + double total_charge = 0.0; + int valid_plane_count = 0; + + const auto wpid_val = wpid(); + const int apa = wpid_val.apa(); + const int face = wpid_val.face(); + + // Process each plane (U=0, V=1, W=2) + for (int plane = 0; plane < 3; plane++) { + double plane_charge = 0.0; + bool plane_has_data = false; + + // Get wire ranges for this plane + int wire_min, wire_max; + switch (plane) { + case 0: // U plane + wire_min = u_wire_index_min(); + wire_max = u_wire_index_max(); + break; + case 1: // V plane + wire_min = v_wire_index_min(); + wire_max = v_wire_index_max(); + break; + case 2: // W plane + wire_min = w_wire_index_min(); + wire_max = w_wire_index_max(); + break; + default: + continue; + } + + // Check if this plane has valid wire ranges + if (wire_min >= wire_max) { + continue; + } + + // Iterate through time slices for this blob + int time_slice = slice_index_min(); + int num_dead_wire = 0; + // Iterate through wires in this plane + for (int wire_index = wire_min; wire_index < wire_max; wire_index++) { + // Check if wire is dead + if (grouping->is_wire_dead(apa, face, plane, wire_index, time_slice)) { + num_dead_wire++; + } + + // Get wire charge + auto charge_pair = grouping->get_wire_charge(apa, face, plane, wire_index, time_slice); + double charge = charge_pair.first; + if (charge > 0) { // Only 
count positive charges + plane_charge += charge; + plane_has_data = true; + } + } + if (num_dead_wire > 1 || num_dead_wire == wire_max - wire_min) plane_has_data = false; + + if (plane_has_data) { + total_charge += plane_charge; + valid_plane_count++; + } + } + + // Average across valid planes (equivalent to prototype's division by count) + if (valid_plane_count > 0) { + total_charge /= valid_plane_count; + } + + return total_charge; +} + +double Blob::estimate_minimum_charge() const { + const Cluster* cluster_ptr = this->cluster(); + if (!cluster_ptr) { + return 1e9; + } + + const Grouping* grouping = cluster_ptr->grouping(); + if (!grouping) { + return 1e9; + } + + double min_charge = 1e9; + const auto wpid_val = wpid(); + const int apa = wpid_val.apa(); + const int face = wpid_val.face(); + + // Process each plane (U=0, V=1, W=2) + for (int plane = 0; plane < 3; plane++) { + double plane_charge = 0.0; + bool plane_has_data = false; + + // Get wire ranges for this plane + int wire_min, wire_max; + switch (plane) { + case 0: // U plane + wire_min = u_wire_index_min(); + wire_max = u_wire_index_max(); + break; + case 1: // V plane + wire_min = v_wire_index_min(); + wire_max = v_wire_index_max(); + break; + case 2: // W plane + wire_min = w_wire_index_min(); + wire_max = w_wire_index_max(); + break; + default: + continue; + } + + // Check if this plane has valid wire ranges + if (wire_min >= wire_max) { + continue; // Skip this plane (equivalent to bad_planes check) + } + + // Iterate through time slices for this blob + int time_slice = slice_index_min(); + int num_dead_wire = 0; + // Iterate through wires in this plane + for (int wire_index = wire_min; wire_index < wire_max; wire_index++) { + // Check if wire is dead (equivalent to bad_planes check) + if (grouping->is_wire_dead(apa, face, plane, wire_index, time_slice)) { + num_dead_wire++; + } + + // Get wire charge + auto charge_pair = grouping->get_wire_charge(apa, face, plane, wire_index, time_slice); + double 
charge = charge_pair.first; + + if (charge > 0) { // Only count positive charges + plane_charge += charge; + plane_has_data = true; + } + } + if (num_dead_wire > 1 || num_dead_wire == wire_max - wire_min) plane_has_data = false; + + + // Update minimum charge if this plane has data + if (plane_has_data && plane_charge < min_charge) { + min_charge = plane_charge; + } + } + + return min_charge; +} + +double Blob::get_wire_charge(int plane, const int_t wire_index) const { + const Cluster* cluster_ptr = this->cluster(); + if (!cluster_ptr) { + return 0.0; + } + + const Grouping* grouping = cluster_ptr->grouping(); + if (!grouping) { + return 0.0; + } + + const auto wpid_val = wpid(); + const int apa = wpid_val.apa(); + const int face = wpid_val.face(); + + // Get charge for the middle time slice of this blob as representative + const int time_slice = slice_index_min(); + + auto charge_pair = grouping->get_wire_charge(apa, face, plane, wire_index, time_slice); + return charge_pair.first; +} + +double Blob::get_wire_charge_error(int plane, const int_t wire_index) const { + const Cluster* cluster_ptr = this->cluster(); + if (!cluster_ptr) { + return 1e12; // Large error indicates no data + } + + const Grouping* grouping = cluster_ptr->grouping(); + if (!grouping) { + return 1e12; // Large error indicates no data + } + + const auto wpid_val = wpid(); + const int apa = wpid_val.apa(); + const int face = wpid_val.face(); + + // Get charge error for the middle time slice of this blob as representative + const int time_slice = slice_index_min(); + + auto charge_pair = grouping->get_wire_charge(apa, face, plane, wire_index, time_slice); + return charge_pair.second; +} + + + void Facade::sort_blobs(std::vector& blobs) { std::sort(blobs.rbegin(), blobs.rend(), blob_less); } void Facade::sort_blobs(std::vector& blobs) { std::sort(blobs.rbegin(), blobs.rend(), blob_less); } diff --git a/clus/src/Facade_Cluster.cxx b/clus/src/Facade_Cluster.cxx index 3de53077b..13a1a1e5e 100644 --- 
a/clus/src/Facade_Cluster.cxx +++ b/clus/src/Facade_Cluster.cxx @@ -1,29 +1,30 @@ #include "WireCellClus/Facade_Blob.h" #include "WireCellClus/Facade_Cluster.h" #include "WireCellClus/Facade_Grouping.h" +#include "WireCellClus/Graphs.h" #include "WireCellUtil/Array.h" #include -#include -#include -#include -#include #include "WireCellUtil/Logging.h" +#include "make_graphs.h" + // The original developers do not care. #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wparentheses" using namespace WireCell; +using namespace WireCell::Clus; +using namespace WireCell::Clus::Graphs; using namespace WireCell::PointCloud; -using namespace WireCell::PointCloud::Facade; +using namespace WireCell::Clus::Facade; // using WireCell::PointCloud::Dataset; using namespace WireCell::PointCloud::Tree; // for "Points" node value type // using WireCell::PointCloud::Tree::named_pointclouds_t; - +using WireCell::Clus::Graphs::Weighted::GraphAlgorithms; using spdlog::debug; @@ -36,6 +37,7 @@ using spdlog::debug; + std::ostream& Facade::operator<<(std::ostream& os, const Facade::Cluster& cluster) { const auto uvwt_min = cluster.get_uvwt_min(); @@ -51,55 +53,214 @@ std::ostream& Facade::operator<<(std::ostream& os, const Facade::Cluster& cluste return os; } -Grouping* Cluster::grouping() { return this->m_node->parent->value.template facade(); } -const Grouping* Cluster::grouping() const { return this->m_node->parent->value.template facade(); } - +Grouping* Cluster::grouping() +{ + return this->m_node->parent->value.template facade(); +} +const Grouping* Cluster::grouping() const +{ + return this->m_node->parent->value.template facade(); +} -void Cluster::clear_cache() const { +void Cluster::set_default_scope(const Tree::Scope& scope) +{ + // We can not simply return if scope is unchanged as that will cause a + // crash in connect_graph_closely() functions due to bad map_mcell_* lookup. 
+ // + // if (m_default_scope == scope) { + // return; + // } - // For now, this facade does its own cache management but we forward-call - // the Mixin just to be proper. Since our ClusterCache is the null-struct, - // this is in truth pointless. - this->Mixin::clear_cache(); + m_default_scope = scope; - // The reason to keep fine-grained cache management is not all cluster users - // need all cached values and by putting them all in fill_cache() we'd spoil - // fine-grained laziness. The cost we pay is that every single cached data - // element is a chance to introduce a cache bug. + // Clear caches that depend on the scope + clear_cache(); // Why is this here??? It does not do what the comment says. + // It clears all cache. This side-effect is needed even if + // the default scope is unchanged. - // Reset time-blob mapping - m_time_blob_map.clear(); - - // Reset blob-indices mapping - m_map_mcell_indices.clear(); - - // Reset hull data - m_hull_points.clear(); - m_hull_calculated = false; - - // Reset length and point count - m_length = 0; - m_npoints = 0; - // Reset PCA data - m_pca_calculated = false; - m_center = geo_point_t(); - for(int i = 0; i < 3; i++) { - m_pca_axis[i] = geo_vector_t(); - m_pca_values[i] = 0; + + // The PCA cache is only thing that directly depends on scope but it is not + // enough to just clear that... + // cache().pca.reset(); + // ... as connect_graph_closely() still breaks. + // For now, we leave the mystery unsolved. 
+ +} + +void Cluster::set_scope_filter(const Tree::Scope& scope, bool flag) +{ + // Set the scope filter for the given scope + m_map_scope_filter[scope.hash()] = flag; +} + +bool Cluster::get_scope_filter(const Tree::Scope& scope) const +{ + auto it = m_map_scope_filter.find(scope.hash()); + if (it == m_map_scope_filter.end()){ + return false; + } + return it->second; +} + + +void Cluster::set_scope_transform(const Tree::Scope& scope, const std::string& transform_name) +{ + // Set the scope transform for the given scope + m_map_scope_transform[scope.hash()] = transform_name; +} + +std::string Cluster::get_scope_transform(Tree::Scope scope) const +{ + Scope the_scope; + if (scope == the_scope) { // no scope given + scope = m_default_scope; + } + auto it = m_map_scope_transform.find(scope.hash()); + if (it == m_map_scope_transform.end()){ + return "Unity"; + } + return it->second; +} + +const Tree::Scope& Cluster::get_scope(const std::string& scope_name) const +{ + if (m_scopes.find(scope_name) == m_scopes.end()) { + raise("Cluster::scope: no such scope: %s", scope_name); + } + return m_scopes.at(scope_name); +} + + +void Cluster::set_cluster_id(int cid) +{ + this->set_ident(cid); +} + +int Cluster::get_cluster_id() const +{ + return this->ident(); +} + +void Cluster::default_scope_from(const Cluster& other) +{ + auto scope = other.get_default_scope(); + this->set_default_scope(scope); + if (other.get_scope_filter(scope)) { + this->set_scope_filter(scope, other.get_scope_filter(scope)); } + this->set_scope_transform(scope, other.get_scope_transform(scope)); +} + +void Cluster::from(const Cluster& other) +{ + this->default_scope_from(other); + this->flags_from(other); - // Reset graph and path finding data - m_graph.reset(); - m_parents.clear(); - m_distances.clear(); - m_source_pt_index = -1; - m_path_wcps.clear(); - m_path_mcells.clear(); + // Copy scalar data from cluster_scalar point cloud + const auto& other_lpcs = other.value().local_pcs(); + auto it = 
other_lpcs.find("cluster_scalar"); + if (it != other_lpcs.end()) { + const auto& other_scalar_pc = it->second; + auto& this_lpcs = this->value().local_pcs(); + auto& this_scalar_pc = this_lpcs["cluster_scalar"]; + + // Copy all arrays from the other cluster's scalar data + for (const auto& key : other_scalar_pc.keys()) { + auto arr = other_scalar_pc.get(key); + auto arr1 = this_scalar_pc.get(key); + if (arr && !arr1) { + this_scalar_pc.add(key, *arr); + } + } + } +} + + +void Cluster::set_cluster_t0(double t0) +{ + this->set_scalar("cluster_t0", t0); +} +double Cluster::get_cluster_t0() const +{ + return this->get_scalar("cluster_t0", 0); +} + + +std::vector Cluster::add_corrected_points( + Clus::IPCTransformSet::pointer pcts, + const std::string &correction_name) +{ + const double t0 = this->get_cluster_t0(); + + // std::cout << "T0: " << t0 << " " << this->get_flash().time() << std::endl; + + std::vector blob_passed; + blob_passed.resize(children().size(), 0); // not passed by default + if (correction_name == "T0Correction") { + const auto& pct = pcts->pc_transform("T0Correction"); + for (size_t iblob = 0; iblob < this->children().size(); ++iblob) { + Blob* blob = this->children().at(iblob); + auto &lpc_3d = blob->local_pcs().at("3d"); + auto corrected_points = pct->forward(lpc_3d, {"x", "y", "z"}, + {"x_t0cor","y_t0cor","z_t0cor"}, t0, + blob->wpid().face(), blob->wpid().apa()); + lpc_3d.add("x_t0cor", *corrected_points.get("x_t0cor")); // only add x_t0cor + auto filter_result = pct->filter(corrected_points, + {"x_t0cor", "y_t0cor", "z_t0cor"}, + t0, blob->wpid().face(), blob->wpid().apa()); + auto arr_filter = filter_result.get("filter")->elements(); + for (size_t ipt = 0; ipt < arr_filter.size(); ++ipt) { + if (arr_filter[ipt] == 1) { + blob_passed[iblob] = 1; + break; // only one point pass is enough + } + } + } + // the new scope should have the same name as the correction name. This is how the code can find corrections in the code ... 
+ m_scopes["T0Correction"] = {"3d", {"x_t0cor", "y", "z"}}; // add the new scope + } else { + raise("Cluster::add_corrected_points: no such correction: %s", correction_name); + } + return blob_passed; +} + + + +// Called first time cache() is called and the cache is invalid. +void Cluster::fill_cache(ClusterCache& cache) const +{ + // There is nothing generic to "pre fill". Instead, each individual method + // will fill the cache as needed. +} + +// blob wpids ... +std::vector Cluster::wpids_blob() const +{ + auto& wpids = cache().blob_wpids; + if (wpids.empty()) { + for (const Blob* blob : this->children()) { + wpids.push_back(blob->wpid()); + } + } + return wpids; +} + +WirePlaneId Cluster::wpid(const geo_point_t& point) const +{ + // find the closest point_index to this point + auto point_index = get_closest_point_index(point); + + // std::cout << "point_index " << point_index << " " << points()[0].size() << " " << wpids().size() << std::endl; + + // return the wpid for this point_index + return wire_plane_id(point_index); } -void Cluster::print_blobs_info() const{ + +void Cluster::print_blobs_info() const +{ for (const Blob* blob : children()) { std::cout << "U: " << blob->u_wire_index_min() << " " << blob->u_wire_index_max() << " V: " << blob->v_wire_index_min() << " " << blob->v_wire_index_max() @@ -111,7 +272,8 @@ void Cluster::print_blobs_info() const{ } } -std::string Cluster::dump() const{ +std::string Cluster::dump() const +{ const auto [u_min, v_min, w_min, t_min] = get_uvwt_min(); const auto [u_max, v_max, w_max, t_max] = get_uvwt_max(); std::stringstream ss; @@ -121,44 +283,16 @@ std::string Cluster::dump() const{ return ss.str(); } -std::string Cluster::dump_graph() const{ - if (m_graph==nullptr){ - return "empty graph"; - } - auto g = *m_graph; - std::stringstream ss; - - ss << "MCUGraph:" << std::endl; - ss << "Vertices: " << num_vertices(g) << std::endl; - ss << "Edges: " << num_edges(g) << std::endl; - - ss << "Vertex Properties:" << std::endl; - 
auto vrange = boost::vertices(g); - for (auto vit = vrange.first; vit != vrange.second; ++vit) { - auto v = *vit; - ss << "Vertex " << v << ": Index = " << g[v].index << point3d(g[v].index) << std::endl; - } - - ss << "Edge Properties:" << std::endl; - auto erange = boost::edges(g); - auto weightMap = get(boost::edge_weight, g); - for (auto eit = erange.first; eit != erange.second; ++eit) { - auto e = *eit; - auto src = source(e, g); - auto tgt = target(e, g); - ss << "Edge " << e << " [ " << point3d(g[src].index) << ", " << point3d(g[tgt].index) << " ]" << ": Distance = " << get(weightMap, e) << std::endl; - } - return ss.str(); -} - const Cluster::time_blob_map_t& Cluster::time_blob_map() const { - if (m_time_blob_map.empty()) { + auto& tbm = cache().time_blob_map; + if (tbm.empty()) { for (const Blob* blob : children()) { - m_time_blob_map[blob->slice_index_min()].insert(blob); + auto wpid = blob->wpid(); + tbm[wpid.apa()][wpid.face()][blob->slice_index_min()].insert(blob); } } - return m_time_blob_map; + return tbm; } geo_point_t Cluster::get_furthest_wcpoint(geo_point_t old_wcp, geo_point_t dir, const double step, @@ -171,7 +305,7 @@ geo_point_t Cluster::get_furthest_wcpoint(geo_point_t old_wcp, geo_point_t dir, geo_point_t orig_dir = dir; orig_dir = orig_dir.norm(); int counter = 0; - geo_point_t drift_dir(1, 0, 0); + geo_point_t drift_dir_abs(1, 0, 0); double old_dis = 15 * units::cm; @@ -197,10 +331,10 @@ geo_point_t Cluster::get_furthest_wcpoint(geo_point_t old_wcp, geo_point_t dir, bool flag_para = false; - double angle_1 = fabs(dir1.angle(drift_dir) - 3.1415926 / 2.) * 180. / 3.1415926; - double angle_2 = fabs(dir.angle(drift_dir) - 3.1415926 / 2.) / 3.1415926 * 180.; - double angle_3 = fabs(dir2.angle(drift_dir) - 3.1415926 / 2.) / 3.1415926 * 180.; - double angle_4 = fabs(dir3.angle(drift_dir) - 3.1415926 / 2.) / 3.1415926 * 180.; + double angle_1 = fabs(dir1.angle(drift_dir_abs) - 3.1415926 / 2.) * 180. 
/ 3.1415926; + double angle_2 = fabs(dir.angle(drift_dir_abs) - 3.1415926 / 2.) / 3.1415926 * 180.; + double angle_3 = fabs(dir2.angle(drift_dir_abs) - 3.1415926 / 2.) / 3.1415926 * 180.; + double angle_4 = fabs(dir3.angle(drift_dir_abs) - 3.1415926 / 2.) / 3.1415926 * 180.; if (angle_1 < 5 && angle_2 < 5 || angle_3 < 2.5 && angle_4 < 2.5) flag_para = true; @@ -292,10 +426,10 @@ geo_point_t Cluster::get_furthest_wcpoint(geo_point_t old_wcp, geo_point_t dir, dir3.set(old_wcp.x() - orig_point.x(), old_wcp.y() - orig_point.y(), old_wcp.z() - orig_point.z()); flag_para = false; - double angle_1 = fabs(dir1.angle(drift_dir) - 3.1415926 / 2.) * 180. / 3.1415926; - double angle_2 = fabs(dir.angle(drift_dir) - 3.1415926 / 2.) / 3.1415926 * 180.; - double angle_3 = fabs(dir2.angle(drift_dir) - 3.1415926 / 2.) / 3.1415926 * 180.; - double angle_4 = fabs(dir3.angle(drift_dir) - 3.1415926 / 2.) / 3.1415926 * 180.; + double angle_1 = fabs(dir1.angle(drift_dir_abs) - 3.1415926 / 2.) * 180. / 3.1415926; + double angle_2 = fabs(dir.angle(drift_dir_abs) - 3.1415926 / 2.) / 3.1415926 * 180.; + double angle_3 = fabs(dir2.angle(drift_dir_abs) - 3.1415926 / 2.) / 3.1415926 * 180.; + double angle_4 = fabs(dir3.angle(drift_dir_abs) - 3.1415926 / 2.) / 3.1415926 * 180.; if (angle_1 < 7.5 && angle_2 < 7.5 || angle_3 < 5 && angle_4 < 5 && (angle_1 < 12.5 && angle_2 < 12.5)) flag_para = true; @@ -346,66 +480,133 @@ geo_point_t Cluster::get_furthest_wcpoint(geo_point_t old_wcp, geo_point_t dir, return old_wcp; } +// This function works with raw points internally, different from most of the functions ... 
void Cluster::adjust_wcpoints_parallel(size_t& start_idx, size_t& end_idx) const { const auto& winds = wire_indices(); - geo_point_t start_p = point3d(start_idx); - geo_point_t end_p = point3d(end_idx); + geo_point_t start_p = point3d_raw(start_idx); + geo_point_t end_p = point3d_raw(end_idx); + + WirePlaneId start_wpid = wire_plane_id(start_idx); + WirePlaneId end_wpid = wire_plane_id(end_idx); double low_x = start_p.x() - 1 * units::cm; if (end_p.x() - 1 * units::cm < low_x) low_x = end_p.x() - 1 * units::cm; double high_x = start_p.x() + 1 * units::cm; if (end_p.x() + 1 * units::cm > high_x) high_x = end_p.x() + 1 * units::cm; - // assumes u, v, w - size_t low_idxes[3] = {start_idx, start_idx, start_idx}; - size_t high_idxes[3] = {end_idx, end_idx, end_idx}; + // Create map to track lowest wire indices for each wire plane ID + std::map> map_wpid_low_indices; + std::map> map_wpid_high_indices; + + // Initialize all elements in the arrays + map_wpid_low_indices[start_wpid] = {start_idx, start_idx, start_idx}; + map_wpid_high_indices[start_wpid] = {start_idx, start_idx, start_idx}; + if (end_wpid != start_wpid){ + map_wpid_low_indices[end_wpid] = {end_idx,end_idx,end_idx}; + map_wpid_high_indices[end_wpid] = {end_idx,end_idx,end_idx}; + } + + + + // assumes u, v, w, need to expand to includ wpid ??? 
for (int pt_idx = 0; pt_idx != npoints(); pt_idx++) { - geo_point_t current = point3d(pt_idx); + geo_point_t current = point3d_raw(pt_idx); + WirePlaneId wpid = wire_plane_id(pt_idx); + // WirePlaneId wpid = start_wpid; + + if (pt_idx % 1000 == 0) + // std::cout << "Test: " << pt_idx << " " << wpid << npoints() << std::endl; + if (current.x() > high_x || current.x() < low_x) continue; - for (size_t pind = 0; pind != 3; ++pind) { - if (winds[pind][pt_idx] < winds[pind][low_idxes[pind]]) { - low_idxes[pind] = pt_idx; + + if (map_wpid_low_indices.find(wpid) == map_wpid_low_indices.end()) { + for (size_t pind = 0; pind != 3; ++pind) { + map_wpid_low_indices[wpid][pind] = pt_idx; } - if (winds[pind][pt_idx] > winds[pind][high_idxes[pind]]) { - high_idxes[pind] = pt_idx; + }else { + for (size_t pind = 0; pind != 3; ++pind) { + if (winds[pind][pt_idx] < winds[pind][map_wpid_low_indices[wpid][pind]]) { + map_wpid_low_indices[wpid][pind] = pt_idx; + } } } + if(map_wpid_high_indices.find(wpid) == map_wpid_high_indices.end()) { + for (size_t pind = 0; pind != 3; ++pind) { + map_wpid_high_indices[wpid][pind] = pt_idx; + } + }else { + for (size_t pind = 0; pind != 3; ++pind) { + if (winds[pind][pt_idx] > winds[pind][map_wpid_high_indices[wpid][pind]]) { + map_wpid_high_indices[wpid][pind] = pt_idx; + } + } + } } - std::vector indices, temp_indices; - std::set indices_set; - geo_point_t test_p; - bool flags[3] = {true, true, true}; - - /// HAIWANG: keeping the WCP original ordering - if (winds[0][high_idxes[0]] - winds[0][low_idxes[0]] < winds[1][high_idxes[1]] - winds[1][low_idxes[1]]) { - if (winds[0][high_idxes[0]] - winds[0][low_idxes[0]] < winds[2][high_idxes[2]] - winds[2][low_idxes[2]]) { - flags[0] = false; - } - else { - flags[2] = false; - } - } - else { - if (winds[1][high_idxes[1]] - winds[1][low_idxes[1]] < winds[2][high_idxes[2]] - winds[2][low_idxes[2]]) { - flags[1] = false; + { + // Calculate the size of the range for each wire plane across all WPIDs + int 
index_diff_sum[3] = {0, 0, 0}; + // Find minimum and maximum indices for each plane across all WPIDs + for (auto it = map_wpid_low_indices.begin(); it != map_wpid_low_indices.end(); ++it) { + const WirePlaneId& wpid = it->first; + const auto& low_indices = it->second; + const auto& high_indices = map_wpid_high_indices[wpid]; + + for (size_t pind = 0; pind < 3; ++pind) { + index_diff_sum[pind] += winds[pind][high_indices[pind]] - winds[pind][low_indices[pind]]; + } } - else { - flags[2] = false; + + // Create pairs of (index_difference, plane_index) for sorting + std::vector> plane_diffs; + for (int i = 0; i < 3; ++i) { + plane_diffs.push_back({index_diff_sum[i], i}); } + // Sort by index difference (ascending) + std::sort(plane_diffs.begin(), plane_diffs.end()); + // Set flag to false for the plane with smallest difference + // (keeping the two planes with largest differences) + flags[plane_diffs[0].second] = false; } + + + std::vector indices, temp_indices; + std::set indices_set; + geo_point_t test_p; for (size_t pind = 0; pind != 3; ++pind) { - if (flags[pind]) { - geo_point_t low_p = point3d(low_idxes[pind]); - geo_point_t high_p = point3d(high_idxes[pind]); - std::vector test_points = {low_p, high_p, start_p, end_p}; - for (const auto& test_point : test_points) { - temp_indices = get_closest_2d_index(test_point, 0.5 * units::cm, pind); + for (auto it = map_wpid_low_indices.begin(); it != map_wpid_low_indices.end(); ++it) { + const WirePlaneId& wpid = it->first; + const auto& low_idxes = it->second; + const auto& high_idxes = map_wpid_high_indices[wpid]; + if (flags[pind]) { + // raw data points ... 
+ geo_point_t low_p = point3d_raw(low_idxes[pind]); + geo_point_t high_p = point3d_raw(high_idxes[pind]); + std::vector test_points = {low_p, high_p}; + for (const auto& test_point : test_points) { + temp_indices = get_closest_2d_index(test_point, 0.5 * units::cm, wpid.apa(), wpid.face(), pind); + std::copy(temp_indices.begin(), temp_indices.end(), inserter(indices_set, indices_set.begin())); + } + } + } + { + auto wpid = start_wpid; + if (flags[pind]) { + auto test_point = start_p; + temp_indices = get_closest_2d_index(test_point, 0.5 * units::cm, wpid.apa(), wpid.face(), pind); + std::copy(temp_indices.begin(), temp_indices.end(), inserter(indices_set, indices_set.begin())); + } + } + { + auto wpid = end_wpid; + if (flags[pind]) { + auto test_point = end_p; + temp_indices = get_closest_2d_index(test_point, 0.5 * units::cm, wpid.apa(), wpid.face(), pind); std::copy(temp_indices.begin(), temp_indices.end(), inserter(indices_set, indices_set.begin())); } } @@ -421,9 +622,12 @@ void Cluster::adjust_wcpoints_parallel(size_t& start_idx, size_t& end_idx) const for (size_t i = 0; i != indices.size(); i++) { // std::cout << indices.at(i) << std::endl; for (size_t j = i + 1; j != indices.size(); j++) { - double value = pow(winds[0][indices.at(i)] - winds[0][indices.at(j)], 2) + - pow(winds[1][indices.at(i)] - winds[1][indices.at(j)], 2) + - pow(winds[2][indices.at(i)] - winds[2][indices.at(j)], 2); + // double value = pow(winds[0][indices.at(i)] - winds[0][indices.at(j)], 2) + + // pow(winds[1][indices.at(i)] - winds[1][indices.at(j)], 2) + + // pow(winds[2][indices.at(i)] - winds[2][indices.at(j)], 2); + double value = pow(point3d_raw(indices.at(i)).x() - point3d_raw(indices.at(j)).x(), 2) + + pow(point3d_raw(indices.at(i)).y() - point3d_raw(indices.at(j)).y(), 2) + + pow(point3d_raw(indices.at(i)).z() - point3d_raw(indices.at(j)).z(), 2); if (value > sum_value) { // old_dis = dis; @@ -435,8 +639,8 @@ void Cluster::adjust_wcpoints_parallel(size_t& start_idx, size_t& 
end_idx) const new_start_idx = indices.at(j); new_end_idx = indices.at(i); } - geo_point_t new_start_p = point3d(new_start_idx); - geo_point_t new_end_p = point3d(new_end_idx); + geo_point_t new_start_p = point3d_raw(new_start_idx); + geo_point_t new_end_p = point3d_raw(new_end_idx); if (sqrt(pow(new_start_p.x() - start_p.x(), 2) + pow(new_start_p.y() - start_p.y(), 2) + pow(new_start_p.z() - start_p.z(), 2)) < 30 * units::cm && @@ -451,89 +655,76 @@ void Cluster::adjust_wcpoints_parallel(size_t& start_idx, size_t& end_idx) const } } } + + } -bool Cluster::construct_skeleton(const bool use_ctpc) -{ - if (m_path_wcps.size() > 0) return false; - // Calc_PCA(); - - // WCP::WCPointCloud& cloud = point_cloud->get_cloud(); - // WCPointCloud::WCPoint highest_wcp = cloud.pts[0]; - // WCPointCloud::WCPoint lowest_wcp = cloud.pts[0]; - geo_point_t highest_wcp = point3d(0); - geo_point_t lowest_wcp = point3d(0); - int highest_index = 0; - int lowest_index = 0; - - // geo_point_t main_dir(PCA_axis[0].x, PCA_axis[0].y, PCA_axis[0].z); - // main_dir.SetMag(1); - // TVector3 temp_pt(highest_wcp.x - center.x, highest_wcp.y - center.y, highest_wcp.z - center.z); - // double highest_value = temp_pt.Dot(main_dir); - // double lowest_value = highest_value; - geo_point_t main_dir = get_pca_axis(0); - main_dir = main_dir.norm(); - geo_point_t center = get_center(); - geo_point_t temp_pt(highest_wcp.x() - center.x(), highest_wcp.y() - center.y(), highest_wcp.z() - center.z()); - double highest_value = temp_pt.dot(main_dir); - double lowest_value = highest_value; - - // for (size_t i = 1; i < cloud.pts.size(); i++) { - // temp_pt.SetXYZ(cloud.pts[i].x - center.x, cloud.pts[i].y - center.y, cloud.pts[i].z - center.z); - // double value = temp_pt.Dot(main_dir); - // if (value > highest_value) { - // highest_value = value; - // highest_wcp = cloud.pts[i]; - // } - // else if (value < lowest_value) { - // lowest_value = value; - // lowest_wcp = cloud.pts[i]; - // } +const Cluster::sv2d_t& 
Cluster::sv2d(const int apa, const int face, const size_t plane) const +{ + // if (wpid.layer()!=kAllLayers) { + // raise("Cluster::sv2d() wpid.layer() {} != kAllLayers"); // } - for (int i = 1; i < npoints(); i++) { - temp_pt.set(point3d(i).x() - center.x(), point3d(i).y() - center.y(), point3d(i).z() - center.z()); - double value = temp_pt.dot(main_dir); - if (value > highest_value) { - highest_value = value; - highest_wcp = point3d(i); - highest_index = i; - } - else if (value < lowest_value) { - lowest_value = value; - lowest_wcp = point3d(i); - lowest_index = i; + const WirePlaneId wpid(kAllLayers, face, apa); + const Tree::Scope scope = {"3d", {m_scope2ds_prefix[plane]+"_x", m_scope2ds_prefix[plane]+"_y"}, 0, wpid.name()}; + return m_node->value.scoped_view(scope, + [&](const Points::node_t& node) { + const auto& lpcs = node.value.local_pcs(); + const auto& it = lpcs.find("scalar"); + if (it == lpcs.end()) { + return false; + } + const auto& pc = it->second; + const auto& wpida = pc.get("wpid"); + const auto wpidv = wpida->elements(); + if (wpidv[0] == wpid.ident()) { + return true; + } + // std::cerr << "Cluster::sv2d() wpid mismatch: " << wpidv[0] << " != " << wpid.ident() << std::endl; + return false; } - } - - dijkstra_shortest_paths(highest_index, use_ctpc); - cal_shortest_path(lowest_index); - return true; + ); } -const Cluster::sv2d_t& Cluster::sv2d(const size_t plane) const { - return m_node->value.scoped_view(scope2ds[plane]); - } - -const Cluster::kd2d_t& Cluster::kd2d(const size_t plane) const +const Cluster::kd2d_t& Cluster::kd2d(const int apa, const int face, const size_t plane) const { - const auto& sv = sv2d(plane); + const auto& sv = sv2d(apa, face, plane); return sv.kd(); } -std::vector Cluster::get_closest_2d_index(const geo_point_t& p, const double search_radius, const int plane) const { +// this point p needs to be raw point, since this is 2D PC ... 
+std::vector Cluster::get_closest_2d_index(const geo_point_t& p, const double search_radius, const int apa, const int face, const int plane) const { - const auto& tp = grouping()->get_params(); - double angle_uvw[3] = {tp.angle_u, tp.angle_v, tp.angle_w}; + auto angles = grouping()->wire_angles(apa,face); + double angle_uvw[3]; + angle_uvw[0] = std::get<0>(angles); + angle_uvw[1] = std::get<1>(angles); + angle_uvw[2] = std::get<2>(angles); double x = p.x(); double y = cos(angle_uvw[plane]) * p.z() - sin(angle_uvw[plane]) * p.y(); std::vector query_pt = {x, y}; - const auto& skd = kd2d(plane); + const auto& skd = kd2d(apa, face, plane); auto ret_matches = skd.radius(search_radius * search_radius, query_pt); + // local indices ... std::vector ret_index(ret_matches.size()); + // 2d scoped view ... + const auto& sv2 = sv2d(apa, face, plane); + // 3d scoped view + const auto& sv3 = sv3d(); + + const auto error_index = std::numeric_limits::max(); + + // use 2D local idx --> global-->idx --> 3D local index for (size_t i = 0; i != ret_matches.size(); i++) { - ret_index.at(i) = ret_matches.at(i).first; + size_t global_index = sv2.local_to_global(ret_matches.at(i).first); + ret_index.at(i) = sv3.global_to_local(global_index); + if (global_index == error_index || ret_index.at(i) == error_index) { + throw std::runtime_error("Failed to convert from local to global index"); + } + + // std::cout << "Test: " << ret_index.at(i) << " " << global_index << " " << ret_index.at(i) << std::endl; + // ret_index.at(i) = ret_matches.at(i).first; } return ret_index; @@ -541,47 +732,40 @@ std::vector Cluster::get_closest_2d_index(const geo_point_t& p, const do std::vector Cluster::is_connected(const Cluster& c, const int offset) const { + auto& time_blob_map1 = c.time_blob_map(); + auto& time_blob_map2 = time_blob_map(); std::vector ret; - for (const auto& [bad_start, badblobs] : c.time_blob_map()) { - for (const auto* badblob : badblobs) { - auto bad_end = badblob->slice_index_max(); // not 
inclusive - for (const auto& [good_start, goodblobs] : time_blob_map()) { - for (const auto* goodblob : goodblobs) { - auto good_end = goodblob->slice_index_max(); // not inclusive - if (good_end <= bad_start || good_start >= bad_end) { - continue; - } - if (goodblob->overlap_fast(*badblob, offset)) { - ret.push_back(goodblob); + + for (auto it = time_blob_map1.begin(); it != time_blob_map1.end(); it++){ + int apa = it->first; + if (time_blob_map2.find(apa) == time_blob_map2.end()) continue; // if the second one does not contain it ... + for (auto it1 = it->second.begin(); it1 != it->second.end(); it1++){ + int face = it1->first; // face + if (time_blob_map2.at(apa).find(face) == time_blob_map2.at(apa).end()) continue; + + for (const auto& [bad_start, badblobs] : time_blob_map1.at(apa).at(face)) { + for (const auto* badblob : badblobs) { + auto bad_end = badblob->slice_index_max(); // not inclusive + for (const auto& [good_start, goodblobs] : time_blob_map2.at(apa).at(face)) { + for (const auto* goodblob : goodblobs) { + auto good_end = goodblob->slice_index_max(); // not inclusive + if (good_end <= bad_start || good_start >= bad_end) { + continue; + } + if (goodblob->overlap_fast(*badblob, offset)) { + ret.push_back(goodblob); + } + } + } } - } } } + return ret; } -const Blob* Cluster::get_first_blob() const -{ - if (time_blob_map().empty()) { - raise("empty cluster has no first blob"); - } - return *(time_blob_map().begin()->second.begin()); -} - -const Blob* Cluster::get_last_blob() const -{ - if (time_blob_map().empty()) { - raise("empty cluster has no last blob"); - } - return *(time_blob_map().rbegin()->second.rbegin()); -} - -size_t Cluster::get_num_time_slices() const -{ - return time_blob_map().size(); -} std::pair Cluster::get_closest_point_along_vec(geo_point_t& p_test1, geo_point_t dir, double test_dis, double dis_step, double angle_cut, @@ -617,42 +801,232 @@ std::pair Cluster::get_closest_point_along_vec(geo_point_t& return std::make_pair(min_point, 
min_dis1); } -const Cluster::sv3d_t& Cluster::sv3d() const { return m_node->value.scoped_view(scope); } - -const Cluster::kd3d_t& Cluster::kd3d() const -{ - const auto& sv = m_node->value.scoped_view(scope); - return sv.kd(); -} -const Cluster::kd3d_t& Cluster::kd() const -{ - const auto& sv = m_node->value.scoped_view(scope); - return sv.kd(); +const Cluster::sv3d_t& Cluster::sv3d() const { + return sv(); // m_node->value.scoped_view(m_default_scope); } +const Cluster::kd3d_t& Cluster::kd3d() const { return sv3d().kd(); } +const Cluster::kd3d_t& Cluster::kd() const { return kd3d(); } geo_point_t Cluster::point3d(size_t point_index) const { return kd3d().point3d(point_index); } -geo_point_t Cluster::point(size_t point_index) const { return point3d(point_index); } + + +const Cluster::sv3d_t& Cluster::sv3d_raw() const { + return sv(m_scope_3d_raw); + // return m_node->value.scoped_view(m_scope_3d_raw); +} +const Cluster::kd3d_t& Cluster::kd3d_raw() const { return sv3d_raw().kd(); } +geo_point_t Cluster::point3d_raw(size_t point_index) const { return kd3d_raw().point3d(point_index); } const Cluster::points_type& Cluster::points() const { return kd3d().points(); } -int Cluster::npoints() const -{ - if (!m_npoints) { - const auto& sv = sv3d(); - m_npoints = sv.npoints(); +const Cluster::points_type& Cluster::points_raw() const { return kd3d_raw().points(); } + + +WirePlaneId Cluster::wire_plane_id(size_t point_index) const { + auto& wpids = cache().point_wpids; + if (wpids.empty()) { + wpids = points_property("wpid"); + } + return WirePlaneId(wpids[point_index]); +} + +int Cluster::wire_index(size_t point_index, int plane) const { + auto& cache_ref = cache(); + + switch(plane) { + case 0: { + if (cache_ref.point_u_wire_indices.empty()) { + cache_ref.point_u_wire_indices = points_property("uwire_index"); + } + return cache_ref.point_u_wire_indices[point_index]; + } + case 1: { + if (cache_ref.point_v_wire_indices.empty()) { + cache_ref.point_v_wire_indices = 
points_property("vwire_index"); + } + return cache_ref.point_v_wire_indices[point_index]; + } + case 2: { + if (cache_ref.point_w_wire_indices.empty()) { + cache_ref.point_w_wire_indices = points_property("wwire_index"); + } + return cache_ref.point_w_wire_indices[point_index]; + } + default: + raise("Invalid plane index: %d (must be 0, 1, or 2)", plane); + } + std::terminate(); // this is here mostly to quell compiler warnings about not returning a value. +} + +double Cluster::charge_value(size_t point_index, int plane) const { + auto& cache_ref = cache(); + + switch(plane) { + case 0: { + if (cache_ref.point_u_charges.empty()) { + cache_ref.point_u_charges = points_property("ucharge_val"); + //std::cout << "Xin4: " << cache_ref.point_u_charges.empty() << std::endl; + } + return cache_ref.point_u_charges[point_index]; + } + case 1: { + if (cache_ref.point_v_charges.empty()) { + cache_ref.point_v_charges = points_property("vcharge_val"); + } + return cache_ref.point_v_charges[point_index]; + } + case 2: { + if (cache_ref.point_w_charges.empty()) { + cache_ref.point_w_charges = points_property("wcharge_val"); + } + return cache_ref.point_w_charges[point_index]; + } + default: + raise("Invalid plane index: %d (must be 0, 1, or 2)", plane); + } + std::terminate(); // this is here mostly to quell compiler warnings about not returning a value. 
+} + +double Cluster::charge_uncertainty(size_t point_index, int plane) const { + auto& cache_ref = cache(); + + switch(plane) { + case 0: { + if (cache_ref.point_u_charge_uncs.empty()) { + cache_ref.point_u_charge_uncs = points_property("ucharge_unc"); + } + return cache_ref.point_u_charge_uncs[point_index]; + } + case 1: { + if (cache_ref.point_v_charge_uncs.empty()) { + cache_ref.point_v_charge_uncs = points_property("vcharge_unc"); + } + return cache_ref.point_v_charge_uncs[point_index]; + } + case 2: { + if (cache_ref.point_w_charge_uncs.empty()) { + cache_ref.point_w_charge_uncs = points_property("wcharge_unc"); + } + return cache_ref.point_w_charge_uncs[point_index]; + } + default: + raise("Invalid plane index: %d (must be 0, 1, or 2)", plane); + } + std::terminate(); // this is here mostly to quell compiler warnings about not returning a value. +} + +bool Cluster::is_wire_dead(size_t point_index, int plane, double dead_threshold) const { + return charge_uncertainty(point_index, plane) > dead_threshold; +} + +std::pair Cluster::calc_charge_wcp( + size_t point_index, + double charge_cut, + bool disable_dead_mix_cell) const { + + const double dead_threshold = 1e10; // Same as PointTreeBuilding + + double charge = 0; + int ncharge = 0; + + // Get exact charges for u,v,w wires using cached data + double charge_u = charge_value(point_index, 0); + double charge_v = charge_value(point_index, 1); + double charge_w = charge_value(point_index, 2); + + + + // int wire_index_u = wire_index(point_index, 0); + // int wire_index_v = wire_index(point_index, 1); + // int wire_index_w = wire_index(point_index, 2); + + bool flag_charge_u = false; + bool flag_charge_v = false; + bool flag_charge_w = false; + + // Initial flag setting based on charge threshold + if (charge_u > charge_cut) flag_charge_u = true; + if (charge_v > charge_cut) flag_charge_v = true; + if (charge_w > charge_cut) flag_charge_w = true; + + // std::cout << "Charge values: " << wire_index_u << " " << 
wire_index_v << " " << wire_index_w << " " << charge_u << ", " << charge_v << ", " << charge_w << " " << is_dead_u << " " << is_dead_v << " " << is_dead_w << " " << flag_charge_u << " " << flag_charge_v << " " << flag_charge_w << std::endl; + + if (disable_dead_mix_cell) { + // Add all charges first + charge += charge_u * charge_u; ncharge++; + charge += charge_v * charge_v; ncharge++; + charge += charge_w * charge_w; ncharge++; + + // Check for dead wires + bool is_dead_u = is_wire_dead(point_index, 0, dead_threshold); + bool is_dead_v = is_wire_dead(point_index, 1, dead_threshold); + bool is_dead_w = is_wire_dead(point_index, 2, dead_threshold); + + // Deal with bad planes - subtract dead wire contributions + if (is_dead_u) { + flag_charge_u = true; + charge -= charge_u * charge_u; ncharge--; + } + if (is_dead_v) { + flag_charge_v = true; + charge -= charge_v * charge_v; ncharge--; + } + if (is_dead_w) { + flag_charge_w = true; + charge -= charge_w * charge_w; ncharge--; + } + } else { + // Only use non-zero charges + if (charge_u == 0) flag_charge_u = true; + if (charge_v == 0) flag_charge_v = true; + if (charge_w == 0) flag_charge_w = true; + + if (charge_u != 0) { + charge += charge_u * charge_u; ncharge++; + } + if (charge_v != 0) { + charge += charge_v * charge_v; ncharge++; + } + if (charge_w != 0) { + charge += charge_w * charge_w; ncharge++; + } + } + + // Require more than one plane to be good + if (ncharge > 1) { + charge = sqrt(charge / ncharge); + } else { + charge = 0; } - return m_npoints; + + return std::make_pair(flag_charge_u && flag_charge_v && flag_charge_w, charge); } -size_t Cluster::nbpoints() const + + + + +int Cluster::npoints() const { - size_t ret = 0; - for (const auto* blob : children()) { - ret += blob->nbpoints(); + auto& n = cache().npoints; + if (!n) { + const auto& sv = sv3d(); + n = sv.npoints(); } - return ret; + return n; } + + +// size_t Cluster::nbpoints() const +// { +// size_t ret = 0; +// for (const auto* blob : 
children()) { +// ret += blob->nbpoints(); +// } +// return ret; +// } + const Cluster::wire_indices_t& Cluster::wire_indices() const { - const auto& sv = m_node->value.scoped_view(scope_wire_index); + const auto& sv = m_node->value.scoped_view(m_scope_wire_index); const auto& skd = sv.kd(); const auto& points = skd.points(); LogDebug("points size: " << points.size() << " points[0] size: " << points[0].size()); @@ -687,33 +1061,6 @@ std::pair Cluster::ndipole(const geo_point_t& point, const geo_point_t return std::make_pair(num_p1, num_p2); } -// std::pair Cluster::nprojection(const geo_point_t& point, const geo_point_t& dir, double dis) const -// { -// const auto& sv = m_node->value.scoped_view(scope); // get the kdtree -// const auto& skd = sv.kd(); -// const auto& points = skd.points(); - -// int num_p1 = 0; -// int num_p2 = 0; - -// auto rad = skd.radius(dis*dis, point); -// for (const auto& [index,_] : rad) { - -// geo_point_t dir1(points[0][index] - point.x(), -// points[1][index] - point.y(), -// points[2][index] - point.z()); - -// if (dir1.dot(dir) >= 0) { -// ++num_p1; -// } -// else{ -// ++num_p2; -// } - -// } - -// return std::make_pair(num_p1, num_p2); -// } Cluster::kd_results_t Cluster::kd_knn(int nn, const geo_point_t& query_point) const { @@ -741,6 +1088,20 @@ std::vector Cluster::kd_points(const Cluster::kd_results_t& res) co return ret; } +// std::vector Cluster::kd_points_raw(const Cluster::kd_results_t& res) +// { +// return const_cast(this)->kd_points_raw(res); +// } +// std::vector Cluster::kd_points_raw(const Cluster::kd_results_t& res) const +// { +// std::vector ret; +// const auto& points = this->points_raw(); +// for (const auto& [point_index, _] : res) { +// ret.emplace_back(points[0][point_index], points[1][point_index], points[2][point_index]); +// } +// return ret; +// } + // can't const_cast a vector. 
template std::vector mutify(const std::vector& c) @@ -764,6 +1125,8 @@ std::vector Cluster::kd_blobs() const return ret; } + + Blob* Cluster::blob_with_point(size_t point_index) { return const_cast(const_cast(this)->blob_with_point(point_index)); @@ -1236,128 +1599,190 @@ geo_point_t Cluster::vhough_transform(const geo_point_t& origin, const double di return {sth * cos(phi), sth * sin(phi), cth}; } -std::tuple Cluster::get_uvwt_min() const +std::tuple Cluster::get_uvwt_min(int apa, int face) const { - std::tuple ret; - bool first = true; + std::set u_set, v_set, w_set, t_set; for (const auto* blob : children()) { - const int u = blob->u_wire_index_min(); - const int v = blob->v_wire_index_min(); - const int w = blob->w_wire_index_min(); - const int t = blob->slice_index_min(); - - if (first) { - ret = {u, v, w, t}; - first = false; - continue; + auto wpid = blob->wpid(); + if (wpid.apa() != apa || wpid.face() != face) { + continue; // skip blobs not in the specified APA and face + } + + for (int i = blob->u_wire_index_min(); i < blob->u_wire_index_max(); ++i) { + u_set.insert(i); + } + for (int i = blob->v_wire_index_min(); i < blob->v_wire_index_max(); ++i) { + v_set.insert(i); + } + for (int i = blob->w_wire_index_min(); i < blob->w_wire_index_max(); ++i) { + w_set.insert(i); + } + for (int i = blob->slice_index_min(); i < blob->slice_index_max(); ++i) { + t_set.insert(i); } - get<0>(ret) = std::min(get<0>(ret), u); - get<1>(ret) = std::min(get<1>(ret), v); - get<2>(ret) = std::min(get<2>(ret), w); - get<3>(ret) = std::min(get<3>(ret), t); } + + std::tuple ret; + if (!u_set.empty()) + ret = { *u_set.begin(), *v_set.begin(), *w_set.begin(), *t_set.begin() }; + else + ret = { -1, -1, -1, -1 }; + return ret; } -std::tuple Cluster::get_uvwt_max() const +std::tuple Cluster::get_uvwt_max(int apa, int face) const { - std::tuple ret; - bool first = true; + std::set u_set, v_set, w_set, t_set; for (const auto* blob : children()) { - const int u = blob->u_wire_index_max(); 
- const int v = blob->v_wire_index_max(); - const int w = blob->w_wire_index_max(); - const int t = blob->slice_index_max(); - - if (first) { - ret = {u, v, w, t}; - first = false; - continue; + auto wpid = blob->wpid(); + if (wpid.apa() != apa || wpid.face() != face) { + continue; // skip blobs not in the specified APA and face + } + + for (int i = blob->u_wire_index_min(); i < blob->u_wire_index_max(); ++i) { + u_set.insert(i); + } + for (int i = blob->v_wire_index_min(); i < blob->v_wire_index_max(); ++i) { + v_set.insert(i); + } + for (int i = blob->w_wire_index_min(); i < blob->w_wire_index_max(); ++i) { + w_set.insert(i); + } + for (int i = blob->slice_index_min(); i < blob->slice_index_max(); ++i) { + t_set.insert(i); } - get<0>(ret) = std::max(get<0>(ret), u); - get<1>(ret) = std::max(get<1>(ret), v); - get<2>(ret) = std::max(get<2>(ret), w); - get<3>(ret) = std::max(get<3>(ret), t); } + + std::tuple ret; + if (!u_set.empty()) + ret = { *u_set.rbegin(), *v_set.rbegin(), *w_set.rbegin(), *t_set.rbegin() }; + else + ret = { -1, -1, -1, -1 }; return ret; } // FIXME: Is this actually correct? It does not return "ranges" but rather the // number of unique wires/ticks in the cluster. A sparse but large cluster will // be "smaller" than a small but dense cluster. 
-std::tuple Cluster::get_uvwt_range() const +std::map > Cluster::get_uvwt_range() const { - std::set u_set; - std::set v_set; - std::set w_set; - std::set t_set; + std::map > map_wpid_u_set; + std::map > map_wpid_v_set; + std::map > map_wpid_w_set; + std::map > map_wpid_t_set; for (const auto* blob : children()) { for (int i = blob->u_wire_index_min(); i < blob->u_wire_index_max(); ++i) { - u_set.insert(i); + map_wpid_u_set[blob->wpid()].insert(i); } for (int i = blob->v_wire_index_min(); i < blob->v_wire_index_max(); ++i) { - v_set.insert(i); + map_wpid_v_set[blob->wpid()].insert(i); } for (int i = blob->w_wire_index_min(); i < blob->w_wire_index_max(); ++i) { - w_set.insert(i); + map_wpid_w_set[blob->wpid()].insert(i); } for (int i = blob->slice_index_min(); i < blob->slice_index_max(); ++i) { - t_set.insert(i); + map_wpid_t_set[blob->wpid()].insert(i); } } - return {u_set.size(), v_set.size(), w_set.size(), t_set.size()}; + std::map > ret; + for (auto it = map_wpid_u_set.begin(); it != map_wpid_u_set.end(); ++it) { + const WirePlaneId wpid = it->first; + const auto& u_set = it->second; + const auto& v_set = map_wpid_v_set[wpid]; + const auto& w_set = map_wpid_w_set[wpid]; + const auto& t_set = map_wpid_t_set[wpid]; + ret[wpid] = {u_set.size(), v_set.size(), w_set.size(), t_set.size()}; + } + return ret; + // return {u_set.size(), v_set.size(), w_set.size(), t_set.size()}; } double Cluster::get_length() const { - if (m_length == 0) { // invalidates when a new node is set - const auto& tp = grouping()->get_params(); + auto& length = cache().length; + if (length != 0) { + return length; + } + + const auto& grouping = this->grouping(); + + auto map_wpid_uvwt = this->get_uvwt_range(); + for (const auto& [wpid, uvwt] : map_wpid_uvwt) { - const auto [u, v, w, t] = get_uvwt_range(); - const double pu = u * tp.pitch_u; - const double pv = v * tp.pitch_v; - const double pw = w * tp.pitch_w; - const double pt = t * tp.tick_drift; - m_length = std::sqrt(2. / 3. 
* (pu * pu + pv * pv + pw * pw) + pt * pt); + const double tick = grouping->get_tick().at(wpid.apa()).at(wpid.face()); + const double drift_speed = grouping->get_drift_speed().at(wpid.apa()).at(wpid.face()); + + // std::cout << "Test: " << wpid.apa() << " " << wpid.face() << " " << tp.tick_drift << " " << tick * drift_speed << std::endl; + + const auto [u, v, w, t] = uvwt; + auto face = grouping->get_anode(wpid.apa())->face(wpid.face()); + const double pu = u * face->plane(0)->pimpos()->pitch() ; + const double pv = v * face->plane(1)->pimpos()->pitch(); + const double pw = w * face->plane(2)->pimpos()->pitch(); + const double pt = t * tick * drift_speed; + length += std::sqrt(2. / 3. * (pu * pu + pv * pv + pw * pw) + pt * pt); } - return m_length; + + return length; } -std::tuple Facade::get_uvwt_range(const Cluster* cluster, const std::vector& b2id, const int id) +std::map > Facade::get_uvwt_range(const Cluster* cluster, const std::vector& b2id, const int id) { - std::set u_set; - std::set v_set; - std::set w_set; - std::set t_set; + std::map > map_wpid_u_set; + std::map > map_wpid_v_set; + std::map > map_wpid_w_set; + std::map > map_wpid_t_set; + for (size_t i = 0; i != b2id.size(); i++) { if (b2id.at(i) != id) continue; const auto* blob = cluster->children().at(i); for (int i = blob->u_wire_index_min(); i < blob->u_wire_index_max(); ++i) { - u_set.insert(i); + map_wpid_u_set[blob->wpid()].insert(i); } for (int i = blob->v_wire_index_min(); i < blob->v_wire_index_max(); ++i) { - v_set.insert(i); + map_wpid_v_set[blob->wpid()].insert(i); } for (int i = blob->w_wire_index_min(); i < blob->w_wire_index_max(); ++i) { - w_set.insert(i); + map_wpid_w_set[blob->wpid()].insert(i); } for (int i = blob->slice_index_min(); i < blob->slice_index_max(); ++i) { - t_set.insert(i); + map_wpid_t_set[blob->wpid()].insert(i); } } - return {u_set.size(), v_set.size(), w_set.size(), t_set.size()}; + + std::map > ret; + for (auto it = map_wpid_u_set.begin(); it != 
map_wpid_u_set.end(); ++it) { + const WirePlaneId wpid = it->first; + const auto& u_set = it->second; + const auto& v_set = map_wpid_v_set[wpid]; + const auto& w_set = map_wpid_w_set[wpid]; + const auto& t_set = map_wpid_t_set[wpid]; + ret[wpid] = {u_set.size(), v_set.size(), w_set.size(), t_set.size()}; + } + return ret; + + // return {u_set.size(), v_set.size(), w_set.size(), t_set.size()}; } double Facade::get_length(const Cluster* cluster, const std::vector& b2id, const int id) { - const auto [u, v, w, t] = Facade::get_uvwt_range(cluster, b2id, id); - const auto& tp = cluster->grouping()->get_params(); - const double pu = u * tp.pitch_u; - const double pv = v * tp.pitch_v; - const double pw = w * tp.pitch_w; - const double pt = t * tp.tick_drift; - return std::sqrt(2. / 3. * (pu * pu + pv * pv + pw * pw) + pt * pt); + // const auto& tp = cluster->grouping()->get_params(); + auto map_wpid_uvwt = Facade::get_uvwt_range(cluster, b2id, id); + double length = 0; + for (const auto& [wpid, uvwt] : map_wpid_uvwt) { + const double tick = cluster->grouping()->get_tick().at(wpid.apa()).at(wpid.face()); + const double drift_speed = cluster->grouping()->get_drift_speed().at(wpid.apa()).at(wpid.face()); + + const auto [u, v, w, t] = uvwt; + const double pu = u * cluster->grouping()->get_anode(wpid.apa())->face(wpid.face())->plane(0)->pimpos()->pitch(); + const double pv = v * cluster->grouping()->get_anode(wpid.apa())->face(wpid.face())->plane(1)->pimpos()->pitch(); + const double pw = w * cluster->grouping()->get_anode(wpid.apa())->face(wpid.face())->plane(2)->pimpos()->pitch(); + const double pt = t * tick * drift_speed; + length += std::sqrt(2. / 3. 
* (pu * pu + pv * pv + pw * pw) + pt * pt); + } + return length; } @@ -1367,11 +1792,15 @@ std::pair Cluster::get_highest_lowest_points(size_t ax const size_t npoints = points[0].size(); geo_point_t lowest_point, highest_point; + bool initialized = false; for (size_t ind = 0; ind < npoints; ++ind) { + if (is_point_excluded(ind)) continue; + geo_point_t pt(points[0][ind], points[1][ind], points[2][ind]); - if (!ind) { + if (!initialized) { lowest_point = highest_point = pt; + initialized = true; continue; } if (pt[axis] > highest_point[axis]) { @@ -1398,28 +1827,33 @@ std::pair Cluster::get_front_back_points() const std::pair Cluster::get_main_axis_points() const { - // Get first point as initial values - geo_point_t highest_point = point3d(0); - geo_point_t lowest_point = point3d(0); - - // Get main axis and ensure consistent direction (y>0) - geo_point_t main_axis = get_pca_axis(0); + // Get main axis and ensure consistent direction (y>0) + geo_point_t main_axis = get_pca().axis.at(0); if (main_axis.y() < 0) { main_axis = main_axis * -1; } - - // Initialize extreme values using projections of first point - double high_value = highest_point.dot(main_axis); - double low_value = high_value; - + + geo_point_t highest_point, lowest_point; + double high_value, low_value; + bool initialized = false; + // Loop through all points to find extremes along main axis - for (int i = 1; i < npoints(); i++) { + for (int i = 0; i < npoints(); i++) { + if (is_point_excluded(i)) continue; + geo_point_t current = point3d(i); double value = current.dot(main_axis); + if (!initialized) { + highest_point = lowest_point = current; + high_value = low_value = value; + initialized = true; + continue; + } + if (value > high_value) { highest_point = current; - high_value = value; + high_value = value; } if (value < low_value) { lowest_point = current; @@ -1427,24 +1861,46 @@ std::pair Cluster::get_main_axis_points() const } } + if (!initialized) { + throw std::runtime_error("No valid points 
available for get_main_axis_points"); + } + return std::make_pair(highest_point, lowest_point); } std::pair Cluster::get_two_extreme_points() const { geo_point_t extreme_wcp[6]; - for (int i = 0; i != 6; i++) { - extreme_wcp[i] = point3d(0); + bool initialized = false; + + // Find extreme points in each coordinate direction + for (int i = 0; i < npoints(); i++) { + if (is_point_excluded(i)) continue; + + geo_point_t current = point3d(i); + + if (!initialized) { + // Initialize all extremes to first valid point + for (int j = 0; j < 6; j++) { + extreme_wcp[j] = current; + } + initialized = true; + continue; + } + + // Check for new extremes + if (current.y() > extreme_wcp[0].y()) extreme_wcp[0] = current; + if (current.y() < extreme_wcp[1].y()) extreme_wcp[1] = current; + + if (current.x() > extreme_wcp[2].x()) extreme_wcp[2] = current; + if (current.x() < extreme_wcp[3].x()) extreme_wcp[3] = current; + + if (current.z() > extreme_wcp[4].z()) extreme_wcp[4] = current; + if (current.z() < extreme_wcp[5].z()) extreme_wcp[5] = current; } - for (int i = 1; i < npoints(); i++) { - if (point3d(i).y() > extreme_wcp[0].y()) extreme_wcp[0] = point3d(i); - if (point3d(i).y() < extreme_wcp[1].y()) extreme_wcp[1] = point3d(i); - if (point3d(i).x() > extreme_wcp[2].x()) extreme_wcp[2] = point3d(i); - if (point3d(i).x() < extreme_wcp[3].x()) extreme_wcp[3] = point3d(i); - - if (point3d(i).z() > extreme_wcp[4].z()) extreme_wcp[4] = point3d(i); - if (point3d(i).z() < extreme_wcp[5].z()) extreme_wcp[5] = point3d(i); + if (!initialized) { + throw std::runtime_error("No valid points available for get_two_extreme_points"); } double max_dis = -1; @@ -1472,9 +1928,9 @@ std::pair Cluster::get_two_extreme_points() const bool Cluster::sanity(Log::logptr_t log) const { { - const auto* svptr = m_node->value.get_scoped(scope); + const auto* svptr = m_node->value.get_scoped(m_default_scope); if (!svptr) { - if (log) log->debug("cluster sanity: note, not yet a scoped view {}", scope); + if (log) 
log->debug("cluster sanity: note, not yet a scoped view {}", m_default_scope); } } if (!nchildren()) { @@ -1482,7 +1938,7 @@ bool Cluster::sanity(Log::logptr_t log) const return false; } - const auto& sv = m_node->value.scoped_view(scope); + const auto& sv = m_node->value.scoped_view(m_default_scope); const auto& snodes = sv.nodes(); if (snodes.empty()) { if (log) log->debug("cluster sanity: no scoped nodes"); @@ -1538,6 +1994,7 @@ bool Cluster::sanity(Log::logptr_t log) const const Blob* sblob = nullptr; std::vector spoints; + for (size_t ind = 0; ind < npts; ++ind) { auto kdpt = skd.point3d(ind); @@ -1553,7 +2010,7 @@ bool Cluster::sanity(Log::logptr_t log) const const auto* tblob = tnode->value.facade(); if (tblob != sblob) { sblob = tblob; - spoints = sblob->points(); + spoints = sblob->points(get_default_scope().pcname, get_default_scope().coords); } if (minind >= spoints.size()) { @@ -1578,7 +2035,7 @@ size_t Cluster::hash() const std::size_t h = 0; boost::hash_combine(h, (size_t) (get_length() / units::mm)); auto blobs = children(); // copy vector - sort_blobs(blobs); + // sort_blobs(blobs); for (const Blob* blob : blobs) { boost::hash_combine(h, blob->hash()); } @@ -1587,2421 +2044,1751 @@ size_t Cluster::hash() const std::vector Cluster::get_blob_indices(const Blob* blob) const { - if (m_map_mcell_indices.empty()) { + auto& mmi = cache().map_mcell_indices; + if (mmi.empty()) { const auto& skd = kd3d(); for (size_t ind = 0; ind < skd.npoints(); ++ind) { - const auto* blob = blob_with_point(ind); - m_map_mcell_indices[blob].push_back(ind); + const auto* bwp = blob_with_point(ind); + mmi[bwp].push_back(ind); } } - return m_map_mcell_indices[blob]; + return mmi[blob]; } -// #define LogDebug(x) std::cout << "[yuhw]: " << __LINE__ << " : " << x << std::endl -void Cluster::Create_graph(const bool use_ctpc) const +std::vector Cluster::indices_to_points(const std::vector& path_indices) const { - // std::cout << "Create Graph!" 
<< std::endl; - LogDebug("Create Graph! " << graph); - if (m_graph != nullptr) return; - m_graph = std::make_unique(nbpoints()); - // std::cout << "Test:" << "Create Graph!" << std::endl; - Establish_close_connected_graph(); - if (use_ctpc) Connect_graph(true); - Connect_graph(); + std::vector points; + points.reserve(path_indices.size()); + for (size_t idx : path_indices) { + points.push_back(point3d(idx)); + } + return points; } -void Cluster::Establish_close_connected_graph() const +// void Cluster::organize_points_path_vec(std::vector& path_points, double low_dis_limit) const +// { +// std::vector temp_points = path_points; +// path_points.clear(); + +// // First pass: filter based on distance +// for (size_t i = 0; i != temp_points.size(); i++) { +// if (path_points.empty()) { +// path_points.push_back(temp_points[i]); +// } +// else if (i + 1 == temp_points.size()) { +// double dis = (temp_points[i] - path_points.back()).magnitude(); +// if (dis > low_dis_limit * 0.75) { +// path_points.push_back(temp_points[i]); +// } +// } +// else { +// double dis = (temp_points[i] - path_points.back()).magnitude(); +// double dis1 = (temp_points[i + 1] - path_points.back()).magnitude(); + +// if (dis > low_dis_limit || (dis1 > low_dis_limit * 1.7 && dis > low_dis_limit * 0.75)) { +// path_points.push_back(temp_points[i]); +// } +// } +// } + +// // Second pass: filter based on angle +// temp_points = path_points; +// std::vector angles; +// for (size_t i = 0; i != temp_points.size(); i++) { +// if (i == 0 || i + 1 == temp_points.size()) { +// angles.push_back(M_PI); +// } +// else { +// geo_vector_t v1 = temp_points[i] - temp_points[i - 1]; +// geo_vector_t v2 = temp_points[i] - temp_points[i + 1]; +// angles.push_back(v1.angle(v2)); +// } +// } + +// path_points.clear(); +// for (size_t i = 0; i != temp_points.size(); i++) { +// if (angles[i] * 180.0 / M_PI >= 75) { +// path_points.push_back(temp_points[i]); +// } +// } +// } + +// // this is different from WCP 
implementation, the path_points is the input ... +// void Cluster::organize_path_points(std::vector& path_points, double low_dis_limit) const +// { +// // std::vector temp_points = path_points; +// path_points.clear(); +// auto indices = get_path_wcps(); +// auto temp_points = indices_to_points(indices); + +// for (size_t i = 0; i != temp_points.size(); i++) { +// if (path_points.empty()) { +// path_points.push_back(temp_points[i]); +// } +// else if (i + 1 == temp_points.size()) { +// double dis = (temp_points[i] - path_points.back()).magnitude(); +// if (dis > low_dis_limit * 0.5) { +// path_points.push_back(temp_points[i]); +// } +// } +// else { +// double dis = (temp_points[i] - path_points.back()).magnitude(); +// double dis1 = (temp_points[i + 1] - path_points.back()).magnitude(); + +// if (dis > low_dis_limit || (dis1 > low_dis_limit * 1.7 && dis > low_dis_limit * 0.5)) { +// path_points.push_back(temp_points[i]); +// } +// } +// } +// } + + +std::vector Cluster::get_hull() const { - std::map>, blob_less_functor> map_mcell_uindex_wcps; - std::map>, blob_less_functor> map_mcell_vindex_wcps; - std::map>, blob_less_functor> map_mcell_windex_wcps; + auto& hull_points = cache().hull_points; - std::map, blob_less_functor> map_mcell_indices; + if (hull_points.size()) { + return hull_points; + } + quickhull::QuickHull qh; + std::vector> pc; const auto& points = this->points(); - const auto& winds = this->wire_indices(); - LogDebug("points[0].size(): " << points[0].size() << " winds[0].size(): " << winds[0].size()); - - for (Blob* mcell : this->children()) { - std::map> map_uindex_wcps; - std::map> map_vindex_wcps; - std::map> map_windex_wcps; - - std::vector pinds = this->get_blob_indices(mcell); - for (const int pind : pinds) { - auto v = vertex(pind, *m_graph); // retrieve vertex descriptor - (*m_graph)[v].index = pind; - if (map_uindex_wcps.find(winds[0][pind]) == map_uindex_wcps.end()) { - std::set wcps; - wcps.insert(pind); - map_uindex_wcps[winds[0][pind]] = 
wcps; - } - else { - map_uindex_wcps[winds[0][pind]].insert(pind); - } + for (int i = 0; i != npoints(); i++) { + pc.emplace_back(points[0][i], points[1][i], points[2][i]); + } + quickhull::ConvexHull hull = qh.getConvexHull(pc, false, true); + std::set indices; - if (map_vindex_wcps.find(winds[1][pind]) == map_vindex_wcps.end()) { - std::set wcps; - wcps.insert(pind); - map_vindex_wcps[winds[1][pind]] = wcps; - } - else { - map_vindex_wcps[winds[1][pind]].insert(pind); - } + for (size_t i = 0; i != hull.getIndexBuffer().size(); i++) { + indices.insert(hull.getIndexBuffer().at(i)); + } - if (map_windex_wcps.find(winds[2][pind]) == map_windex_wcps.end()) { - std::set wcps; - wcps.insert(pind); - map_windex_wcps[winds[2][pind]] = wcps; - } - else { - map_windex_wcps[winds[2][pind]].insert(pind); - } - } - map_mcell_uindex_wcps[mcell] = map_uindex_wcps; - map_mcell_vindex_wcps[mcell] = map_vindex_wcps; - map_mcell_windex_wcps[mcell] = map_windex_wcps; + for (auto i : indices) { + hull_points.push_back({points[0][i], points[1][i], points[2][i]}); } + + return hull_points; +} - int num_edges = 0; +Cluster::PCA& Cluster::get_pca() const +{ + auto& pcaptr = cache().pca; + if (pcaptr) { + return *pcaptr; + } - // create graph for points inside the same mcell - for (Blob* mcell : this->children()) { - std::vector pinds = this->get_blob_indices(mcell); - int max_wire_interval = mcell->get_max_wire_interval(); - int min_wire_interval = mcell->get_min_wire_interval(); - // std::cout << "mcell: " << pinds.size() - // << " type " << mcell->get_max_wire_type() << " " << mcell->get_min_wire_type() - // << " interval " << max_wire_interval << " " << min_wire_interval - // << std::endl; - std::map>* map_max_index_wcps; - std::map>* map_min_index_wcps; - if (mcell->get_max_wire_type() == 0) { - map_max_index_wcps = &map_mcell_uindex_wcps[mcell]; - } - else if (mcell->get_max_wire_type() == 1) { - map_max_index_wcps = &map_mcell_vindex_wcps[mcell]; - } - else { - map_max_index_wcps = 
&map_mcell_windex_wcps[mcell]; - } - if (mcell->get_min_wire_type() == 0) { - map_min_index_wcps = &map_mcell_uindex_wcps[mcell]; - } - else if (mcell->get_min_wire_type() == 1) { - map_min_index_wcps = &map_mcell_vindex_wcps[mcell]; - } - else { - map_min_index_wcps = &map_mcell_windex_wcps[mcell]; - } + const auto& pcname = this->get_default_scope().pcname; + const auto& coords = this->get_default_scope().coords; - for (const int pind1 : pinds) { - int index_max_wire; - int index_min_wire; - if (mcell->get_max_wire_type() == 0) { - index_max_wire = winds[0][pind1]; - } - else if (mcell->get_max_wire_type() == 1) { - index_max_wire = winds[1][pind1]; - } - else { - index_max_wire = winds[2][pind1]; - } - if (mcell->get_min_wire_type() == 0) { - index_min_wire = winds[0][pind1]; - } - else if (mcell->get_min_wire_type() == 1) { - index_min_wire = winds[1][pind1]; - } - else { - index_min_wire = winds[2][pind1]; - } + pcaptr = std::make_unique(); + pcaptr->axis.resize(3); + pcaptr->values.resize(3,0); - std::vector*> max_wcps_set; - std::vector*> min_wcps_set; + int nsum = 0; + for (const Blob* blob : children()) { + for (const geo_point_t& p : blob->points(pcname, coords)) { + pcaptr->center += p; + nsum++; + } + } - // go through the first map and find the ones satisfying the condition - for (auto it2 = map_max_index_wcps->begin(); it2 != map_max_index_wcps->end(); it2++) { - if (fabs(it2->first - index_max_wire) <= max_wire_interval) { - max_wcps_set.push_back(&(it2->second)); - } - } - // go through the second map and find the ones satisfying the condition - for (auto it2 = map_min_index_wcps->begin(); it2 != map_min_index_wcps->end(); it2++) { - if (fabs(it2->first - index_min_wire) <= min_wire_interval) { - min_wcps_set.push_back(&(it2->second)); - } - } + // Not enough points to perform PCA. 
+ if (nsum < 3) { + return *pcaptr; + } - std::set wcps_set1; - std::set wcps_set2; + pcaptr->center /= nsum; - for (auto it2 = max_wcps_set.begin(); it2 != max_wcps_set.end(); it2++) { - wcps_set1.insert((*it2)->begin(), (*it2)->end()); - } - for (auto it3 = min_wcps_set.begin(); it3 != min_wcps_set.end(); it3++) { - wcps_set2.insert((*it3)->begin(), (*it3)->end()); - } + Eigen::MatrixXd cov_matrix(3, 3); - { - std::set common_set; - set_intersection(wcps_set1.begin(), wcps_set1.end(), wcps_set2.begin(), wcps_set2.end(), - std::inserter(common_set, common_set.begin())); - - for (auto it4 = common_set.begin(); it4 != common_set.end(); it4++) { - int pind2 = *it4; - if (pind1 != pind2) { - // add edge ... - // auto edge = add_edge(pind1, pind2, *m_graph); - // if (edge.second) { - // (*m_graph)[edge.first].dist = sqrt(pow(points[0][pind1] - points[0][pind2], 2) + - // pow(points[1][pind1] - points[1][pind2], 2) + - // pow(points[2][pind1] - points[2][pind2], 2)); - // num_edges++; - // } - auto edge = add_edge(pind1,pind2,WireCell::PointCloud::Facade::EdgeProp(sqrt(pow(points[0][pind1] - points[0][pind2], 2) + - pow(points[1][pind1] - points[1][pind2], 2) + - pow(points[2][pind1] - points[2][pind2], 2))),*m_graph); - // std::cout << index1 << " " << index2 << " " << edge.second << std::endl; - if (edge.second){ - num_edges ++; - } - } + for (int i = 0; i != 3; i++) { + for (int j = i; j != 3; j++) { + cov_matrix(i, j) = 0; + for (const Blob* blob : children()) { + for (const geo_point_t& p : blob->points(pcname, coords)) { + cov_matrix(i, j) += (p[i] - pcaptr->center[i]) * (p[j] - pcaptr->center[j]); } } } } + cov_matrix(1, 0) = cov_matrix(0, 1); + cov_matrix(2, 0) = cov_matrix(0, 2); + cov_matrix(2, 1) = cov_matrix(1, 2); + // std::cout << cov_matrix << std::endl; - LogDebug("in-blob edges: " << num_edges); - // std::cout << "Test: in-blob edges: " << num_edges << std::endl; + // const auto eigenSolver = WireCell::Array::pca(cov_matrix); + 
Eigen::SelfAdjointEigenSolver eigenSolver(cov_matrix); + auto eigen_values = eigenSolver.eigenvalues(); + auto eigen_vectors = eigenSolver.eigenvectors(); - std::vector time_slices; - for (auto [time, _] : this->time_blob_map()) { - time_slices.push_back(time); + // ascending order from Eigen, we want descending + for (int i = 0; i != 3; i++) { + pcaptr->values[2-i] = eigen_values(i); + double norm = sqrt(eigen_vectors(0, i) * eigen_vectors(0, i) + eigen_vectors(1, i) * eigen_vectors(1, i) + + eigen_vectors(2, i) * eigen_vectors(2, i)); + pcaptr->axis[2-i].set(eigen_vectors(0, i) / norm, eigen_vectors(1, i) / norm, eigen_vectors(2, i) / norm); } - const int nticks_per_slice = grouping()->get_params().nticks_live_slice; - // std::cout << "time_slices size: " << time_slices.size() << std::endl; - std::vector> connected_mcells; + return *pcaptr; +} - for (size_t i = 0; i != time_slices.size(); i++) { - const auto& mcells_set = this->time_blob_map().at(time_slices.at(i)); - // create graph for points in mcell inside the same time slice - if (mcells_set.size() >= 2) { - for (auto it2 = mcells_set.begin(); it2 != mcells_set.end(); it2++) { - auto mcell1 = *it2; - auto it2p = it2; - if (it2p != mcells_set.end()) { - it2p++; - for (auto it3 = it2p; it3 != mcells_set.end(); it3++) { - auto mcell2 = *(it3); - if (mcell1->overlap_fast(*mcell2, 2)) - connected_mcells.push_back(std::make_pair(mcell1, mcell2)); - } - } - } - } - // create graph for points between connected mcells in adjacent time slices + 1, if not, + 2 - std::vector vec_mcells_set; - if (i + 1 < time_slices.size()) { - if (time_slices.at(i + 1) - time_slices.at(i) == 1*nticks_per_slice) { - vec_mcells_set.push_back(this->time_blob_map().at(time_slices.at(i + 1))); - if (i + 2 < time_slices.size()) - if (time_slices.at(i + 2) - time_slices.at(i) == 2*nticks_per_slice) - vec_mcells_set.push_back(this->time_blob_map().at(time_slices.at(i + 2))); +// std::unordered_map +std::vector Cluster::examine_x_boundary(const 
double low_limit, const double high_limit) +// designed to run for single face ... limits are for per face only ... +{ + double num_points[3] = {0, 0, 0}; + double x_max = -1e9; + double x_min = 1e9; + auto& mcells = this->children(); + const auto& pcname = this->get_default_scope().pcname; + const auto& coords = this->get_default_scope().coords; + + for (Blob* mcell : mcells) { + /// TODO: no caching, could be slow + std::vector pts = mcell->points(pcname, coords); + for (size_t i = 0; i != pts.size(); i++) { + if (pts.at(i).x() < low_limit) { + num_points[0]++; + if (pts.at(i).x() > x_max) x_max = pts.at(i).x(); } - else if (time_slices.at(i + 1) - time_slices.at(i) == 2*nticks_per_slice) { - vec_mcells_set.push_back(this->time_blob_map().at(time_slices.at(i + 1))); + else if (pts.at(i).x() > high_limit) { + num_points[2]++; + if (pts.at(i).x() < x_min) x_min = pts.at(i).x(); } - } - // bool flag = false; - for (size_t j = 0; j != vec_mcells_set.size(); j++) { - // if (flag) break; - auto& next_mcells_set = vec_mcells_set.at(j); - for (auto it1 = mcells_set.begin(); it1 != mcells_set.end(); it1++) { - auto mcell1 = (*it1); - for (auto it2 = next_mcells_set.begin(); it2 != next_mcells_set.end(); it2++) { - auto mcell2 = (*it2); - if (mcell1->overlap_fast(*mcell2, 2)) { - // flag = true; // correct??? - connected_mcells.push_back(std::make_pair(mcell1, mcell2)); - } - } + else { + num_points[1]++; } } - // std::cout << "yuhw: itime_slices " << i - // << " time_slices.at(i) " << time_slices.at(i) - // << " vec_mcells_set " << vec_mcells_set.size() - // << " connected_mcells " << connected_mcells.size() << std::endl; } - // std::cout << "connected_mcells size: " << connected_mcells.size() << std::endl; - // establish edge ... 
- const int max_num_nodes = 5; - std::map, std::set>> closest_index; - - for (auto it = connected_mcells.begin(); it != connected_mcells.end(); it++) { - auto mcell1 = (*it).first; - auto mcell2 = (*it).second; - - std::vector pinds1 = this->get_blob_indices(mcell1); - std::vector pinds2 = this->get_blob_indices(mcell2); + // std::cout + // << "npoints() " << npoints() + // << " xmax " << x_max << " xmin " << x_min + // << " low_limit " << low_limit << " high_limit " << high_limit + // << " num_points: " << num_points[0] << " " << num_points[1] << " " << num_points[2] << std::endl; - // test 2 against 1 ... - int max_wire_interval = mcell1->get_max_wire_interval(); - int min_wire_interval = mcell1->get_min_wire_interval(); - std::map>* map_max_index_wcps; - std::map>* map_min_index_wcps; + std::vector clusters; + std::vector b2groupid(mcells.size(), 0); + std::set groupids; - if (mcell1->get_max_wire_type() == 0) { - map_max_index_wcps = &map_mcell_uindex_wcps.at(mcell2); + // if (true) { + if (num_points[0] + num_points[2] < num_points[1] * 0.075) { + // PR3DCluster* cluster_1 = 0; + // PR3DCluster* cluster_2 = 0; + // PR3DCluster* cluster_3 = 0; + /// FIXME: does tolerance need to be configurable? + if (x_max < low_limit - 1.0 * units::cm && x_max > -1e8) { + // fill the small one ... + // cluster_1 = new PR3DCluster(1); + groupids.insert(1); } - else if (mcell1->get_max_wire_type() == 1) { - map_max_index_wcps = &map_mcell_vindex_wcps.at(mcell2); + if (x_min > high_limit + 1.0 * units::cm && x_min < 1e8) { + // fill the large one ... 
+ // cluster_3 = new PR3DCluster(3); + groupids.insert(3); } - else { - map_max_index_wcps = &map_mcell_windex_wcps.at(mcell2); - } - if (mcell1->get_min_wire_type() == 0) { - map_min_index_wcps = &map_mcell_uindex_wcps.at(mcell2); - } - else if (mcell1->get_min_wire_type() == 1) { - map_min_index_wcps = &map_mcell_vindex_wcps.at(mcell2); - } - else { - map_min_index_wcps = &map_mcell_windex_wcps.at(mcell2); - } - - for (const int pind1 : pinds1) { - int index_max_wire; - int index_min_wire; - if (mcell1->get_max_wire_type() == 0) { - index_max_wire = winds[0][pind1]; - } - else if (mcell1->get_max_wire_type() == 1) { - index_max_wire = winds[1][pind1]; - } - else { - index_max_wire = winds[2][pind1]; - } - if (mcell1->get_min_wire_type() == 0) { - index_min_wire = winds[0][pind1]; - } - else if (mcell1->get_min_wire_type() == 1) { - index_min_wire = winds[1][pind1]; - } - else { - index_min_wire = winds[2][pind1]; - } - std::vector*> max_wcps_set; - std::vector*> min_wcps_set; - // go through the first map and find the ones satisfying the condition - for (auto it2 = map_max_index_wcps->begin(); it2 != map_max_index_wcps->end(); it2++) { - if (fabs(it2->first - index_max_wire) <= max_wire_interval) { - max_wcps_set.push_back(&(it2->second)); - } - } - // go through the second map and find the ones satisfying the condition - for (auto it2 = map_min_index_wcps->begin(); it2 != map_min_index_wcps->end(); it2++) { - if (fabs(it2->first - index_min_wire) <= min_wire_interval) { - min_wcps_set.push_back(&(it2->second)); - } - } - - std::set wcps_set1; - std::set wcps_set2; - - for (auto it2 = max_wcps_set.begin(); it2 != max_wcps_set.end(); it2++) { - wcps_set1.insert((*it2)->begin(), (*it2)->end()); - } - for (auto it3 = min_wcps_set.begin(); it3 != min_wcps_set.end(); it3++) { - wcps_set2.insert((*it3)->begin(), (*it3)->end()); - } - - { - std::set common_set; - set_intersection(wcps_set1.begin(), wcps_set1.end(), wcps_set2.begin(), wcps_set2.end(), - 
std::inserter(common_set, common_set.begin())); - - for (auto it4 = common_set.begin(); it4 != common_set.end(); it4++) { - const int pind2 = *it4; - if (pind1 != pind2) { - double dis = sqrt(pow(points[0][pind1] - points[0][pind2], 2) + - pow(points[1][pind1] - points[1][pind2], 2) + - pow(points[2][pind1] - points[2][pind2], 2)); - auto b2 = blob_with_point(pind2); - auto key = std::make_pair(pind1, b2->slice_index_min()); - - if (closest_index.find(key) == closest_index.end() ) { - std::set > temp_sets; - temp_sets.insert(std::make_pair(dis, pind2)); - closest_index[key] = temp_sets; - } - else { - closest_index[key].insert(std::make_pair(dis,pind2)); - if (closest_index[key].size()>max_num_nodes){ - auto it5 = closest_index[key].begin(); - for (int qx = 0; qx!=max_num_nodes;qx++){ - it5++; - } - closest_index[key].erase(it5,closest_index[key].end()); - } - // if (dis < closest_index[key].second || (std::abs(dis - closest_index[key].second) < 1e-10 && pind2 < closest_index[key].first)) - // closest_index[key] = std::make_pair(pind2, dis); - } - } - } - } - } - - // test 1 against 2 ... 
- max_wire_interval = mcell2->get_max_wire_interval(); - min_wire_interval = mcell2->get_min_wire_interval(); - if (mcell2->get_max_wire_type() == 0) { - map_max_index_wcps = &map_mcell_uindex_wcps[mcell1]; - } - else if (mcell2->get_max_wire_type() == 1) { - map_max_index_wcps = &map_mcell_vindex_wcps[mcell1]; - } - else { - map_max_index_wcps = &map_mcell_windex_wcps[mcell1]; - } - if (mcell2->get_min_wire_type() == 0) { - map_min_index_wcps = &map_mcell_uindex_wcps[mcell1]; - } - else if (mcell2->get_min_wire_type() == 1) { - map_min_index_wcps = &map_mcell_vindex_wcps[mcell1]; - } - else { - map_min_index_wcps = &map_mcell_windex_wcps[mcell1]; - } - for (const int pind1 : pinds2) { - int index_max_wire; - int index_min_wire; - if (mcell2->get_max_wire_type() == 0) { - index_max_wire = winds[0][pind1]; - } - else if (mcell2->get_max_wire_type() == 1) { - index_max_wire = winds[1][pind1]; - } - else { - index_max_wire = winds[2][pind1]; - } - if (mcell2->get_min_wire_type() == 0) { - index_min_wire = winds[0][pind1]; - } - else if (mcell2->get_min_wire_type() == 1) { - index_min_wire = winds[1][pind1]; - } - else { - index_min_wire = winds[2][pind1]; - } - std::vector*> max_wcps_set; - std::vector*> min_wcps_set; - // go through the first map and find the ones satisfying the condition - for (auto it2 = map_max_index_wcps->begin(); it2 != map_max_index_wcps->end(); it2++) { - if (fabs(it2->first - index_max_wire) <= max_wire_interval) { - max_wcps_set.push_back(&(it2->second)); - } - } - // go through the second map and find the ones satisfying the condition - for (auto it2 = map_min_index_wcps->begin(); it2 != map_min_index_wcps->end(); it2++) { - if (fabs(it2->first - index_min_wire) <= min_wire_interval) { - min_wcps_set.push_back(&(it2->second)); - } - } - - std::set wcps_set1; - std::set wcps_set2; - - for (auto it2 = max_wcps_set.begin(); it2 != max_wcps_set.end(); it2++) { - wcps_set1.insert((*it2)->begin(), (*it2)->end()); - } - for (auto it3 = 
min_wcps_set.begin(); it3 != min_wcps_set.end(); it3++) { - wcps_set2.insert((*it3)->begin(), (*it3)->end()); - } - - { - std::set common_set; - set_intersection(wcps_set1.begin(), wcps_set1.end(), wcps_set2.begin(), wcps_set2.end(), - std::inserter(common_set, common_set.begin())); - - for (auto it4 = common_set.begin(); it4 != common_set.end(); it4++) { - const int pind2 = *it4; - if (pind1 != pind2) { - double dis = sqrt(pow(points[0][pind1] - points[0][pind2], 2) + - pow(points[1][pind1] - points[1][pind2], 2) + - pow(points[2][pind1] - points[2][pind2], 2)); - auto b2 = blob_with_point(pind2); - auto key = std::make_pair(pind1, b2->slice_index_min()); - - if (closest_index.find(key) == closest_index.end()) { - std::set > temp_sets; - temp_sets.insert(std::make_pair(dis,pind2)); - closest_index[key] = temp_sets; - } - else { - closest_index[key].insert(std::make_pair(dis,pind2)); - if (closest_index[key].size()>max_num_nodes){ - auto it5 = closest_index[key].begin(); - for (int qx = 0; qx!=max_num_nodes;qx++){ - it5++; - } - closest_index[key].erase(it5,closest_index[key].end()); - } - //if (dis < closest_index[key].second || (std::abs(dis - closest_index[key].second) < 1e-10 && pind2 < closest_index[key].first)) closest_index[key] = std::make_pair(pind2, dis); - } - } - } - } - } - } - - for (auto it4 = closest_index.begin(); it4 != closest_index.end(); it4++) { - int index1 = it4->first.first; - // int index2 = it4->second.first; - // double dis = it4->second.second; - // auto edge = add_edge(index1, index2, WireCell::PointCloud::Facade::EdgeProp(dis), *m_graph); - // if (edge.second) { - // num_edges++; - // } - for (auto it5 = it4->second.begin(); it5!=it4->second.end(); it5++){ - int index2 = (*it5).second; - double dis = (*it5).first; - auto edge = add_edge(index1,index2,WireCell::PointCloud::Facade::EdgeProp(dis),*m_graph); - if (edge.second){ - // (*graph)[edge.first].dist = dis; - num_edges ++; - } - // protect against dead cells ... 
- //std::cout << dis/units::cm << std::endl; - if (it5 == it4->second.begin() && dis > 0.25*units::cm) - break; - } - - // auto edge = add_edge(index1, index2, *m_graph); - // if (edge.second) { - // (*m_graph)[edge.first].dist = dis; - // num_edges++; - // } - - } - // end of copying ... - - LogDebug("all edges: " << num_edges); - // std::cout << "Test: all edges: " << num_edges << std::endl; - -} - -void Cluster::Connect_graph(const bool use_ctpc) const { - const auto& tp = grouping()->get_params(); - // now form the connected components - std::vector component(num_vertices(*m_graph)); - const size_t num = connected_components(*m_graph, &component[0]); - - // Create ordered components - std::vector ordered_components; - ordered_components.reserve(component.size()); - for (size_t i = 0; i < component.size(); ++i) { - ordered_components.emplace_back(i); - } - - // Assign vertices to components - for (size_t i = 0; i < component.size(); ++i) { - ordered_components[component[i]].add_vertex(i); - } - - // Sort components by minimum vertex index - std::sort(ordered_components.begin(), ordered_components.end(), - [](const ComponentInfo& a, const ComponentInfo& b) { - return a.min_vertex < b.min_vertex; - }); - - LogDebug(" npoints " << npoints() << " nconnected " << num); - if (num <= 1) return; - - std::vector> pt_clouds; - std::vector> pt_clouds_global_indices; - // use this to link the global index to the local index - // std::vector> pt_clouds_global_indices(num); - // for (size_t i = 0; i != num; i++) { - // pt_clouds.push_back(std::make_shared()); - // } - // for (size_t i = 0; i != component.size(); ++i) { - // pt_clouds.at(component[i])->add({points()[0][i], points()[1][i], points()[2][i]}); - // pt_clouds_global_indices.at(component[i]).push_back(i); - // } - for (const auto& comp : ordered_components) { - auto pt_cloud = std::make_shared(); - std::vector global_indices; - for (size_t vertex_idx : comp.vertex_indices) { - pt_cloud->add({points()[0][vertex_idx], 
points()[1][vertex_idx], points()[2][vertex_idx]}); - global_indices.push_back(vertex_idx); - } - pt_clouds.push_back(pt_cloud); - pt_clouds_global_indices.push_back(global_indices); - } - - /// DEBUGONLY: - if (0) { - for (size_t i = 0; i != num; i++) { - std::cout << *pt_clouds.at(i) << std::endl; - std::cout << "global indices: "; - for (size_t j = 0; j != pt_clouds_global_indices.at(i).size(); j++) { - std::cout << pt_clouds_global_indices.at(i).at(j) << " "; - } - std::cout << std::endl; - } - } - - // Initiate dist. metrics - std::vector>> index_index_dis( - num, std::vector>(num)); - std::vector>> index_index_dis_mst( - num, std::vector>(num)); - - std::vector>> index_index_dis_dir1( - num, std::vector>(num)); - std::vector>> index_index_dis_dir2( - num, std::vector>(num)); - std::vector>> index_index_dis_dir_mst( - num, std::vector>(num)); - - for (size_t j = 0; j != num; j++) { - for (size_t k = 0; k != num; k++) { - index_index_dis[j][k] = std::make_tuple(-1, -1, 1e9); - index_index_dis_mst[j][k] = std::make_tuple(-1, -1, 1e9); - - index_index_dis_dir1[j][k] = std::make_tuple(-1, -1, 1e9); - index_index_dis_dir2[j][k] = std::make_tuple(-1, -1, 1e9); - index_index_dis_dir_mst[j][k] = std::make_tuple(-1, -1, 1e9); - } - } - - // Calc. dis, dis_dir1, dis_dir2 - // check against the closest distance ... - // no need to have MST ... 
- for (size_t j = 0; j != num; j++) { - for (size_t k = j + 1; k != num; k++) { - index_index_dis[j][k] = pt_clouds.at(j)->get_closest_points(*pt_clouds.at(k)); - - - - // std::cout << j << " " << k << " " << std::get<0>(index_index_dis[j][k]) << " " - // << std::get<1>(index_index_dis[j][k]) << " " << std::get<2>(index_index_dis[j][k]) << " counter: " << global_counter_get_closest_wcpoint << std::endl; - - if ((num < 100 && pt_clouds.at(j)->get_num_points() > 100 && pt_clouds.at(k)->get_num_points() > 100 && - (pt_clouds.at(j)->get_num_points() + pt_clouds.at(k)->get_num_points()) > 400) || - (pt_clouds.at(j)->get_num_points() > 500 && pt_clouds.at(k)->get_num_points() > 500)) { - geo_point_t p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis[j][k])); - geo_point_t p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis[j][k])); - - geo_point_t dir1 = vhough_transform(p1, 30 * units::cm, HoughParamSpace::theta_phi, pt_clouds.at(j), pt_clouds_global_indices.at(j)); - geo_point_t dir2 = vhough_transform(p2, 30 * units::cm, HoughParamSpace::theta_phi, pt_clouds.at(k), pt_clouds_global_indices.at(k)); - dir1 = dir1 * -1; - dir2 = dir2 * -1; - - std::pair result1 = pt_clouds.at(k)->get_closest_point_along_vec( - p1, dir1, 80 * units::cm, 5 * units::cm, 7.5, 3 * units::cm); - - if (result1.first >= 0) { - index_index_dis_dir1[j][k] = - std::make_tuple(std::get<0>(index_index_dis[j][k]), result1.first, result1.second); - } - - std::pair result2 = pt_clouds.at(j)->get_closest_point_along_vec( - p2, dir2, 80 * units::cm, 5 * units::cm, 7.5, 3 * units::cm); - - if (result2.first >= 0) { - index_index_dis_dir2[j][k] = - std::make_tuple(result2.first, std::get<1>(index_index_dis[j][k]), result2.second); - } - } - - // Now check the path ... 
- { - geo_point_t p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis[j][k])); - geo_point_t p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis[j][k])); - - double dis = sqrt(pow(p1.x() - p2.x(), 2) + pow(p1.y() - p2.y(), 2) + pow(p1.z() - p2.z(), 2)); - double step_dis = 1.0 * units::cm; - int num_steps = dis / step_dis + 1; - int num_bad = 0; - geo_point_t test_p; - for (int ii = 0; ii != num_steps; ii++) { - test_p.set(p1.x() + (p2.x() - p1.x()) / num_steps * (ii + 1), - p1.y() + (p2.y() - p1.y()) / num_steps * (ii + 1), - p1.z() + (p2.z() - p1.z()) / num_steps * (ii + 1)); - // if (!ct_point_cloud.is_good_point(test_p)) num_bad++; - if (use_ctpc) { - /// FIXME: assumes clusters are bounded to 1 face! Need to fix this. - const bool good_point = grouping()->is_good_point(test_p, tp.face); - if (!good_point) num_bad++; - } - } - - if (num_bad > 7 || (num_bad > 2 && num_bad >= 0.75 * num_steps)) { - index_index_dis[j][k] = std::make_tuple(-1, -1, 1e9); - } - } - - // Now check the path ... - if (std::get<0>(index_index_dis_dir1[j][k]) >= 0) { - geo_point_t p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis_dir1[j][k])); - geo_point_t p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis_dir1[j][k])); - - double dis = sqrt(pow(p1.x() - p2.x(), 2) + pow(p1.y() - p2.y(), 2) + pow(p1.z() - p2.z(), 2)); - double step_dis = 1.0 * units::cm; - int num_steps = dis / step_dis + 1; - int num_bad = 0; - geo_point_t test_p; - for (int ii = 0; ii != num_steps; ii++) { - test_p.set(p1.x() + (p2.x() - p1.x()) / num_steps * (ii + 1), - p1.y() + (p2.y() - p1.y()) / num_steps * (ii + 1), - p1.z() + (p2.z() - p1.z()) / num_steps * (ii + 1)); - // if (!ct_point_cloud.is_good_point(test_p)) num_bad++; - if (use_ctpc) { - /// FIXME: assumes clusters are bounded to 1 face! Need to fix this. 
- const bool good_point = grouping()->is_good_point(test_p, tp.face); - if (!good_point) num_bad++; - } - } - - if (num_bad > 7 || (num_bad > 2 && num_bad >= 0.75 * num_steps)) { - index_index_dis_dir1[j][k] = std::make_tuple(-1, -1, 1e9); - } - } - - // Now check the path ... - if (std::get<0>(index_index_dis_dir2[j][k]) >= 0) { - geo_point_t p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis_dir2[j][k])); - geo_point_t p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis_dir2[j][k])); - - double dis = sqrt(pow(p1.x() - p2.x(), 2) + pow(p1.y() - p2.y(), 2) + pow(p1.z() - p2.z(), 2)); - double step_dis = 1.0 * units::cm; - int num_steps = dis / step_dis + 1; - int num_bad = 0; - geo_point_t test_p; - for (int ii = 0; ii != num_steps; ii++) { - test_p.set(p1.x() + (p2.x() - p1.x()) / num_steps * (ii + 1), - p1.y() + (p2.y() - p1.y()) / num_steps * (ii + 1), - p1.z() + (p2.z() - p1.z()) / num_steps * (ii + 1)); - // if (!ct_point_cloud.is_good_point(test_p)) num_bad++; - if (use_ctpc) { - /// FIXME: assumes clusters are bounded to 1 face! Need to fix this. - const bool good_point = grouping()->is_good_point(test_p, tp.face); - if (!good_point) num_bad++; - } - } - - if (num_bad > 7 || (num_bad > 2 && num_bad >= 0.75 * num_steps)) { - index_index_dis_dir2[j][k] = std::make_tuple(-1, -1, 1e9); - } - } - } - } - - // deal with MST of first type - { - boost::adjacency_list> - temp_graph(num); - - for (size_t j = 0; j != num; j++) { - for (size_t k = j + 1; k != num; k++) { - int index1 = j; - int index2 = k; - if (std::get<0>(index_index_dis[j][k]) >= 0) { - add_edge(index1, index2, std::get<2>(index_index_dis[j][k]), temp_graph); - // LogDebug(index1 << " " << index2 << " " << std::get<2>(index_index_dis[j][k])); - } - } - } - - // Process MST - process_mst_deterministically(temp_graph, index_index_dis, index_index_dis_mst); - } - - // MST of the direction ... 
- { - boost::adjacency_list> - temp_graph(num); - - for (size_t j = 0; j != num; j++) { - for (size_t k = j + 1; k != num; k++) { - int index1 = j; - int index2 = k; - if (std::get<0>(index_index_dis_dir1[j][k]) >= 0 || std::get<0>(index_index_dis_dir2[j][k]) >= 0) { - add_edge( - index1, index2, - std::min(std::get<2>(index_index_dis_dir1[j][k]), std::get<2>(index_index_dis_dir2[j][k])), - temp_graph); - // LogDebug(index1 << " " << index2 << " " - // << std::min(std::get<2>(index_index_dis_dir1[j][k]), - // std::get<2>(index_index_dis_dir2[j][k]))); - } - } - } - - process_mst_deterministically(temp_graph, index_index_dis, index_index_dis_dir_mst); - - } - - for (size_t j = 0; j != num; j++) { - for (size_t k = j + 1; k != num; k++) { - if (std::get<2>(index_index_dis[j][k]) < 3 * units::cm) { - index_index_dis_mst[j][k] = index_index_dis[j][k]; - } - - // establish the path ... - if (std::get<0>(index_index_dis_mst[j][k]) >= 0) { - const int gind1 = pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_mst[j][k])); - const int gind2 = pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_mst[j][k])); - // auto edge = - // add_edge(gind1, gind2, *m_graph); - // LogDebug(gind1 << " " << gind2 << " " << std::get<2>(index_index_dis_mst[j][k])); - float dis; - // if (edge.second) { - if (std::get<2>(index_index_dis_mst[j][k]) > 5 * units::cm) { - dis = std::get<2>(index_index_dis_mst[j][k]); - } - else { - dis = std::get<2>(index_index_dis_mst[j][k]); - } - // } - /*auto edge =*/ add_edge(gind1, gind2, WireCell::PointCloud::Facade::EdgeProp(dis), *m_graph); - } - - if (std::get<0>(index_index_dis_dir_mst[j][k]) >= 0) { - if (std::get<0>(index_index_dis_dir1[j][k]) >= 0) { - const int gind1 = pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_dir1[j][k])); - const int gind2 = pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_dir1[j][k])); - //auto edge = add_edge(gind1, gind2, *m_graph); - // LogDebug(gind1 << " " << gind2 << " " 
<< std::get<2>(index_index_dis_dir1[j][k])); - float dis; - // if (edge.second) { - if (std::get<2>(index_index_dis_dir1[j][k]) > 5 * units::cm) { - dis = std::get<2>(index_index_dis_dir1[j][k]) * 1.1; + // std::cout << "groupids size: " << groupids.size() << std::endl; + if (!groupids.empty()) { + // cluster_2 = new PR3DCluster(2); + groupids.insert(2); + for (size_t idx=0; idx < mcells.size(); idx++) { + Blob *mcell = mcells.at(idx); + if (mcell->points(pcname, coords)[0].x() < low_limit) { + if (groupids.find(1) != groupids.end()) { + // cluster_1->AddCell(mcell, mcell->GetTimeSlice()); + b2groupid[idx] = 1; } else { - dis = std::get<2>(index_index_dis_dir1[j][k]); + // cluster_2->AddCell(mcell, mcell->GetTimeSlice()); + b2groupid[idx] = 2; } - // } - /*auto edge =*/ add_edge(gind1, gind2, WireCell::PointCloud::Facade::EdgeProp(dis), *m_graph); } - if (std::get<0>(index_index_dis_dir2[j][k]) >= 0) { - const int gind1 = pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_dir2[j][k])); - const int gind2 = pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_dir2[j][k])); - // auto edge = add_edge(gind1, gind2, *m_graph); - // LogDebug(gind1 << " " << gind2 << " " << std::get<2>(index_index_dis_dir2[j][k])); - // if (edge.second) { - float dis; - if (std::get<2>(index_index_dis_dir2[j][k]) > 5 * units::cm) { - dis = std::get<2>(index_index_dis_dir2[j][k]) * 1.1; + else if (mcell->points(pcname, coords)[0].x() > high_limit) { + if (groupids.find(3) != groupids.end()) { + // cluster_3->AddCell(mcell, mcell->GetTimeSlice()); + b2groupid[idx] = 3; } else { - dis = std::get<2>(index_index_dis_dir2[j][k]); + // cluster_2->AddCell(mcell, mcell->GetTimeSlice()); + b2groupid[idx] = 2; } - // } - /*auto edge =*/ add_edge(gind1, gind2, WireCell::PointCloud::Facade::EdgeProp(dis), *m_graph); - } - } - - } // k - } // j -} -// #define LogDebug(x) - -void Cluster::Connect_graph() const{ - // now form the connected components - std::vector 
component(num_vertices(*m_graph)); - const size_t num = connected_components(*m_graph, &component[0]); - - // Create ordered components - std::vector ordered_components; - ordered_components.reserve(component.size()); - for (size_t i = 0; i < component.size(); ++i) { - ordered_components.emplace_back(i); - } - - // Assign vertices to components - for (size_t i = 0; i < component.size(); ++i) { - ordered_components[component[i]].add_vertex(i); - } - - // Sort components by minimum vertex index - std::sort(ordered_components.begin(), ordered_components.end(), - [](const ComponentInfo& a, const ComponentInfo& b) { - return a.min_vertex < b.min_vertex; - }); - - LogDebug(" npoints " << npoints() << " nconnected " << num); - if (num <= 1) return; - - std::vector> pt_clouds; - std::vector> pt_clouds_global_indices; - // use this to link the global index to the local index - // std::vector> pt_clouds_global_indices(num); - // for (size_t i = 0; i != num; i++) { - // pt_clouds.push_back(std::make_shared()); - // } - // for (size_t i = 0; i != component.size(); ++i) { - // pt_clouds.at(component[i])->add({points()[0][i], points()[1][i], points()[2][i]}); - // pt_clouds_global_indices.at(component[i]).push_back(i); - // } - // Create point clouds using ordered components - for (const auto& comp : ordered_components) { - auto pt_cloud = std::make_shared(); - std::vector global_indices; - - for (size_t vertex_idx : comp.vertex_indices) { - pt_cloud->add({points()[0][vertex_idx], points()[1][vertex_idx], points()[2][vertex_idx]}); - global_indices.push_back(vertex_idx); - } - - pt_clouds.push_back(pt_cloud); - pt_clouds_global_indices.push_back(global_indices); - } - - /// DEBUGONLY: - if (0) { - for (size_t i = 0; i != num; i++) { - std::cout << *pt_clouds.at(i) << std::endl; - std::cout << "global indices: "; - for (size_t j = 0; j != pt_clouds_global_indices.at(i).size(); j++) { - std::cout << pt_clouds_global_indices.at(i).at(j) << " "; - } - std::cout << std::endl; - } - } 
- - // Initiate dist. metrics - std::vector>> index_index_dis( - num, std::vector>(num)); - std::vector>> index_index_dis_mst( - num, std::vector>(num)); - - std::vector>> index_index_dis_dir1( - num, std::vector>(num)); - std::vector>> index_index_dis_dir2( - num, std::vector>(num)); - std::vector>> index_index_dis_dir_mst( - num, std::vector>(num)); - - for (size_t j = 0; j != num; j++) { - for (size_t k = 0; k != num; k++) { - index_index_dis[j][k] = std::make_tuple(-1, -1, 1e9); - index_index_dis_mst[j][k] = std::make_tuple(-1, -1, 1e9); - - index_index_dis_dir1[j][k] = std::make_tuple(-1, -1, 1e9); - index_index_dis_dir2[j][k] = std::make_tuple(-1, -1, 1e9); - index_index_dis_dir_mst[j][k] = std::make_tuple(-1, -1, 1e9); - } - } - - boost::adjacency_list> - temp_graph(num); - - for (size_t j=0;j!=num;j++){ - for (size_t k=j+1;k!=num;k++){ - index_index_dis[j][k] = pt_clouds.at(j)->get_closest_points(*pt_clouds.at(k)); - - // geo_point_t test_p3 = pt_clouds.at(j)->point(std::get<0>(index_index_dis[j][k])); - // geo_point_t test_p4 = pt_clouds.at(k)->point(std::get<1>(index_index_dis[j][k])); - // if (nchildren()==3449) std::cout << "A0: " << test_p3 << " " << test_p4 << " " << j << " " << k << " " << pt_clouds.at(j)->get_num_points() << " " << pt_clouds.at(k)->get_num_points() << " " << std::get<2>(index_index_dis[j][k])/units::cm << std::endl; - - - int index1 = j; - int index2 = k; - /*auto edge =*/ add_edge(index1,index2, std::get<2>(index_index_dis[j][k]), temp_graph); - } - } - - process_mst_deterministically(temp_graph, index_index_dis, index_index_dis_mst); - - // { - // std::vector possible_root_vertex; - // std::vector component(num_vertices(temp_graph)); - // const int num1 = connected_components(temp_graph, &component[0]); - // possible_root_vertex.resize(num1); - // std::vector::size_type i; - // for (i = 0; i != component.size(); ++i) { - // possible_root_vertex.at(component[i]) = i; - // } - - // for (size_t i = 0; i != 
possible_root_vertex.size(); i++) { - // std::vector::vertex_descriptor> predecessors(num_vertices(temp_graph)); - - // prim_minimum_spanning_tree(temp_graph, &predecessors[0], - // boost::root_vertex(possible_root_vertex.at(i))); - - // for (size_t j = 0; j != predecessors.size(); ++j) { - // if (predecessors[j] != j) { - // if (j < predecessors[j]) { - // index_index_dis_mst[j][predecessors[j]] = index_index_dis[j][predecessors[j]]; - // } - // else { - // index_index_dis_mst[predecessors[j]][j] = index_index_dis[predecessors[j]][j]; - // } - // // std::cout << j << " " << predecessors[j] << " " << std::endl; - // } - // else { - // // std::cout << j << " " << std::endl; - // } - // } - // } - // } - - for (size_t j = 0; j != num; j++) { - for (size_t k = j + 1; k != num; k++) { - if (std::get<2>(index_index_dis[j][k])<3*units::cm){ - index_index_dis_mst[j][k] = index_index_dis[j][k]; - } - - if (num < 100) - if (pt_clouds.at(j)->get_num_points()>100 && pt_clouds.at(k)->get_num_points()>100 && - (pt_clouds.at(j)->get_num_points()+pt_clouds.at(k)->get_num_points()) > 400){ - geo_point_t p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis[j][k])); - geo_point_t p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis[j][k])); - - geo_point_t dir1 = vhough_transform(p1, 30 * units::cm, HoughParamSpace::theta_phi, pt_clouds.at(j), pt_clouds_global_indices.at(j)); - geo_point_t dir2 = vhough_transform(p2, 30 * units::cm, HoughParamSpace::theta_phi, pt_clouds.at(k), pt_clouds_global_indices.at(k)); - dir1 = dir1 * -1; - dir2 = dir2 * -1; - - std::pair result1 = pt_clouds.at(k)->get_closest_point_along_vec( - p1, dir1, 80 * units::cm, 5 * units::cm, 7.5, 3 * units::cm); - - if (result1.first >= 0) { - index_index_dis_dir1[j][k] = - std::make_tuple(std::get<0>(index_index_dis[j][k]), result1.first, result1.second); - } - - std::pair result2 = pt_clouds.at(j)->get_closest_point_along_vec( - p2, dir2, 80 * units::cm, 5 * units::cm, 7.5, 3 * units::cm); - - if 
(result2.first >= 0) { - index_index_dis_dir2[j][k] = - std::make_tuple(result2.first, std::get<1>(index_index_dis[j][k]), result2.second); - } - } - } - } - - // MST for the directionality ... - { - boost::adjacency_list> - temp_graph(num); - - for (size_t j = 0; j != num; j++) { - for (size_t k = j + 1; k != num; k++) { - int index1 = j; - int index2 = k; - if (std::get<0>(index_index_dis_dir1[j][k]) >= 0 || std::get<0>(index_index_dis_dir2[j][k]) >= 0) { - add_edge( - index1, index2, - std::min(std::get<2>(index_index_dis_dir1[j][k]), std::get<2>(index_index_dis_dir2[j][k])), - temp_graph); - // LogDebug(index1 << " " << index2 << " " - // << std::min(std::get<2>(index_index_dis_dir1[j][k]), - // std::get<2>(index_index_dis_dir2[j][k]))); - } - } - } - - process_mst_deterministically(temp_graph, index_index_dis, index_index_dis_dir_mst); - // { - // std::vector possible_root_vertex; - // std::vector component(num_vertices(temp_graph)); - // const int num1 = connected_components(temp_graph, &component[0]); - // possible_root_vertex.resize(num1); - // std::vector::size_type i; - // for (i = 0; i != component.size(); ++i) { - // possible_root_vertex.at(component[i]) = i; - // } - - // for (size_t i = 0; i != possible_root_vertex.size(); i++) { - // std::vector::vertex_descriptor> predecessors(num_vertices(temp_graph)); - // prim_minimum_spanning_tree(temp_graph, &predecessors[0], - // boost::root_vertex(possible_root_vertex.at(i))); - // for (size_t j = 0; j != predecessors.size(); ++j) { - // if (predecessors[j] != j) { - // if (j < predecessors[j]) { - // index_index_dis_dir_mst[j][predecessors[j]] = index_index_dis[j][predecessors[j]]; - // } - // else { - // index_index_dis_dir_mst[predecessors[j]][j] = index_index_dis[predecessors[j]][j]; - // } - // // std::cout << j << " " << predecessors[j] << " " << std::endl; - // } - // else { - // // std::cout << j << " " << std::endl; - // } - // } - // } - // } - } - - // now complete graph according to the direction 
- // according to direction ... - for (size_t j = 0; j != num; j++) { - for (size_t k = j + 1; k != num; k++) { - if (std::get<0>(index_index_dis_mst[j][k]) >= 0) { - const int gind1 = pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_mst[j][k])); - const int gind2 = pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_mst[j][k])); - - // geo_point_t test_p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis_mst[j][k])); - // geo_point_t test_p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis_mst[j][k])); - // if (nchildren()==3449) std::cout << "A1: " << test_p1 << " " << test_p2 << " " << j << " " << k << " " << pt_clouds.at(j)->get_num_points() << " " << pt_clouds.at(k)->get_num_points() << " " << std::get<2>(index_index_dis_mst[j][k])/units::cm << std::endl; - - - // auto edge = - // add_edge(gind1, gind2, *m_graph); - // // LogDebug(gind1 << " " << gind2 << " " << std::get<2>(index_index_dis_mst[j][k])); - // if (edge.second) { - float dis; - if (std::get<2>(index_index_dis_mst[j][k]) > 5 * units::cm) { - dis = std::get<2>(index_index_dis_mst[j][k]); } else { - dis = std::get<2>(index_index_dis_mst[j][k]); - } - // } - /*auto edge =*/ add_edge(gind1, gind2, WireCell::PointCloud::Facade::EdgeProp(dis), *m_graph); - } - - if (std::get<0>(index_index_dis_dir_mst[j][k]) >= 0) { - if (std::get<0>(index_index_dis_dir1[j][k]) >= 0) { - const int gind1 = pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_dir1[j][k])); - const int gind2 = pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_dir1[j][k])); - - // geo_point_t test_p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis_dir1[j][k])); - // geo_point_t test_p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis_dir1[j][k])); - // if (nchildren()==3449) std::cout << "A2: " << test_p1 << " " << test_p2 << " " << j << " " << k << " " << pt_clouds.at(j)->get_num_points() << " " << pt_clouds.at(k)->get_num_points()<< " " << 
std::get<2>(index_index_dis_dir1[j][k])/units::cm << std::endl; - - // auto edge = add_edge(gind1, gind2, *m_graph); - // // LogDebug(gind1 << " " << gind2 << " " << std::get<2>(index_index_dis_dir1[j][k])); - // if (edge.second) { - float dis; - if (std::get<2>(index_index_dis_dir1[j][k]) > 5 * units::cm) { - dis = std::get<2>(index_index_dis_dir1[j][k]) * 1.2; - } - else { - dis = std::get<2>(index_index_dis_dir1[j][k]); - } - // } - /*auto edge =*/ add_edge(gind1, gind2, WireCell::PointCloud::Facade::EdgeProp(dis), *m_graph); - } - if (std::get<0>(index_index_dis_dir2[j][k]) >= 0) { - const int gind1 = pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_dir2[j][k])); - const int gind2 = pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_dir2[j][k])); - // auto edge = add_edge(gind1, gind2, *m_graph); - // // LogDebug(gind1 << " " << gind2 << " " << std::get<2>(index_index_dis_dir2[j][k])); - // if (edge.second) { - - // geo_point_t test_p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis_dir2[j][k])); - // geo_point_t test_p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis_dir2[j][k])); - // if (nchildren()==3449) std::cout << "A3: " << test_p1 << " " << test_p2 << " " << j << " " << k << " " << pt_clouds.at(j)->get_num_points() << " " << pt_clouds.at(k)->get_num_points()<< " " << std::get<2>(index_index_dis_dir2[j][k])/units::cm << std::endl; - - float dis; - if (std::get<2>(index_index_dis_dir2[j][k]) > 5 * units::cm) { - dis = std::get<2>(index_index_dis_dir2[j][k]) * 1.2; - } - else { - dis = std::get<2>(index_index_dis_dir2[j][k]); - } - // } - /*auto edge =*/ add_edge(gind1, gind2, WireCell::PointCloud::Facade::EdgeProp(dis), *m_graph); + // cluster_2->AddCell(mcell, mcell->GetTimeSlice()); + b2groupid[idx] = 2; } } - + // if (cluster_1 != 0) clusters.push_back(cluster_1); + // clusters.push_back(cluster_2); + // if (cluster_3 != 0) clusters.push_back(cluster_3); } } - - + return b2groupid; } +bool 
Cluster::judge_vertex(geo_point_t& p_test, IDetectorVolumes::pointer dv, const double asy_cut, const double occupied_cut) +{ + p_test = this->calc_ave_pos(p_test, 3 * units::cm); -void Cluster::Connect_graph_overclustering_protection(const bool use_ctpc) const { - // Constants for wire angles - const auto& tp = grouping()->get_params(); - //std::cout << "Test: face " << tp.face << std::endl; - - // const double pi = 3.141592653589793; - const geo_vector_t drift_dir(1, 0, 0); - const auto [angle_u,angle_v,angle_w] = grouping()->wire_angles(); - const geo_point_t U_dir(0,cos(angle_u),sin(angle_u)); - const geo_point_t V_dir(0,cos(angle_v),sin(angle_v)); - const geo_point_t W_dir(0,cos(angle_w),sin(angle_w)); - - // Form connected components - std::vector component(num_vertices(*m_graph)); - const size_t num = connected_components(*m_graph, &component[0]); - - LogDebug(" npoints " << npoints() << " nconnected " << num); - if (num <= 1) return; - - // Create point clouds using connected components - std::vector> pt_clouds; - std::vector> pt_clouds_global_indices; - - // Create ordered components - std::vector ordered_components; - ordered_components.reserve(component.size()); - for (size_t i = 0; i < component.size(); ++i) { - ordered_components.emplace_back(i); - } - - // Assign vertices to components - for (size_t i = 0; i < component.size(); ++i) { - ordered_components[component[i]].add_vertex(i); - } - - // Sort components by minimum vertex index - std::sort(ordered_components.begin(), ordered_components.end(), - [](const ComponentInfo& a, const ComponentInfo& b) { - return a.min_vertex < b.min_vertex; - }); - - // Create point clouds for each component - for (const auto& comp : ordered_components) { - auto pt_cloud = std::make_shared(); - std::vector global_indices; - - for (size_t vertex_idx : comp.vertex_indices) { - pt_cloud->add({points()[0][vertex_idx], points()[1][vertex_idx], points()[2][vertex_idx]}); - global_indices.push_back(vertex_idx); - } - 
pt_clouds.push_back(pt_cloud); - pt_clouds_global_indices.push_back(global_indices); - } - - // pt_clouds.resize(num); - // pt_clouds_global_indices.resize(num); - - // // Initialize all point clouds - // for(size_t j = 0; j < num; j++) { - // pt_clouds[j] = std::make_shared(); - // } + geo_point_t dir = this->vhough_transform(p_test, 15 * units::cm); - // // Add points directly using component mapping - // for(size_t i = 0; i < component.size(); ++i) { - // pt_clouds[component[i]]->add({points()[0][i], points()[1][i], points()[2][i]}); - // pt_clouds_global_indices[component[i]].push_back(i); - // } + // judge if this is end points + std::pair num_pts = this->ndipole(p_test, dir, 25 * units::cm); - // std::cout << "Test: "<< num << std::endl; + if ((num_pts.first + num_pts.second) == 0) return false; - // Initialize distance metrics - std::vector>> index_index_dis(num, std::vector>(num)); - std::vector>> index_index_dis_mst(num, std::vector>(num)); - std::vector>> index_index_dis_dir1(num, std::vector>(num)); - std::vector>> index_index_dis_dir2(num, std::vector>(num)); - std::vector>> index_index_dis_dir_mst(num, std::vector>(num)); + double asy = std::abs(num_pts.first - num_pts.second) / (num_pts.first + num_pts.second); - // Initialize all distances to inf - for (size_t j = 0; j != num; j++) { - for (size_t k = 0; k != num; k++) { - index_index_dis[j][k] = std::make_tuple(-1, -1, 1e9); - index_index_dis_mst[j][k] = std::make_tuple(-1, -1, 1e9); - index_index_dis_dir1[j][k] = std::make_tuple(-1, -1, 1e9); - index_index_dis_dir2[j][k] = std::make_tuple(-1, -1, 1e9); - index_index_dis_dir_mst[j][k] = std::make_tuple(-1, -1, 1e9); - } + if (asy > asy_cut) { + return true; } + else { + + // it might be better to directly use the closest point to find the wire plane id ... + auto wpid = dv->contained_by(p_test); + // what if the point is not found ... 
+ if (wpid.apa()==-1){ + auto idx = this->get_closest_point_index(p_test); + // Given the idx, one can directly find the wpid actually ... + wpid = dv->contained_by(point3d(idx)); + } + + // Create wpids for all three planes with the same APA and face + WirePlaneId wpid_u(kUlayer, wpid.face(), wpid.apa()); + WirePlaneId wpid_v(kVlayer, wpid.face(), wpid.apa()); + WirePlaneId wpid_w(kWlayer, wpid.face(), wpid.apa()); + // Get wire directions for all planes + Vector wire_dir_u = dv->wire_direction(wpid_u); + Vector wire_dir_v = dv->wire_direction(wpid_v); + Vector wire_dir_w = dv->wire_direction(wpid_w); + // Calculate angles + double angle_u = std::atan2(wire_dir_u.z(), wire_dir_u.y()); + double angle_v = std::atan2(wire_dir_v.z(), wire_dir_v.y()); + double angle_w = std::atan2(wire_dir_w.z(), wire_dir_w.y()); + + auto temp_point_cloud = std::make_shared(angle_u, angle_v, angle_w); + dir = dir.norm(); + // PointVector pts; + std::vector pts; + for (size_t i = 0; i != 40; i++) { + geo_point_t pt(p_test.x() + i * 0.5 * units::cm * dir.x(), p_test.y() + i * 0.5 * units::cm * dir.y(), + p_test.z() + i * 0.5 * units::cm * dir.z()); + // WCP::WCPointCloud::WCPoint& wcp = point_cloud->get_closest_wcpoint(pt); + auto [_, wcp] = get_closest_wcpoint(pt); - // Calculate distances between components - for (size_t j = 0; j != num; j++) { - for (size_t k = j + 1; k != num; k++) { - // Get closest points between components - index_index_dis[j][k] = pt_clouds.at(j)->get_closest_points(*pt_clouds.at(k)); - - // Skip small clouds - if ((num < 100 && pt_clouds.at(j)->get_num_points() > 100 && pt_clouds.at(k)->get_num_points() > 100 && - (pt_clouds.at(j)->get_num_points() + pt_clouds.at(k)->get_num_points()) > 400) || - (pt_clouds.at(j)->get_num_points() > 500 && pt_clouds.at(k)->get_num_points() > 500)) { - - // Get closest points and calculate directions - geo_point_t p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis[j][k])); - geo_point_t p2 = 
pt_clouds.at(k)->point(std::get<1>(index_index_dis[j][k])); - - geo_vector_t dir1 = vhough_transform(p1, 30 * units::cm, HoughParamSpace::theta_phi, pt_clouds.at(j), - pt_clouds_global_indices.at(j)); - geo_vector_t dir2 = vhough_transform(p2, 30 * units::cm, HoughParamSpace::theta_phi, pt_clouds.at(k), - pt_clouds_global_indices.at(k)); - dir1 = dir1 * -1; - dir2 = dir2 * -1; - - std::pair result1 = pt_clouds.at(k)->get_closest_point_along_vec(p1, dir1, 80 * units::cm, 5 * units::cm, 7.5, 3 * units::cm); - - if (result1.first >= 0) { - index_index_dis_dir1[j][k] = std::make_tuple(std::get<0>(index_index_dis[j][k]), - result1.first, result1.second); - } - - std::pair result2 = pt_clouds.at(j)->get_closest_point_along_vec(p2, dir2, 80 * units::cm, 5 * units::cm, 7.5, 3 * units::cm); - - if (result2.first >= 0) { - index_index_dis_dir2[j][k] = std::make_tuple(result2.first, - std::get<1>(index_index_dis[j][k]), - result2.second); - } - } - // Now check the path - - { - geo_point_t p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis[j][k])); - geo_point_t p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis[j][k])); - - double dis = sqrt(pow(p1.x() - p2.x(), 2) + pow(p1.y() - p2.y(), 2) + pow(p1.z() - p2.z(), 2)); - double step_dis = 1.0 * units::cm; - int num_steps = dis/step_dis + 1; - - - - // Track different types of "bad" points - int num_bad[4] = {0,0,0,0}; // more than one of three are bad - int num_bad1[4] = {0,0,0,0}; // at least one of three are bad - int num_bad2[3] = {0,0,0}; // number of dead channels - - // Check points along path - for (int ii = 0; ii != num_steps; ii++) { - geo_point_t test_p( - p1.x() + (p2.x() - p1.x())/num_steps*(ii + 1), - p1.y() + (p2.y() - p1.y())/num_steps*(ii + 1), - p1.z() + (p2.z() - p1.z())/num_steps*(ii + 1) - ); - - // Test point quality using grouping parameters - std::vector scores; - if (use_ctpc) { - scores = grouping()->test_good_point(test_p, tp.face); - - // Check overall quality - if (scores[0] + scores[3] + 
scores[1] + scores[4] + (scores[2]+scores[5])*2 < 3) { - num_bad[0]++; - } - if (scores[0]+scores[3]==0) num_bad[1]++; - if (scores[1]+scores[4]==0) num_bad[2]++; - if (scores[2]+scores[5]==0) num_bad[3]++; - - if (scores[3]!=0) num_bad2[0]++; - if (scores[4]!=0) num_bad2[1]++; - if (scores[5]!=0) num_bad2[2]++; - - if (scores[0] + scores[3] + scores[1] + scores[4] + (scores[2]+scores[5]) < 3) { - num_bad1[0]++; - } - if (scores[0]+scores[3]==0) num_bad1[1]++; - if (scores[1]+scores[4]==0) num_bad1[2]++; - if (scores[2]+scores[5]==0) num_bad1[3]++; - } - } - - // if (kd_blobs().size()==244){ - // std::cout << "Test: Dis: " << p1 << " " << p2 << " " << dis << std::endl; - // std::cout << "Test: num_bad1: " << num_bad1[0] << " " << num_bad1[1] << " " << num_bad1[2] << " " << num_bad1[3] << std::endl; - // std::cout << "Test: num_bad2: " << num_bad2[0] << " " << num_bad2[1] << " " << num_bad2[2] << std::endl; - // std::cout << "Test: num_bad: " << num_bad[0] << " " << num_bad[1] << " " << num_bad[2] << " " << num_bad[3] << std::endl; - // } - // Calculate angles between directions - geo_vector_t tempV1(0, p2.y() - p1.y(), p2.z() - p1.z()); - geo_vector_t tempV5; - - double angle1 = tempV1.angle(U_dir); - tempV5.set(fabs(p2.x() - p1.x()), - sqrt(pow(p2.y() - p1.y(), 2) + pow(p2.z() - p1.z(), 2)) * sin(angle1), - 0); - angle1 = tempV5.angle(drift_dir); - - double angle2 = tempV1.angle(V_dir); - tempV5.set(fabs(p2.x() - p1.x()), - sqrt(pow(p2.y() - p1.y(), 2) + pow(p2.z() - p1.z(), 2)) * sin(angle2), - 0); - angle2 = tempV5.angle(drift_dir); - - double angle1p = tempV1.angle(W_dir); - tempV5.set(fabs(p2.x() - p1.x()), - sqrt(pow(p2.y() - p1.y(), 2) + pow(p2.z() - p1.z(), 2)) * sin(angle1p), - 0); - angle1p = tempV5.angle(drift_dir); - - tempV5.set(p2.x() - p1.x(), p2.y() - p1.y(), p2.z() - p1.z()); - double angle3 = tempV5.angle(drift_dir); - - bool flag_strong_check = true; - - // Define constants for readability - constexpr double pi = 3.141592653589793; - constexpr 
double perp_angle_tol = 10.0/180.0*pi; - constexpr double wire_angle_tol = 12.5/180.0*pi; - constexpr double perp_angle = pi/2.0; - constexpr double invalid_dist = 1e9; - - if (fabs(angle3 - perp_angle) < perp_angle_tol) { - geo_vector_t tempV2 = vhough_transform(p1, 15*units::cm); - geo_vector_t tempV3 = vhough_transform(p2, 15*units::cm); - - if (fabs(tempV2.angle(drift_dir) - perp_angle) < perp_angle_tol && - fabs(tempV3.angle(drift_dir) - perp_angle) < perp_angle_tol) { - flag_strong_check = false; - } - } - else if (angle1 < wire_angle_tol || angle2 < wire_angle_tol || angle1p < wire_angle_tol) { - flag_strong_check = false; - } - - // Helper function to check if ratio exceeds threshold - auto exceeds_ratio = [](int val, int steps, double ratio = 0.75) { - return val >= ratio * steps; - }; - - // Helper function to invalidate distance - auto invalidate_distance = [&]() { - index_index_dis[j][k] = std::make_tuple(-1, -1, invalid_dist); - }; - - if (flag_strong_check) { - if (num_bad1[0] > 7 || (num_bad1[0] > 2 && exceeds_ratio(num_bad1[0], num_steps))) { - invalidate_distance(); - } - } - else { - bool parallel_angles = (angle1 < wire_angle_tol && angle2 < wire_angle_tol) || - (angle1p < wire_angle_tol && angle1 < wire_angle_tol) || - (angle1p < wire_angle_tol && angle2 < wire_angle_tol); - - if (parallel_angles) { - if (num_bad[0] > 7 || (num_bad[0] > 2 && exceeds_ratio(num_bad[0], num_steps))) { - invalidate_distance(); - } - } - else if (angle1 < wire_angle_tol) { - int sum_bad = num_bad[2] + num_bad[3]; - if (sum_bad > 9 || (sum_bad > 2 && exceeds_ratio(sum_bad, num_steps)) || num_bad[3] >= 3) { - invalidate_distance(); - } - } - else if (angle2 < wire_angle_tol) { - int sum_bad = num_bad[1] + num_bad[3]; - if (sum_bad > 9 || (sum_bad > 2 && exceeds_ratio(sum_bad, num_steps)) || num_bad[3] >= 3) { - invalidate_distance(); - } - } - else if (angle1p < wire_angle_tol) { - int sum_bad = num_bad[2] + num_bad[1]; - if (sum_bad > 9 || (sum_bad > 2 && 
exceeds_ratio(sum_bad, num_steps))) { - invalidate_distance(); - } - } - else if (num_bad[0] > 7 || (num_bad[0] > 2 && exceeds_ratio(num_bad[0], num_steps))) { - invalidate_distance(); - } - } + if (sqrt(pow(wcp.x() - pt.x(), 2) + pow(wcp.y() - pt.y(), 2) + pow(wcp.z() - pt.z(), 2)) < + std::max(1.8 * units::cm, i * 0.5 * units::cm * sin(18. / 180. * 3.1415926))) { + pt = wcp; } - - // Now check path again ... - if (std::get<0>(index_index_dis_dir1[j][k]) >= 0) { - geo_point_t p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis_dir1[j][k])); //point3d(std::get<0>(index_index_dis_dir1[j][k])); - geo_point_t p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis_dir1[j][k])); //point3d(std::get<1>(index_index_dis_dir1[j][k])); - - double dis = sqrt(pow(p1.x() - p2.x(), 2) + - pow(p1.y() - p2.y(), 2) + - pow(p1.z() - p2.z(), 2)); - double step_dis = 1.0 * units::cm; - int num_steps = dis/step_dis + 1; - int num_bad = 0; - int num_bad1 = 0; - - // Check intermediate points along path - for (int ii = 0; ii != num_steps; ii++) { - geo_point_t test_p( - p1.x() + (p2.x() - p1.x())/num_steps*(ii + 1), - p1.y() + (p2.y() - p1.y())/num_steps*(ii + 1), - p1.z() + (p2.z() - p1.z())/num_steps*(ii + 1) - ); - - if (use_ctpc) { - /// FIXME: assumes clusters are bounded to 1 face! Need to fix this. 
- const bool good_point = grouping()->is_good_point(test_p, tp.face); - if (!good_point) { - num_bad++; - } - if (!grouping()->is_good_point(test_p, tp.face, 0.6*units::cm, 1, 0)) { - num_bad1++; - } - } - } - - // Calculate angles - geo_vector_t tempV1(0, p2.y() - p1.y(), p2.z() - p1.z()); - geo_vector_t tempV5; - - double angle1 = tempV1.angle(U_dir); - tempV5.set(fabs(p2.x() - p1.x()), - sqrt(pow(p2.y() - p1.y(), 2) + pow(p2.z() - p1.z(), 2))*sin(angle1), - 0); - angle1 = tempV5.angle(drift_dir); - - double angle2 = tempV1.angle(V_dir); - tempV5.set(fabs(p2.x() - p1.x()), - sqrt(pow(p2.y() - p1.y(), 2) + pow(p2.z() - p1.z(), 2))*sin(angle2), - 0); - angle2 = tempV5.angle(drift_dir); - - tempV5.set(p2.x() - p1.x(), p2.y() - p1.y(), p2.z() - p1.z()); - double angle3 = tempV5.angle(drift_dir); - - double angle1p = tempV1.angle(W_dir); - tempV5.set(fabs(p2.x() - p1.x()), - sqrt(pow(p2.y() - p1.y(), 2) + pow(p2.z() - p1.z(), 2))*sin(angle1p), - 0); - angle1p = tempV5.angle(drift_dir); - - const double pi = 3.141592653589793; - if (fabs(angle3 - pi/2) < 10.0/180.0*pi || - angle1 < 12.5/180.0*pi || - angle2 < 12.5/180.0*pi || - angle1p < 7.5/180.0*pi) { - // Parallel or prolonged case - if (num_bad > 7 || (num_bad > 2 && num_bad >= 0.75*num_steps)) { - index_index_dis_dir1[j][k] = std::make_tuple(-1, -1, 1e9); - } - } - else { - if (num_bad1 > 7 || (num_bad1 > 2 && num_bad1 >= 0.75*num_steps)) { - index_index_dis_dir1[j][k] = std::make_tuple(-1, -1, 1e9); - } + pts.push_back(pt); + if (i != 0) { + geo_point_t pt1(p_test.x() - i * 0.5 * units::cm * dir.x(), p_test.y() - i * 0.5 * units::cm * dir.y(), + p_test.z() - i * 0.5 * units::cm * dir.z()); + // WCP::WCPointCloud::WCPoint& wcp1 = point_cloud->get_closest_wcpoint(pt1); + auto [_, wcp1] = get_closest_wcpoint(pt1); + if (sqrt(pow(wcp1.x() - pt1.x(), 2) + pow(wcp1.y() - pt1.y(), 2) + pow(wcp1.z() - pt1.z(), 2)) < + std::max(1.8 * units::cm, i * 0.5 * units::cm * sin(18. / 180. 
* 3.1415926))) { + pt1 = wcp1; } + pts.push_back(pt1); } + } + // temp_point_cloud.AddPoints(pts); + for (auto& pt : pts) { + temp_point_cloud->add(pt); + } + // temp_point_cloud.build_kdtree_index(); - //Now check path again ... - // Now check the path... - if (std::get<0>(index_index_dis_dir2[j][k]) >= 0) { - geo_point_t p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis_dir2[j][k]));//point3d(std::get<0>(index_index_dis_dir2[j][k])); - geo_point_t p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis_dir2[j][k]));//point3d(std::get<1>(index_index_dis_dir2[j][k])); - - double dis = sqrt(pow(p1.x() - p2.x(), 2) + - pow(p1.y() - p2.y(), 2) + - pow(p1.z() - p2.z(), 2)); - double step_dis = 1.0 * units::cm; - int num_steps = dis/step_dis + 1; - int num_bad = 0; - int num_bad1 = 0; - - // Check points along path - for (int ii = 0; ii != num_steps; ii++) { - geo_point_t test_p( - p1.x() + (p2.x() - p1.x())/num_steps*(ii + 1), - p1.y() + (p2.y() - p1.y())/num_steps*(ii + 1), - p1.z() + (p2.z() - p1.z())/num_steps*(ii + 1) - ); - - if (use_ctpc) { - /// FIXME: assumes clusters are bounded to 1 face! Need to fix this. 
- const bool good_point = grouping()->is_good_point(test_p, tp.face); - if (!good_point) { - num_bad++; - } - if (!grouping()->is_good_point(test_p, tp.face, 0.6*units::cm, 1, 0)) { - num_bad1++; - } - } - } + int temp_num_total_points = 0; + int temp_num_occupied_points = 0; - // Calculate angles between directions - geo_vector_t tempV1(0, p2.y() - p1.y(), p2.z() - p1.z()); - geo_vector_t tempV5; - - double angle1 = tempV1.angle(U_dir); - tempV5.set(fabs(p2.x() - p1.x()), - sqrt(pow(p2.y() - p1.y(), 2) + pow(p2.z() - p1.z(), 2))*sin(angle1), - 0); - angle1 = tempV5.angle(drift_dir); - - double angle2 = tempV1.angle(V_dir); - tempV5.set(fabs(p2.x() - p1.x()), - sqrt(pow(p2.y() - p1.y(), 2) + pow(p2.z() - p1.z(), 2))*sin(angle2), - 0); - angle2 = tempV5.angle(drift_dir); - - tempV5.set(p2.x() - p1.x(), p2.y() - p1.y(), p2.z() - p1.z()); - double angle3 = tempV5.angle(drift_dir); - - double angle1p = tempV1.angle(W_dir); - tempV5.set(fabs(p2.x() - p1.x()), - sqrt(pow(p2.y() - p1.y(), 2) + pow(p2.z() - p1.z(), 2))*sin(angle1p), - 0); - angle1p = tempV5.angle(drift_dir); - - const double pi = 3.141592653589793; - bool is_parallel = fabs(angle3 - pi/2) < 10.0/180.0*pi || - angle1 < 12.5/180.0*pi || - angle2 < 12.5/180.0*pi || - angle1p < 7.5/180.0*pi; - - if (is_parallel) { - // Parallel or prolonged case - if (num_bad > 7 || (num_bad > 2 && num_bad >= 0.75*num_steps)) { - index_index_dis_dir2[j][k] = std::make_tuple(-1, -1, 1e9); - } - } - else { - if (num_bad1 > 7 || (num_bad1 > 2 && num_bad1 >= 0.75*num_steps)) { - index_index_dis_dir2[j][k] = std::make_tuple(-1, -1, 1e9); - } - } - } - } - } + // const int N = point_cloud->get_num_points(); + const int N = this->npoints(); + // WCP::WCPointCloud& cloud = point_cloud->get_cloud(); + for (int i = 0; i != N; i++) { + // geo_point_t dir1(cloud.pts[i].x() - p_test.x(), cloud.pts[i].y() - p_test.y(), cloud.pts[i].z() - p_test.z()); + geo_point_t dir1 = this->point3d(i) - p_test; - // deal with MST of first type - { - 
boost::adjacency_list> - temp_graph(num); - // int temp_count = 0; - for (size_t j = 0; j != num; j++) { - for (size_t k = j + 1; k != num; k++) { - int index1 = j; - int index2 = k; - if (std::get<0>(index_index_dis[j][k]) >= 0) { - add_edge(index1, index2, std::get<2>(index_index_dis[j][k]), temp_graph); - // LogDebug(index1 << " " << index2 << " " << std::get<2>(index_index_dis[j][k])); - // temp_count ++; - } + if (dir1.magnitude() < 15 * units::cm) { + geo_point_t test_p1 = point3d(i); + temp_num_total_points++; + double dis[3]; + dis[0] = temp_point_cloud->get_closest_2d_dis(test_p1, 0).second; + dis[1] = temp_point_cloud->get_closest_2d_dis(test_p1, 1).second; + dis[2] = temp_point_cloud->get_closest_2d_dis(test_p1, 2).second; + if (dis[0] <= 1.5 * units::cm && dis[1] <= 1.5 * units::cm && dis[2] <= 2.4 * units::cm || + dis[0] <= 1.5 * units::cm && dis[2] <= 1.5 * units::cm && dis[1] <= 2.4 * units::cm || + dis[2] <= 1.5 * units::cm && dis[1] <= 1.5 * units::cm && dis[0] <= 2.4 * units::cm) + temp_num_occupied_points++; } } - // std::cout << "Test: Count: " << temp_count << std::endl; - // Process MST - process_mst_deterministically(temp_graph, index_index_dis, index_index_dis_mst); + if (temp_num_occupied_points < temp_num_total_points * occupied_cut) return true; } - // MST of the direction ... 
- { - boost::adjacency_list> - temp_graph(num); - - for (size_t j = 0; j != num; j++) { - for (size_t k = j + 1; k != num; k++) { - int index1 = j; - int index2 = k; - if (std::get<0>(index_index_dis_dir1[j][k]) >= 0 || std::get<0>(index_index_dis_dir2[j][k]) >= 0) { - add_edge( - index1, index2, - std::min(std::get<2>(index_index_dis_dir1[j][k]), std::get<2>(index_index_dis_dir2[j][k])), - temp_graph); - // LogDebug(index1 << " " << index2 << " " - // << std::min(std::get<2>(index_index_dis_dir1[j][k]), - // std::get<2>(index_index_dis_dir2[j][k]))); - } - } - } + // judge if there + + return false; +} - process_mst_deterministically(temp_graph, index_index_dis, index_index_dis_dir_mst); +bool Facade::cluster_less(const Cluster* a, const Cluster* b) +{ + if (a == b) return false; + { + const double la = a->get_length(); + const double lb = b->get_length(); + if (la < lb) return true; + if (lb < la) return false; } + { + const int na = a->nchildren(); + const int nb = b->nchildren(); + if (na < nb) return true; + if (nb < na) return false; + } + + const int na = a->npoints(); + const int nb = b->npoints(); + if (na < nb) return true; + if (nb < na) return false; - for (size_t j = 0; j != num; j++) { - for (size_t k = j + 1; k != num; k++) { - if (std::get<2>(index_index_dis[j][k]) < 3 * units::cm) { - index_index_dis_mst[j][k] = index_index_dis[j][k]; - } + // std::cout << "Cluster::cluster_less: na=" << na << " nb=" << nb << std::endl; + + auto wpids_a = a->wpids_blob(); + auto wpids_b = b->wpids_blob(); + std::set wpids_set; + wpids_set.insert(wpids_a.begin(), wpids_a.end()); + wpids_set.insert(wpids_b.begin(), wpids_b.end()); + + for (const auto& wpid : wpids_set) { + auto ar = a->get_uvwt_min(wpid.apa(), wpid.face()); + auto br = b->get_uvwt_min(wpid.apa(), wpid.face()); + if (get<0>(ar) < get<0>(br)) return true; + if (get<0>(br) < get<0>(ar)) return false; + if (get<1>(ar) < get<1>(br)) return true; + if (get<1>(br) < get<1>(ar)) return false; + if 
(get<2>(ar) < get<2>(br)) return true; + if (get<2>(br) < get<2>(ar)) return false; + if (get<3>(ar) < get<3>(br)) return true; + if (get<3>(br) < get<3>(ar)) return false; + } - // establish the path ... - if (std::get<0>(index_index_dis_mst[j][k]) >= 0) { - const int gind1 = pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_mst[j][k])); - const int gind2 = pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_mst[j][k])); - // auto edge = - // add_edge(gind1, gind2, *m_graph); - // LogDebug(gind1 << " " << gind2 << " " << std::get<2>(index_index_dis_mst[j][k])); - float dis; - // if (edge.second) { - if (std::get<2>(index_index_dis_mst[j][k]) > 5 * units::cm) { - dis = std::get<2>(index_index_dis_mst[j][k]); - } - else { - dis = std::get<2>(index_index_dis_mst[j][k]); - } - // } - /*auto edge =*/ add_edge(gind1, gind2, WireCell::PointCloud::Facade::EdgeProp(dis), *m_graph); - } + for (const auto& wpid : wpids_set) { + auto ar = a->get_uvwt_max(wpid.apa(), wpid.face()); + auto br = b->get_uvwt_max(wpid.apa(), wpid.face()); + if (get<0>(ar) < get<0>(br)) return true; + if (get<0>(br) < get<0>(ar)) return false; + if (get<1>(ar) < get<1>(br)) return true; + if (get<1>(br) < get<1>(ar)) return false; + if (get<2>(ar) < get<2>(br)) return true; + if (get<2>(br) < get<2>(ar)) return false; + if (get<3>(ar) < get<3>(br)) return true; + if (get<3>(br) < get<3>(ar)) return false; + } - if (std::get<0>(index_index_dis_dir_mst[j][k]) >= 0) { - if (std::get<0>(index_index_dis_dir1[j][k]) >= 0) { - const int gind1 = pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_dir1[j][k])); - const int gind2 = pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_dir1[j][k])); - //auto edge = add_edge(gind1, gind2, *m_graph); - // LogDebug(gind1 << " " << gind2 << " " << std::get<2>(index_index_dis_dir1[j][k])); - float dis; - // if (edge.second) { - if (std::get<2>(index_index_dis_dir1[j][k]) > 5 * units::cm) { - dis = 
std::get<2>(index_index_dis_dir1[j][k]) * 1.1; - } - else { - dis = std::get<2>(index_index_dis_dir1[j][k]); - } - // } - /*auto edge =*/ add_edge(gind1, gind2, WireCell::PointCloud::Facade::EdgeProp(dis), *m_graph); - } - if (std::get<0>(index_index_dis_dir2[j][k]) >= 0) { - const int gind1 = pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_dir2[j][k])); - const int gind2 = pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_dir2[j][k])); - // auto edge = add_edge(gind1, gind2, *m_graph); - // LogDebug(gind1 << " " << gind2 << " " << std::get<2>(index_index_dis_dir2[j][k])); - // if (edge.second) { - float dis; - if (std::get<2>(index_index_dis_dir2[j][k]) > 5 * units::cm) { - dis = std::get<2>(index_index_dis_dir2[j][k]) * 1.1; - } - else { - dis = std::get<2>(index_index_dis_dir2[j][k]); - } - // } - /*auto edge =*/ add_edge(gind1, gind2, WireCell::PointCloud::Facade::EdgeProp(dis), *m_graph); - } - } + if (na !=0){ + auto ac = a->get_pca().center; + auto bc = b->get_pca().center; + if (ac[0] < bc[0]) return true; + if (bc[0] < bc[0]) return false; + if (ac[1] < bc[1]) return true; + if (bc[1] < bc[1]) return false; + if (ac[2] < bc[2]) return true; + if (bc[2] < bc[2]) return false; + } - } // k - } // j - + // After exhausting all "content" comparison, we are left with the question, + // are these two clusters really different or not. We have two choices. We + // may compare on pointer value which will surely "break the tie" but will + // introduce randomness. We may return "false" which says "these are equal" + // in which case any unordered set/map will not hold both. Randomness is + // the better choice as we would have a better chance to detect that in some + // future bug. 
+ return a < b; +} +void Facade::sort_clusters(std::vector& clusters) +{ + std::sort(clusters.rbegin(), clusters.rend(), cluster_less); +} +void Facade::sort_clusters(std::vector& clusters) +{ + std::sort(clusters.rbegin(), clusters.rend(), cluster_less); } -// In Facade_Cluster.cxx -std::vector Cluster::examine_graph(const bool use_ctpc) const +Facade::Cluster::Flash Facade::Cluster::get_flash() const { - // Create new graph - if (m_graph != nullptr) { - m_graph.reset(); - } - - m_graph = std::make_unique(npoints()); - - // Establish connections - Establish_close_connected_graph(); - - // Connect using overclustering protection (not easy to debug ...) - Connect_graph_overclustering_protection(use_ctpc); - - // Find connected components - std::vector component(num_vertices(*m_graph)); - // const int num_components = - connected_components(*m_graph, &component[0]); + Flash flash; // starts invalid - // std::cout << "Test: num components " << num_components << " " << kd_blobs().size() << std::endl; + const auto* p = this->node()->parent; + if (!p) return flash; + const auto* g = p->value.facade(); + if (!g) return flash; - // If only one component, no need for mapping - // if (num_components <= 1) { - // return std::vector(); - // } + const int flash_index = this->get_scalar("flash", -1); - // Create mapping from blob indices to component groups - std::vector b2groupid(nchildren(), -1); + //std::cout << "Test3 " << flash_index << std::endl; - // For each point in the graph - for (size_t i = 0; i < component.size(); ++i) { - // Get the blob index for this point - const int bind = kd3d().major_index(i); - // Map the blob to its component - b2groupid.at(bind) = component[i]; + if (flash_index < 0) { + return flash; } + if (! g->has_pc("flash")) { + return flash; + } + flash.m_valid = true; + + // These are kind of inefficient as we get the "flash" PC each time. 
+ flash.m_time = g->get_element("flash", "time", flash_index, 0); + flash.m_value = g->get_element("flash", "value", flash_index, 0); + flash.m_ident = g->get_element("flash", "ident", flash_index, -1); + flash.m_type = g->get_element("flash", "type", flash_index, -1); - return b2groupid; -} - -void Cluster::dijkstra_shortest_paths(const size_t pt_idx, const bool use_ctpc) const -{ - if (m_graph == nullptr) Create_graph(use_ctpc); - if ((int)pt_idx == m_source_pt_index) return; - m_source_pt_index = pt_idx; - m_parents.resize(num_vertices(*m_graph)); - m_distances.resize(num_vertices(*m_graph)); - - vertex_descriptor v0 = vertex(pt_idx, *m_graph); - // making a param object - const auto& param = weight_map(get(boost::edge_weight, *m_graph)) - .predecessor_map(&m_parents[0]) - .distance_map(&m_distances[0]); - // const auto& param = boost::weight_map(boost::get(&EdgeProp::dist, *m_graph)).predecessor_map(&m_parents[0]).distance_map(&m_distances[0]); - boost::dijkstra_shortest_paths(*m_graph, v0, param); - - // if (nchildren()==3449){ - // std::cout << "dijkstra_shortest_paths: " << pt_idx << " " << use_ctpc << std::endl; - // std::cout << "distances: "; - // for (size_t i = 0; i != m_distances.size(); i++) { - // std::cout << i << "->" << m_distances[i] << " "; - // } - // std::cout << std::endl; - // std::cout << "parents: "; - // for (size_t i = 0; i != m_parents.size(); i++) { - // std::cout << i << "->" << m_parents[i] << " "; - // } - // std::cout << std::endl; - // } -} - + // std::cout << "Test3: " << g->has_pc("flash") << " " << g->has_pc("light") << " " << g->has_pc("flashlight") << " " << flash_index << " " << flash.m_time << std::endl; + if (!(g->has_pc("light") && g->has_pc("flashlight"))) { + return flash; // valid, but no vector info. + } + + // These are spans. We walk the fl to look up in the l. 
+ const auto fl_flash = g->get_pcarray("flash", "flashlight"); + const auto fl_light = g->get_pcarray("light", "flashlight"); + const auto l_times = g->get_pcarray("time", "light"); + const auto l_values = g->get_pcarray("value", "light"); + const auto l_errors = g->get_pcarray("error", "light"); -void Cluster::cal_shortest_path(const size_t dest_wcp_index) const -{ - m_path_wcps.clear(); - m_path_mcells.clear(); + // std::cout << "Test3: " << fl_flash.size() << " " << fl_light.size() << std::endl; - int prev_i = -1; - for (int i = dest_wcp_index; i != m_source_pt_index; i = m_parents[i]) - { - const auto* mcell = blob_with_point(i); - if (m_path_wcps.size() == 0) - { - m_path_wcps.push_front(i); - m_path_mcells.push_front(mcell); - } - else - { - m_path_wcps.push_front(i); - if (mcell != m_path_mcells.front()) - m_path_mcells.push_front(mcell); - } - if (i == prev_i) - break; - prev_i = i; + const size_t nfl = fl_light.size(); + for (size_t ifl = 0; ifl < nfl; ++ifl) { + if (fl_flash[ifl] != flash_index) continue; + const int light_index = fl_light[ifl]; + + flash.m_times.push_back(l_times[light_index]); + flash.m_values.push_back(l_values[light_index]); + flash.m_errors.push_back(l_errors[light_index]); } - auto* src_mcell = blob_with_point(m_source_pt_index); - m_path_wcps.push_front(m_source_pt_index); - if (src_mcell != m_path_mcells.front()) - m_path_mcells.push_front(src_mcell); + return flash; } -std::vector Cluster::indices_to_points(const std::list& path_indices) const + +const Facade::Cluster::graph_type& Facade::Cluster::find_graph(const std::string& flavor) const { - std::vector points; - points.reserve(path_indices.size()); - for (size_t idx : path_indices) { - points.push_back(point3d(idx)); - } - return points; + return const_cast(const_cast(this)->find_graph(flavor)); } - -void Cluster::organize_points_path_vec(std::vector& path_points, double low_dis_limit) const +Facade::Cluster::graph_type& Facade::Cluster::find_graph(const std::string& flavor) 
{ - std::vector temp_points = path_points; - path_points.clear(); - - // First pass: filter based on distance - for (size_t i = 0; i != temp_points.size(); i++) { - if (path_points.empty()) { - path_points.push_back(temp_points[i]); - } - else if (i + 1 == temp_points.size()) { - double dis = (temp_points[i] - path_points.back()).magnitude(); - if (dis > low_dis_limit * 0.75) { - path_points.push_back(temp_points[i]); - } - } - else { - double dis = (temp_points[i] - path_points.back()).magnitude(); - double dis1 = (temp_points[i + 1] - path_points.back()).magnitude(); - - if (dis > low_dis_limit || (dis1 > low_dis_limit * 1.7 && dis > low_dis_limit * 0.75)) { - path_points.push_back(temp_points[i]); - } - } + if (this->has_graph(flavor)) { + return get_graph(flavor); } - - // Second pass: filter based on angle - temp_points = path_points; - std::vector angles; - for (size_t i = 0; i != temp_points.size(); i++) { - if (i == 0 || i + 1 == temp_points.size()) { - angles.push_back(M_PI); - } - else { - geo_vector_t v1 = temp_points[i] - temp_points[i - 1]; - geo_vector_t v2 = temp_points[i] - temp_points[i + 1]; - angles.push_back(v1.angle(v2)); - } + if (flavor == "basic") { + return this->give_graph(flavor, make_graph_basic(*this)); } - - path_points.clear(); - for (size_t i = 0; i != temp_points.size(); i++) { - if (angles[i] * 180.0 / M_PI >= 75) { - path_points.push_back(temp_points[i]); - } + if (flavor == "basic_pid"){ + return this->give_graph(flavor, make_graph_basic_pid(*this)); } + // We did our best.... + raise("unknown graph flavor " + flavor); + std::terminate(); // this is here mostly to quell compiler warnings about not returning a value. } -// this is different from WCP implementation, the path_points is the input ... 
-void Cluster::organize_path_points(std::vector& path_points, double low_dis_limit) const -{ - // std::vector temp_points = path_points; - path_points.clear(); - auto indices = get_path_wcps(); - auto temp_points = indices_to_points(indices); - for (size_t i = 0; i != temp_points.size(); i++) { - if (path_points.empty()) { - path_points.push_back(temp_points[i]); - } - else if (i + 1 == temp_points.size()) { - double dis = (temp_points[i] - path_points.back()).magnitude(); - if (dis > low_dis_limit * 0.5) { - path_points.push_back(temp_points[i]); - } - } - else { - double dis = (temp_points[i] - path_points.back()).magnitude(); - double dis1 = (temp_points[i + 1] - path_points.back()).magnitude(); +const Facade::Cluster::graph_type& Facade::Cluster::find_graph(const std::string& flavor, const Cluster& ref_cluster) const +{ + return const_cast(const_cast(this)->find_graph(flavor, ref_cluster)); +} - if (dis > low_dis_limit || (dis1 > low_dis_limit * 1.7 && dis > low_dis_limit * 0.5)) { - path_points.push_back(temp_points[i]); - } - } +Facade::Cluster::graph_type& Facade::Cluster::find_graph(const std::string& flavor, const Cluster& ref_cluster) +{ + if (this->has_graph(flavor)) { + return get_graph(flavor); + } + if (flavor == "basic") { + return this->give_graph(flavor, make_graph_basic(*this)); + } + if (flavor == "basic_ref_pid"){ + return this->give_graph(flavor, make_graph_basic_pid(*this, ref_cluster)); } + // We did our best.... + raise("unknown graph flavor " + flavor); + std::terminate(); // this is here mostly to quell compiler warnings about not returning a value. } -std::vector Cluster::get_hull() const +const Facade::Cluster::graph_type& Facade::Cluster::find_graph( + const std::string& flavor, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts) const { - // add cached ... 
- if (m_hull_calculated) { - return m_hull_points; - } + return const_cast(const_cast(this)->find_graph(flavor, dv, pcts)); +} - quickhull::QuickHull qh; - std::vector> pc; - const auto& points = this->points(); - for (int i = 0; i != npoints(); i++) { - pc.emplace_back(points[0][i], points[1][i], points[2][i]); +Facade::Cluster::graph_type& Facade::Cluster::find_graph( + const std::string& flavor, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts) +{ + if (this->has_graph(flavor)) { + return get_graph(flavor); } - quickhull::ConvexHull hull = qh.getConvexHull(pc, false, true); - std::set indices; - for (size_t i = 0; i != hull.getIndexBuffer().size(); i++) { - indices.insert(hull.getIndexBuffer().at(i)); + // Factory of known graph flavors relying on detector info: + + if (flavor == "ctpc") { + return this->give_graph(flavor, make_graph_ctpc(*this, dv, pcts)); + } + if (flavor == "ctpc_pid") { + return this->give_graph(flavor, make_graph_ctpc_pid(*this, Cluster{},dv, pcts)); } - m_hull_points.clear(); - for (auto i : indices) { - m_hull_points.push_back({points[0][i], points[1][i], points[2][i]}); + if (flavor == "relaxed") { + return this->give_graph(flavor, make_graph_relaxed(*this, dv, pcts)); } - - m_hull_calculated = true; - return m_hull_points; - // std::vector results; - // for (auto i : indices) { - // results.push_back({points[0][i], points[1][i], points[2][i]}); - // } - // return results; + // Do a hail mary, maybe user made a mistake by passing dv/pcts and really + // wants a flavor that we can make implicitly. 
+ return find_graph(flavor); } -void Cluster::Calc_PCA() const + +const Facade::Cluster::graph_type& Facade::Cluster::find_graph( + const std::string& flavor, + const Cluster& ref_cluster, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts) const { - if (m_pca_calculated) return; + return const_cast(const_cast(this)->find_graph(flavor, ref_cluster, dv, pcts)); +} - m_center.set(0, 0, 0); - int nsum = 0; - for (const Blob* blob : children()) { - for (const geo_point_t& p : blob->points()) { - m_center += p; - nsum++; - } +Facade::Cluster::graph_type& Facade::Cluster::find_graph( + const std::string& flavor, + const Cluster& ref_cluster, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts) +{ + if (this->has_graph(flavor)) { + return get_graph(flavor); } - for (int i = 0; i != 3; i++) { - m_pca_axis[i].set(0, 0, 0); + // Factory of known graph flavors relying on detector info: + if (flavor == "ctpc") { + return this->give_graph(flavor, make_graph_ctpc(*this, dv, pcts)); } - - if (nsum >= 3) { - m_center = m_center / nsum; + if (flavor == "ctpc_ref_pid") { + return this->give_graph(flavor, make_graph_ctpc_pid(*this, ref_cluster, dv, pcts)); } - else { - return; + if (flavor == "relaxed") { + return this->give_graph(flavor, make_graph_relaxed(*this, dv, pcts)); } - Eigen::MatrixXd cov_matrix(3, 3); - for (int i = 0; i != 3; i++) { - for (int j = i; j != 3; j++) { - cov_matrix(i, j) = 0; - for (const Blob* blob : children()) { - for (const geo_point_t& p : blob->points()) { - cov_matrix(i, j) += (p[i] - m_center[i]) * (p[j] - m_center[j]); - } - } - } + // Do a hail mary, maybe user made a mistake by passing dv/pcts and really + // wants a flavor that we can make implicitly. 
+ return find_graph(flavor); +} + + + +const GraphAlgorithms& Facade::Cluster::graph_algorithms(const std::string& flavor) const +{ + auto it = m_galgs.find(flavor); + if (it != m_galgs.end()) { + return it->second; // we have it already } - cov_matrix(1, 0) = cov_matrix(0, 1); - cov_matrix(2, 0) = cov_matrix(0, 2); - cov_matrix(2, 1) = cov_matrix(1, 2); - // std::cout << cov_matrix << std::endl; - // const auto eigenSolver = WireCell::Array::pca(cov_matrix); - Eigen::SelfAdjointEigenSolver eigenSolver(cov_matrix); - auto eigen_values = eigenSolver.eigenvalues(); - auto eigen_vectors = eigenSolver.eigenvectors(); + if (this->has_graph(flavor)) { // if graph exists, make the GA + auto got = m_galgs.emplace(flavor, GraphAlgorithms(get_graph(flavor))); + return got.first->second; + } + + // We failed to find an existing graph of the given flavor, but we there are + // some flavors we know how to construct on the fly: + + if (flavor == "basic") { + // we are caching, so const cast is "okay". + auto& gr = const_cast(this)->give_graph(flavor, make_graph_basic(*this)); + auto got = m_galgs.emplace(flavor, GraphAlgorithms(gr)); + return got.first->second; + } - // ascending order from Eigen, we want descending - for (int i = 0; i != 3; i++) { - m_pca_values[2-i] = eigen_values(i); - double norm = sqrt(eigen_vectors(0, i) * eigen_vectors(0, i) + eigen_vectors(1, i) * eigen_vectors(1, i) + - eigen_vectors(2, i) * eigen_vectors(2, i)); - m_pca_axis[2-i].set(eigen_vectors(0, i) / norm, eigen_vectors(1, i) / norm, eigen_vectors(2, i) / norm); - // std::cout << "PCA: " << i << " " << m_pca_values[i] << " " << m_pca_axis[i] << std::endl; + if (flavor == "basic_pid") { + auto& gr = const_cast(this)->give_graph(flavor, make_graph_basic_pid(*this)); + auto got = m_galgs.emplace(flavor, GraphAlgorithms(gr)); + return got.first->second; } - m_pca_calculated = true; + // We did our best.... 
+ raise("unknown graph flavor " + flavor); + std::terminate(); // this is here mostly to quell compiler warnings about not returning a value. } -void Cluster::Calc_PCA(std::vector& points) const +const GraphAlgorithms& Facade::Cluster::graph_algorithms(const std::string& flavor, const Cluster& ref_cluster) const { - // Reset center - m_center.set(0, 0, 0); - int nsum = 0; - - // Calculate center - for (auto it = children().begin(); it != children().end(); it++) { - for (size_t k = 0; k != points.size(); k++) { - m_center += points[k]; - nsum++; - } + auto it = m_galgs.find(flavor); + if (it != m_galgs.end()) { + return it->second; // we have it already } - // Reset PCA axes - for (int i = 0; i != 3; i++) { - m_pca_axis[i].set(0, 0, 0); + if (this->has_graph(flavor)) { // if graph exists, make the GA + auto got = m_galgs.emplace(flavor, GraphAlgorithms(get_graph(flavor))); + return got.first->second; } + + // We failed to find an existing graph of the given flavor, but we there are + // some flavors we know how to construct on the fly: - // Early return if not enough points - if (nsum < 3) { - return; + if (flavor == "basic") { + // we are caching, so const cast is "okay". + auto& gr = const_cast(this)->give_graph(flavor, make_graph_basic(*this)); + auto got = m_galgs.emplace(flavor, GraphAlgorithms(gr)); + return got.first->second; } - // Normalize center - m_center = m_center / nsum; + if (flavor == "basic_ref_pid") { + auto& gr = const_cast(this)->give_graph(flavor, make_graph_basic_pid(*this, ref_cluster)); + auto got = m_galgs.emplace(flavor, GraphAlgorithms(gr)); + return got.first->second; + } - // Calculate covariance matrix using Eigen - Eigen::MatrixXd cov_matrix(3, 3); + // We did our best.... + raise("unknown graph flavor " + flavor); + std::terminate(); // this is here mostly to quell compiler warnings about not returning a value. 
+} - for (int i = 0; i != 3; i++) { - for (int j = i; j != 3; j++) { - cov_matrix(i, j) = 0; - for (auto it = children().begin(); it != children().end(); it++) { - for (size_t k = 0; k != points.size(); k++) { - cov_matrix(i, j) += (points[k][i] - m_center[i]) * (points[k][j] - m_center[j]); - } - } - } +const GraphAlgorithms& Facade::Cluster::graph_algorithms(const std::string& flavor, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts) const +{ + auto it = m_galgs.find(flavor); + if (it != m_galgs.end()) { + return it->second; } - // Fill symmetric part of matrix - cov_matrix(1, 0) = cov_matrix(0, 1); - cov_matrix(2, 0) = cov_matrix(0, 2); - cov_matrix(2, 1) = cov_matrix(1, 2); + // Factory of known graph flavors relying on detector info: - // Compute eigenvalues and eigenvectors - Eigen::SelfAdjointEigenSolver eigenSolver(cov_matrix); - auto eigen_values = eigenSolver.eigenvalues(); - auto eigen_vectors = eigenSolver.eigenvectors(); + if (flavor == "ctpc") { + auto& gr = const_cast(this)->give_graph(flavor, make_graph_ctpc(*this, dv, pcts)); + auto got = m_galgs.emplace(flavor, GraphAlgorithms(gr)); + return got.first->second; + } - // Store eigenvalues and eigenvectors in descending order - // Note: Eigen returns in ascending order, we want descending - for (int i = 0; i != 3; i++) { - m_pca_values[2-i] = eigen_values(i); - double norm = sqrt(eigen_vectors(0, i) * eigen_vectors(0, i) + - eigen_vectors(1, i) * eigen_vectors(1, i) + - eigen_vectors(2, i) * eigen_vectors(2, i)); - - m_pca_axis[2-i].set(eigen_vectors(0, i) / norm, - eigen_vectors(1, i) / norm, - eigen_vectors(2, i) / norm); + if (flavor == "ctpc_pid") { + auto& gr = const_cast(this)->give_graph(flavor, make_graph_ctpc_pid(*this, Cluster{}, dv, pcts)); + auto got = m_galgs.emplace(flavor, GraphAlgorithms(gr)); + return got.first->second; + } + + if (flavor == "relaxed") { + auto& gr = const_cast(this)->give_graph(flavor, make_graph_relaxed(*this, dv, pcts)); + auto got = 
m_galgs.emplace(flavor, GraphAlgorithms(gr)); + return got.first->second; } - m_pca_calculated = true; + // Do a hail mary, maybe user made a mistake by passing dv/pcts and really + // wants a flavor that we can make implicitly. + return graph_algorithms(flavor); } -geo_vector_t Cluster::calc_pca_dir(const geo_point_t& center, const std::vector& points) const +const GraphAlgorithms& Facade::Cluster::graph_algorithms(const std::string& flavor, + const Cluster& ref_cluster, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts) const { - // Create covariance matrix - Eigen::MatrixXd cov_matrix(3, 3); + auto it = m_galgs.find(flavor); + if (it != m_galgs.end()) { + return it->second; + } - // Calculate covariance matrix elements - for (int i = 0; i != 3; i++) { - for (int j = i; j != 3; j++) { - cov_matrix(i, j) = 0; - for (const auto& p : points) { - if (i == 0 && j == 0) { - cov_matrix(i, j) += (p.x() - center.x()) * (p.x() - center.x()); - } - else if (i == 0 && j == 1) { - cov_matrix(i, j) += (p.x() - center.x()) * (p.y() - center.y()); - } - else if (i == 0 && j == 2) { - cov_matrix(i, j) += (p.x() - center.x()) * (p.z() - center.z()); - } - else if (i == 1 && j == 1) { - cov_matrix(i, j) += (p.y() - center.y()) * (p.y() - center.y()); - } - else if (i == 1 && j == 2) { - cov_matrix(i, j) += (p.y() - center.y()) * (p.z() - center.z()); - } - else if (i == 2 && j == 2) { - cov_matrix(i, j) += (p.z() - center.z()) * (p.z() - center.z()); - } - } - } + // Factory of known graph flavors relying on detector info: + + if (flavor == "ctpc") { + auto& gr = const_cast(this)->give_graph(flavor, make_graph_ctpc(*this, dv, pcts)); + auto got = m_galgs.emplace(flavor, GraphAlgorithms(gr)); + return got.first->second; } - // std::cout << "Test: " << center << " " << points.at(0) << std::endl; - // std::cout << "Test: " << center << " " << points.at(1) << std::endl; - // std::cout << "Test: " << center << " " << points.at(2) << std::endl; + if (flavor == 
"ctpc_ref_pid") { + auto& gr = const_cast(this)->give_graph(flavor, make_graph_ctpc_pid(*this, ref_cluster, dv, pcts)); + auto got = m_galgs.emplace(flavor, GraphAlgorithms(gr)); + return got.first->second; + } - // Fill symmetric parts - cov_matrix(1, 0) = cov_matrix(0, 1); - cov_matrix(2, 0) = cov_matrix(0, 2); - cov_matrix(2, 1) = cov_matrix(1, 2); + if (flavor == "relaxed") { + auto& gr = const_cast(this)->give_graph(flavor, make_graph_relaxed(*this, dv, pcts)); + auto got = m_galgs.emplace(flavor, GraphAlgorithms(gr)); + return got.first->second; + } - // Calculate eigenvalues/eigenvectors using Eigen - Eigen::SelfAdjointEigenSolver eigenSolver(cov_matrix); - auto eigen_vectors = eigenSolver.eigenvectors(); + // Do a hail mary, maybe user made a mistake by passing dv/pcts and really + // wants a flavor that we can make implicitly. + return graph_algorithms(flavor); +} - // std::cout << "Test: " << eigen_vectors(0,0) << " " << eigen_vectors(1,0) << " " << eigen_vectors(2,0) << std::endl; - // Get primary direction (first eigenvector) - double norm = sqrt(eigen_vectors(0, 2) * eigen_vectors(0, 2) + - eigen_vectors(1, 2) * eigen_vectors(1, 2) + - eigen_vectors(2, 2) * eigen_vectors(2, 2)); +// These methods implement the cache management functionality - return geo_vector_t(eigen_vectors(0, 2) / norm, - eigen_vectors(1, 2) / norm, - eigen_vectors(2, 2) / norm); +void Facade::Cluster::clear_graph_algorithms_cache(const std::string& graph_name) +{ + auto it = m_galgs.find(graph_name); + if (it != m_galgs.end()) { + it->second.clear_cache(); + auto log = Log::logger("clus"); + log->debug("Cleared cache for GraphAlgorithms '{}'", graph_name); + } } +void Facade::Cluster::remove_graph_algorithms(const std::string& graph_name) +{ + auto it = m_galgs.find(graph_name); + if (it != m_galgs.end()) { + m_galgs.erase(it); + auto log = Log::logger("clus"); + log->debug("Removed GraphAlgorithms '{}'", graph_name); + } +} - -geo_point_t Cluster::get_center() const { - if 
(!m_pca_calculated) { - Calc_PCA(); +void Facade::Cluster::clear_all_graph_algorithms_caches() +{ + for (auto& [name, ga] : m_galgs) { + ga.clear_cache(); } - return m_center; + auto log = Log::logger("clus"); + log->debug("Cleared all GraphAlgorithms caches"); } -geo_vector_t Cluster::get_pca_axis(int axis) const { - if (!m_pca_calculated) { - Calc_PCA(); + +std::vector Facade::Cluster::get_cached_graph_algorithms() const +{ + std::vector names; + names.reserve(m_galgs.size()); + for (const auto& [name, ga] : m_galgs) { + names.push_back(name); } - if (axis < 0 || axis >= 3) raise("axis %d < 0 || axis >= 3", axis); - return m_pca_axis[axis]; + return names; } -double Cluster::get_pca_value(int axis) const { - if (!m_pca_calculated) { - Calc_PCA(); + + +// ne' examine_graph +std::vector Cluster::connected_blobs(IDetectorVolumes::pointer dv, IPCTransformSet::pointer pcts) const +{ + const auto& ga = graph_algorithms("relaxed", dv, pcts); + const auto& component = ga.connected_components(); + + // Create mapping from blob indices to component groups + std::vector b2groupid(nchildren(), -1); + + // For each point in the graph + for (size_t i = 0; i < component.size(); ++i) { + // Get the blob index for this point + const int bind = kd3d().major_index(i); + // Map the blob to its component + b2groupid.at(bind) = (int)component[i]; } - if (axis < 0 || axis >= 3) raise("axis %d < 0 || axis >= 3", axis); - return m_pca_values[axis]; + return b2groupid; } -// std::unordered_map -std::vector Cluster::examine_x_boundary(const double low_limit, const double high_limit) +std::vector> Cluster::get_extreme_wcps(const Cluster* reference_cluster) const { - double num_points[3] = {0, 0, 0}; - double x_max = -1e9; - double x_min = 1e9; - auto& mcells = children(); - for (Blob* mcell : mcells) { - /// TODO: no caching, could be slow - std::vector pts = mcell->points(); - for (size_t i = 0; i != pts.size(); i++) { - if (pts.at(i).x() < low_limit) { - num_points[0]++; - if 
(pts.at(i).x() > x_max) x_max = pts.at(i).x(); + std::vector> out_vec_wcps; + + if (npoints() == 0) { + return out_vec_wcps; + } + + // Create list of valid point indices based on spatial filtering + // This directly corresponds to prototype's all_indices creation + std::vector valid_indices; + + if (reference_cluster == nullptr) { + // No filtering - use all points (equivalent to old_time_mcells_map==0) + for (int i = 0; i < npoints(); ++i) { + valid_indices.push_back(i); + } + } else { + // Get reference cluster's time_blob_map (equivalent to old_time_mcells_map) + const auto& ref_time_blob_map = reference_cluster->time_blob_map(); + + // Filter points based on spatial relationship with reference cluster + // This implements the exact same logic as prototype's old_time_mcells_map filtering + for (int i = 0; i < npoints(); ++i) { + if (is_point_spatially_related_to_time_blobs(i, ref_time_blob_map, false)) { + valid_indices.push_back(i); } - else if (pts.at(i).x() > high_limit) { - num_points[2]++; - if (pts.at(i).x() < x_min) x_min = pts.at(i).x(); + } + } + + if (valid_indices.empty()) { + return out_vec_wcps; + } + + // Get main axis and ensure consistent direction (y>0) + // Equivalent to prototype's Calc_PCA() and get_PCA_axis(0) + geo_point_t main_axis = get_pca().axis.at(0); + if (main_axis.y() < 0) { + main_axis = main_axis * -1; + } + + // Find 8 extreme points: 2 along main axis + 6 along coordinate axes + // Equivalent to prototype's wcps[8] array + geo_point_t extreme_points[8]; + std::vector extreme_values(8); // Track the extreme values for comparison + bool initialized = false; + + + + // Scan through all valid points to find extremes + // Equivalent to prototype's scanning loop through all_indices + for (size_t idx : valid_indices) { + if (is_point_excluded(idx)) continue; + + geo_point_t current_point = point3d(idx); + + if (!initialized) { + // Initialize all extreme points to the first valid point + for (int i = 0; i < 8; ++i) { + 
extreme_points[i] = current_point; } - else { - num_points[1]++; + + // Initialize extreme values + extreme_values[0] = extreme_values[1] = current_point.dot(main_axis); // main axis projections + extreme_values[2] = extreme_values[3] = current_point.y(); // Y values + extreme_values[4] = extreme_values[5] = current_point.z(); // Z values + extreme_values[6] = extreme_values[7] = current_point.x(); // X values + + initialized = true; + continue; + } + + // Main axis extremes (along PCA axis) + double main_projection = current_point.dot(main_axis); + if (main_projection > extreme_values[0]) { + extreme_points[0] = current_point; // high along main axis + extreme_values[0] = main_projection; + } + if (main_projection < extreme_values[1]) { + extreme_points[1] = current_point; // low along main axis + extreme_values[1] = main_projection; + } + + // Y-axis extremes (top/bottom) + if (current_point.y() > extreme_values[2]) { + extreme_points[2] = current_point; // highest Y + extreme_values[2] = current_point.y(); + } + if (current_point.y() < extreme_values[3]) { + extreme_points[3] = current_point; // lowest Y + extreme_values[3] = current_point.y(); + } + + // Z-axis extremes (front/back) + if (current_point.z() > extreme_values[4]) { + extreme_points[4] = current_point; // furthest Z + extreme_values[4] = current_point.z(); + } + if (current_point.z() < extreme_values[5]) { + extreme_points[5] = current_point; // nearest Z + extreme_values[5] = current_point.z(); + } + + // X-axis extremes (earliest/latest) + if (current_point.x() > extreme_values[6]) { + extreme_points[6] = current_point; // latest X + extreme_values[6] = current_point.x(); + } + if (current_point.x() < extreme_values[7]) { + extreme_points[7] = current_point; // earliest X + extreme_values[7] = current_point.x(); + } + } + + if (!initialized) { + return std::vector>(); // No valid points found + } + + // Group the extreme points into result vectors + // Following the prototype's grouping strategy 
exactly + + // First extreme along the main axis + std::vector main_axis_high; + main_axis_high.push_back(extreme_points[0]); + out_vec_wcps.push_back(main_axis_high); + + // Second extreme along the main axis + std::vector main_axis_low; + main_axis_low.push_back(extreme_points[1]); + out_vec_wcps.push_back(main_axis_low); + + // Add other extremes if they are significantly different from main axis extremes + // This prevents duplicate points in the output (same as prototype logic) + const double min_separation = 5.0 * units::cm; // Minimum distance to be considered distinct + + for (int i = 2; i < 8; i++) { + bool is_distinct = true; + + // Check if this extreme is too close to already added points + for (auto& added_group : out_vec_wcps) { + double distance = (extreme_points[i] - added_group[0]).magnitude(); + if (distance < min_separation) { + added_group.push_back(extreme_points[i]); // Add to existing group + is_distinct = false; + break; } + + if (!is_distinct) break; + } + + // If distinct enough, add as a new extreme group + if (is_distinct) { + std::vector coord_extreme; + coord_extreme.push_back(extreme_points[i]); + out_vec_wcps.push_back(coord_extreme); } } + + return out_vec_wcps; +} +// Updated is_point_spatially_related_to_time_blobs to match prototype exactly +bool Cluster::is_point_spatially_related_to_time_blobs( + size_t point_index, + const time_blob_map_t& ref_time_blob_map, + bool flag_nearby_timeslice +) const { + + // Get current point's time slice information + // Equivalent to: int time_slice = cloud.pts[i].mcell->GetTimeSlice(); + const Blob* current_blob = blob_with_point(point_index); + auto wpid = current_blob->wpid(); + auto apa = wpid.apa(); + auto face = wpid.face(); + int current_time_slice = current_blob->slice_index_min(); + + // Check ONLY current time slice (exact prototype logic, no ±1 offset) + // This is the exact prototype logic: + // if (old_time_mcells_map->find(time_slice)!=old_time_mcells_map->end()) + // 
tbm[wpid.apa()][wpid.face()][blob->slice_index_min()].insert(blob); + + auto apa_it = ref_time_blob_map.find(apa); + if (apa_it == ref_time_blob_map.end()) return false; - // std::cout - // << "npoints() " << npoints() - // << " xmax " << x_max << " xmin " << x_min - // << " low_limit " << low_limit << " high_limit " << high_limit - // << " num_points: " << num_points[0] << " " << num_points[1] << " " << num_points[2] << std::endl; + auto face_it = apa_it->second.find(face); + if (face_it == apa_it->second.end()) return false; - std::vector clusters; - std::vector b2groupid(mcells.size(), 0); - std::set groupids; + auto time_it = face_it->second.find(current_time_slice); + if (time_it != face_it->second.end()) { - // if (true) { - if (num_points[0] + num_points[2] < num_points[1] * 0.075) { - // PR3DCluster* cluster_1 = 0; - // PR3DCluster* cluster_2 = 0; - // PR3DCluster* cluster_3 = 0; - /// FIXME: does tolerance need to be configurable? - if (x_max < low_limit - 1.0 * units::cm && x_max > -1e8) { - // fill the small one ... - // cluster_1 = new PR3DCluster(1); - groupids.insert(1); - } - if (x_min > high_limit + 1.0 * units::cm && x_min < 1e8) { - // fill the large one ... 
- // cluster_3 = new PR3DCluster(3); - groupids.insert(3); + // Iterate through apa/face maps in this time slice + // time_blob_map_t is std::map>> + // Structure: apa -> face -> time -> blobset + // Now iterate through blobs in the BlobSet + for (const Blob* ref_blob : time_it->second) { + + // std::cout << "Test: " << point_index << " " << ref_blob->u_wire_index_min() << " " << ref_blob->u_wire_index_max() << " " + // << ref_blob->v_wire_index_min() << " " << ref_blob->v_wire_index_max() << " " + // << ref_blob->w_wire_index_min() << " " << ref_blob->w_wire_index_max() << std::endl; + + // if (flag_nearby_timeslice) + // std::cout << wire_index(point_index, 0) << " " << wire_index(point_index, 1) << " " << wire_index(point_index, 2) << " " + // << ref_blob->u_wire_index_min() << " " << ref_blob->u_wire_index_max() << " " + // << ref_blob->v_wire_index_min() << " " << ref_blob->v_wire_index_max() << " " + // << ref_blob->w_wire_index_min() << " " << ref_blob->w_wire_index_max() << " " + // << check_wire_ranges_match(point_index, ref_blob) << std::endl; + + + + + if (check_wire_ranges_match(point_index, ref_blob)) { + return true; // Equivalent to flag_add = true; break; + } } - // std::cout << "groupids size: " << groupids.size() << std::endl; - if (!groupids.empty()) { - // cluster_2 = new PR3DCluster(2); - groupids.insert(2); - for (size_t idx=0; idx < mcells.size(); idx++) { - Blob *mcell = mcells.at(idx); - if (mcell->points()[0].x() < low_limit) { - if (groupids.find(1) != groupids.end()) { - // cluster_1->AddCell(mcell, mcell->GetTimeSlice()); - b2groupid[idx] = 1; - } - else { - // cluster_2->AddCell(mcell, mcell->GetTimeSlice()); - b2groupid[idx] = 2; - } - } - else if (mcell->points()[0].x() > high_limit) { - if (groupids.find(3) != groupids.end()) { - // cluster_3->AddCell(mcell, mcell->GetTimeSlice()); - b2groupid[idx] = 3; - } - else { - // cluster_2->AddCell(mcell, mcell->GetTimeSlice()); - b2groupid[idx] = 2; + } + + if (flag_nearby_timeslice) { + // 
Check adjacent time slices (±1) if flag_nearby_timeslice is true + // Equivalent to: if (old_time_mcells_map->find(time_slice-1)!=old_time_mcells_map->end()) + // and old_time_mcells_map->find(time_slice+1)!=old_time_mcells_map->end() + + for (int offset : {-1, 1}) { + int adjacent_time_slice = current_time_slice + offset; + auto time_it_adj = face_it->second.find(adjacent_time_slice); + if (time_it_adj != face_it->second.end()) { + for (const Blob* ref_blob : time_it_adj->second) { + if (check_wire_ranges_match(point_index, ref_blob)) { + return true; // Equivalent to flag_add = true; break; } } - else { - // cluster_2->AddCell(mcell, mcell->GetTimeSlice()); - b2groupid[idx] = 2; - } } - // if (cluster_1 != 0) clusters.push_back(cluster_1); - // clusters.push_back(cluster_2); - // if (cluster_3 != 0) clusters.push_back(cluster_3); } } - return b2groupid; + + // if (flag_nearby_timeslice) { + // std::cout << "No match found " << wire_index(point_index, 0) << " " << wire_index(point_index, 1) << " " << wire_index(point_index, 2) << " " << std::endl; + // } + + return false; // Equivalent to flag_add remains false +} + +// Updated check_wire_ranges_match to match prototype exactly +bool Cluster::check_wire_ranges_match(size_t point_index, const Blob* ref_blob) const +{ + try { + // Get current point's wire indices (equivalent to cloud.pts[i].index_u, index_v, index_w) + int current_wire_u = wire_index(point_index, 0); // U plane + int current_wire_v = wire_index(point_index, 1); // V plane + int current_wire_w = wire_index(point_index, 2); // W plane + + // Get reference blob's wire ranges (exact prototype logic, no tolerance) + // Equivalent to: + // int u1_low_index = mcell->get_uwires().front()->index(); + // int u1_high_index = mcell->get_uwires().back()->index(); + int u_min = ref_blob->u_wire_index_min(); + int u_max = ref_blob->u_wire_index_max(); + int v_min = ref_blob->v_wire_index_min(); + int v_max = ref_blob->v_wire_index_max(); + int w_min = 
ref_blob->w_wire_index_min(); + int w_max = ref_blob->w_wire_index_max(); + + + + // NO tolerance added - use exact wire ranges like prototype + // Removed: u_min = u_min - 1; u_max = u_max + 1; etc. + + // Check if current point's wire indices fall within ALL THREE ranges + // This is the exact prototype condition: + // if (cloud.pts[i].index_u <= u1_high_index && cloud.pts[i].index_u >= u1_low_index && + // cloud.pts[i].index_v <= v1_high_index && cloud.pts[i].index_v >= v1_low_index && + // cloud.pts[i].index_w <= w1_high_index && cloud.pts[i].index_w >= w1_low_index) + if (current_wire_u >= u_min && current_wire_u < u_max && + current_wire_v >= v_min && current_wire_v < v_max && + current_wire_w >= w_min && current_wire_w < w_max) { + return true; // Equivalent to flag_add = true; break; + } + + } catch (...) { + // If wire information is not available, continue + } + + return false; +} + + +std::pair Cluster::get_two_boundary_steiner_graph_idx(const std::string& steiner_graph_name, const std::string& steiner_pc_name, bool flag_cosmic) const{ + // run the reugular two boundary points ... + auto pair_points = get_two_boundary_wcps(flag_cosmic); + + + if (!has_pc(steiner_pc_name)) { + throw std::runtime_error("Steiner point cloud not found"); + } + auto& steiner_pc = get_pc(steiner_pc_name); + + // 1. Form vector from pair_points + geo_vector_t boundary_vector = pair_points.second - pair_points.first; + + + // std::cout << pair_points.first << " " << pair_points.second << std::endl; + + // Normalize the vector to ensure consistent projection calculations + if (boundary_vector.magnitude() > 0) { + boundary_vector = boundary_vector.norm(); + } else { + // If points are identical, return first two points or handle error + return std::make_pair(0, std::min(1, (int)steiner_pc.size() - 1)); + } + + // 2. 
Loop over all points in steiner_pc and find the two points + // that are furthest along the boundary vector + double max_projection = -std::numeric_limits::infinity(); + double min_projection = std::numeric_limits::infinity(); + int max_idx = -1; + int min_idx = -1; + + const auto& coords = get_default_scope().coords; + + // Get coordinate arrays from the point cloud + const auto& x_coords = steiner_pc.get(coords.at(0))->elements(); + const auto& y_coords = steiner_pc.get(coords.at(1))->elements(); + const auto& z_coords = steiner_pc.get(coords.at(2))->elements(); + + + for (size_t i = 0; i < x_coords.size(); ++i) { + // Create point from steiner point cloud + geo_point_t steiner_point(x_coords[i], y_coords[i], z_coords[i]); + + // Project point onto the boundary vector direction + // Use first boundary point as reference origin + geo_vector_t point_vector = steiner_point - pair_points.first; + double projection = point_vector.dot(boundary_vector); + + // Track extremes + if (projection > max_projection) { + max_projection = projection; + max_idx = static_cast(i); + } + if (projection < min_projection) { + min_projection = projection; + min_idx = static_cast(i); + } + } + + // 3. 
Return indices of the two extreme points + if (max_idx == -1 || min_idx == -1) { + throw std::runtime_error("Could not find valid points in Steiner point cloud"); + } + + return std::make_pair(min_idx, max_idx); + + + // if (!has_graph(steiner_graph_name)) { + // throw std::runtime_error("Steiner graph not found"); + // } + // auto& graph_steiner = get_graph(steiner_graph_name); + + // // Create a MultiQuery from the dataset - this builds the k-d tree internally + // // Note: const_cast is needed because MultiQuery constructor requires non-const reference + // // but query operations don't modify the dataset + // KDTree::MultiQuery steiner_kd(const_cast(steiner_pc)); + + // // Get a 3D query object for x,y,z coordinates + // auto query3d = steiner_kd.get({"x", "y", "z"}); + + // // Convert geo_point_t to std::vector for the query + // std::vector query_point1 = {pair_points.first.x(), pair_points.first.y(), pair_points.first.z()}; + // std::vector query_point2 = {pair_points.second.x(), pair_points.second.y(), pair_points.second.z()}; + + // auto p1 = query3d->knn(1, query_point1); + // auto p2 = query3d->knn(1, query_point2); + + // // Map boundary points to their indices + // std::map boundary_indices; + // boundary_indices[0] = steiner_pc->add_point(pair_points.first); + // boundary_indices[1] = steiner_pc->add_point(pair_points.second); + + // return boundary_indices; } -bool Cluster::judge_vertex(geo_point_t& p_test, const double asy_cut, const double occupied_cut) + +std::pair Cluster::get_two_boundary_wcps(bool flag_cosmic) const { - p_test = calc_ave_pos(p_test, 3 * units::cm); + // Early exit for single point + if (npoints() <= 1) { + geo_point_t single_point = point3d(0); + return std::make_pair(single_point, single_point); + } + + // Get PCA info + const auto& pca = get_pca(); + geo_vector_t main_axis = pca.axis.at(0); + geo_vector_t second_axis = pca.axis.at(1); + - geo_point_t dir = vhough_transform(p_test, 15 * units::cm); + // Use maps to store 14 
extreme points, their indices, and values for each (apa, face) pair + using ApaFace = std::pair; // apa, face + std::map> extreme_points_map; + std::map> extreme_point_indices_map; + std::map> extreme_values_map; + std::map initialized_map; + std::map> boundary_points_map; - // judge if this is end points - std::pair num_pts = ndipole(p_test, dir, 25 * units::cm); - if ((num_pts.first + num_pts.second) == 0) return false; + // find all the wpids ... + // Get unique wpids from wpids_blob() + std::vector wpids_vec = wpids_blob(); + std::set wpids_set(wpids_vec.begin(), wpids_vec.end()); + std::vector wpids(wpids_set.begin(), wpids_set.end()); - double asy = fabs(num_pts.first - num_pts.second) / (num_pts.first + num_pts.second); + for (auto& wpid : wpids) { + auto apa = wpid.apa(); + auto face = wpid.face(); - if (asy > asy_cut) { - return true; + auto key = std::make_pair(apa, face); + if (extreme_points_map.find(key) == extreme_points_map.end()) { + extreme_points_map[key] = {}; + extreme_point_indices_map[key] = {}; + extreme_values_map[key] = {}; + initialized_map[key] = false; + } } - else { - // TPCParams& mp = Singleton::Instance(); - // double angle_u = mp.get_angle_u(); - // double angle_v = mp.get_angle_v(); - // double angle_w = mp.get_angle_w(); - const auto& mp = grouping()->get_params(); - // ToyPointCloud temp_point_cloud(angle_u, angle_v, angle_w); - auto temp_point_cloud = std::make_shared(mp.angle_u, mp.angle_v, mp.angle_w); - dir = dir.norm(); - // PointVector pts; - std::vector pts; - for (size_t i = 0; i != 40; i++) { - geo_point_t pt(p_test.x() + i * 0.5 * units::cm * dir.x(), p_test.y() + i * 0.5 * units::cm * dir.y(), - p_test.z() + i * 0.5 * units::cm * dir.z()); - // WCP::WCPointCloud::WCPoint& wcp = point_cloud->get_closest_wcpoint(pt); - auto [_, wcp] = get_closest_wcpoint(pt); - if (sqrt(pow(wcp.x() - pt.x(), 2) + pow(wcp.y() - pt.y(), 2) + pow(wcp.z() - pt.z(), 2)) < - std::max(1.8 * units::cm, i * 0.5 * units::cm * sin(18. / 180. 
* 3.1415926))) { - pt = wcp; - } - pts.push_back(pt); - if (i != 0) { - geo_point_t pt1(p_test.x() - i * 0.5 * units::cm * dir.x(), p_test.y() - i * 0.5 * units::cm * dir.y(), - p_test.z() - i * 0.5 * units::cm * dir.z()); - // WCP::WCPointCloud::WCPoint& wcp1 = point_cloud->get_closest_wcpoint(pt1); - auto [_, wcp1] = get_closest_wcpoint(pt1); - if (sqrt(pow(wcp1.x() - pt1.x(), 2) + pow(wcp1.y() - pt1.y(), 2) + pow(wcp1.z() - pt1.z(), 2)) < - std::max(1.8 * units::cm, i * 0.5 * units::cm * sin(18. / 180. * 3.1415926))) { - pt1 = wcp1; - } - pts.push_back(pt1); - } + + + + + // Find extreme points + for (int i = 0; i < npoints(); i++) { + // Skip excluded points + if (is_point_excluded(i)) continue; + + // Get blob and check charge threshold + const Blob* blob = blob_with_point(i); + if (blob->estimate_total_charge() < 1500) continue; + auto wpid = blob->wpid(); + auto apa = wpid.apa(); + auto face = wpid.face(); + auto key = std::make_pair(apa, face); + + geo_point_t current_point = point3d(i); + + auto& extreme_points = extreme_points_map[key]; + auto& extreme_point_indices = extreme_point_indices_map[key]; + auto& extreme_values = extreme_values_map[key]; + bool& initialized = initialized_map[key]; + + // Now you can use wpid, apa, face, and the per-(apa,face) arrays for this point. 
+ + if (!initialized) { + // Initialize all extremes to first valid point + for (int j = 0; j < 14; j++) { + extreme_points[j] = current_point; + extreme_point_indices[j] = i; + } + // Initialize projection values + extreme_values[0] = extreme_values[1] = current_point.dot(main_axis); + extreme_values[2] = extreme_values[3] = current_point.dot(second_axis); + // Initialize coordinate values + extreme_values[4] = extreme_values[5] = current_point.x(); + extreme_values[6] = extreme_values[7] = current_point.y(); + extreme_values[8] = extreme_values[9] = current_point.z(); + // Initialize wire index values + extreme_values[10] = extreme_values[11] = wire_index(i, 0); // U + extreme_values[12] = extreme_values[13] = wire_index(i, 1); // V + + initialized = true; + continue; } - // temp_point_cloud.AddPoints(pts); - for (auto& pt : pts) { - temp_point_cloud->add(pt); + + // Main axis projections + double main_proj = current_point.dot(main_axis); + if (main_proj > extreme_values[0]) { + extreme_values[0] = main_proj; + extreme_points[0] = current_point; + extreme_point_indices[0] = i; + } + if (main_proj < extreme_values[1]) { + extreme_values[1] = main_proj; + extreme_points[1] = current_point; + extreme_point_indices[1] = i; } - // temp_point_cloud.build_kdtree_index(); + + // Second axis projections + double second_proj = current_point.dot(second_axis); + if (second_proj > extreme_values[2]) { + extreme_values[2] = second_proj; + extreme_points[2] = current_point; + extreme_point_indices[2] = i; + } + if (second_proj < extreme_values[3]) { + extreme_values[3] = second_proj; + extreme_points[3] = current_point; + extreme_point_indices[3] = i; + } + + // X extremes (early/late) + if (current_point.x() > extreme_values[4]) { + extreme_values[4] = current_point.x(); + extreme_points[4] = current_point; + extreme_point_indices[4] = i; + } + if (current_point.x() < extreme_values[5]) { + extreme_values[5] = current_point.x(); + extreme_points[5] = current_point; + 
extreme_point_indices[5] = i; + } + + // Y extremes (top/bottom) + if (current_point.y() > extreme_values[6]) { + extreme_values[6] = current_point.y(); + extreme_points[6] = current_point; + extreme_point_indices[6] = i; + } + if (current_point.y() < extreme_values[7]) { + extreme_values[7] = current_point.y(); + extreme_points[7] = current_point; + extreme_point_indices[7] = i; + } + + // Z extremes (left/right) + if (current_point.z() > extreme_values[8]) { + extreme_values[8] = current_point.z(); + extreme_points[8] = current_point; + extreme_point_indices[8] = i; + } + if (current_point.z() < extreme_values[9]) { + extreme_values[9] = current_point.z(); + extreme_points[9] = current_point; + extreme_point_indices[9] = i; + } + + // U wire index extremes + int u_wire = wire_index(i, 0); + if (u_wire > extreme_values[10]) { + extreme_values[10] = u_wire; + extreme_points[10] = current_point; + extreme_point_indices[10] = i; + } + if (u_wire < extreme_values[11]) { + extreme_values[11] = u_wire; + extreme_points[11] = current_point; + extreme_point_indices[11] = i; + } + + // V wire index extremes + int v_wire = wire_index(i, 1); + if (v_wire > extreme_values[12]) { + extreme_values[12] = v_wire; + extreme_points[12] = current_point; + extreme_point_indices[12] = i; + } + if (v_wire < extreme_values[13]) { + extreme_values[13] = v_wire; + extreme_points[13] = current_point; + extreme_point_indices[13] = i; + } + } + + // Get live channel sets for each plane + auto live_u_index_map = get_live_wire_indices(0); + auto live_v_index_map = get_live_wire_indices(1); + auto live_w_index_map = get_live_wire_indices(2); - int temp_num_total_points = 0; - int temp_num_occupied_points = 0; - // const int N = point_cloud->get_num_points(); - const int N = npoints(); - // WCP::WCPointCloud& cloud = point_cloud->get_cloud(); - for (int i = 0; i != N; i++) { - // geo_point_t dir1(cloud.pts[i].x() - p_test.x(), cloud.pts[i].y() - p_test.y(), cloud.pts[i].z() - p_test.z()); - 
geo_point_t dir1 = point3d(i) - p_test; - if (dir1.magnitude() < 15 * units::cm) { - geo_point_t test_p1 = point3d(i); - temp_num_total_points++; - double dis[3]; - dis[0] = temp_point_cloud->get_closest_2d_dis(test_p1, 0).second; - dis[1] = temp_point_cloud->get_closest_2d_dis(test_p1, 1).second; - dis[2] = temp_point_cloud->get_closest_2d_dis(test_p1, 2).second; - if (dis[0] <= 1.5 * units::cm && dis[1] <= 1.5 * units::cm && dis[2] <= 2.4 * units::cm || - dis[0] <= 1.5 * units::cm && dis[2] <= 1.5 * units::cm && dis[1] <= 2.4 * units::cm || - dis[2] <= 1.5 * units::cm && dis[1] <= 1.5 * units::cm && dis[0] <= 2.4 * units::cm) - temp_num_occupied_points++; - } - } + // Calculate constants for distance normalization + const Grouping* grouping = this->grouping(); + auto nticks_map = grouping->get_nticks_per_slice(); + auto drift_speed_map = grouping->get_drift_speed(); + auto tick_map = grouping->get_tick(); - if (temp_num_occupied_points < temp_num_total_points * occupied_cut) return true; - } - // judge if there + for (auto & [key, extreme_points] : extreme_points_map) { + auto apa = key.first; + auto face = key.second; - return false; -} + double nrebin = nticks_map[apa][face]; + double drift_speed = drift_speed_map[apa][face]; + double tick = tick_map[apa][face]; + double distance_norm = nrebin * tick * drift_speed; -bool Facade::cluster_less(const Cluster* a, const Cluster* b) -{ - if (a == b) return false; + auto& extreme_point_indices = extreme_point_indices_map[key]; + // auto& extreme_values = extreme_values_map[key]; + // bool& initialized = initialized_map[key]; + + auto& live_u_index = live_u_index_map[key]; + auto& live_v_index = live_v_index_map[key]; + auto& live_w_index = live_w_index_map[key]; + + boundary_points_map[key] = std::make_pair(extreme_points[0], extreme_points[1]); + auto& boundary_points = boundary_points_map[key]; + + double boundary_value = calculate_boundary_metric( + extreme_point_indices[0], extreme_point_indices[1], + 
live_u_index, live_v_index, live_w_index, + distance_norm, flag_cosmic); + + // Test all pairs of extreme points + for (int i = 0; i < 14; i++) { + for (int j = i + 1; j < 14; j++) { + double value = calculate_boundary_metric( + extreme_point_indices[i], extreme_point_indices[j], + live_u_index, live_v_index, live_w_index, + distance_norm, flag_cosmic); + + if (value > boundary_value) { + boundary_value = value; + if (extreme_points[i].y() > extreme_points[j].y()) { + boundary_points.first = extreme_points[i]; + boundary_points.second = extreme_points[j]; + } else { + boundary_points.first = extreme_points[j]; + boundary_points.second = extreme_points[i]; + } + } + } + } + + - { - const double la = a->get_length(); - const double lb = b->get_length(); - if (la < lb) return true; - if (lb < la) return false; - } - { - const int na = a->nchildren(); - const int nb = b->nchildren(); - if (na < nb) return true; - if (nb < na) return false; } - { - const int na = a->npoints(); - const int nb = b->npoints(); - if (na < nb) return true; - if (nb < na) return false; + + // Collect all points, avoiding duplicates + std::vector all_points; + for (const auto& entry : boundary_points_map) { + all_points.push_back(entry.second.first); + all_points.push_back(entry.second.second); } - { - auto ar = a->get_uvwt_min(); - auto br = b->get_uvwt_min(); - if (get<0>(ar) < get<0>(br)) return true; - if (get<0>(br) < get<0>(ar)) return false; - if (get<1>(ar) < get<1>(br)) return true; - if (get<1>(br) < get<1>(ar)) return false; - if (get<2>(ar) < get<2>(br)) return true; - if (get<2>(br) < get<2>(ar)) return false; - if (get<3>(ar) < get<3>(br)) return true; - if (get<3>(br) < get<3>(ar)) return false; + + double max_dist_sq = -1; + geo_point_t best_p1, best_p2; + + // Compare all pairs + for (size_t i = 0; i < all_points.size(); ++i) { + for (size_t j = i + 1; j < all_points.size(); ++j) { + double dist_sq = (all_points[i] - all_points[j]).magnitude2(); + if (dist_sq > max_dist_sq) { 
+ max_dist_sq = dist_sq; + best_p1 = all_points[i]; + best_p2 = all_points[j]; + } + } } - { - auto ar = a->get_uvwt_max(); - auto br = b->get_uvwt_max(); - if (get<0>(ar) < get<0>(br)) return true; - if (get<0>(br) < get<0>(ar)) return false; - if (get<1>(ar) < get<1>(br)) return true; - if (get<1>(br) < get<1>(ar)) return false; - if (get<2>(ar) < get<2>(br)) return true; - if (get<2>(br) < get<2>(ar)) return false; - if (get<3>(ar) < get<3>(br)) return true; - if (get<3>(br) < get<3>(ar)) return false; + + if (best_p1.y() < best_p2.y()) { + std::swap(best_p1, best_p2); } - // The two are very similar. What is left to check? Only pointer?. This - // will cause "randomness"! - return a < b; + return {best_p1, best_p2}; + } -void Facade::sort_clusters(std::vector& clusters) + + + + +// Helper function to get live wire indices for a given plane +std::map, std::set> Cluster::get_live_wire_indices(int plane) const { - std::sort(clusters.rbegin(), clusters.rend(), cluster_less); + using ApaFace = std::pair; // apa, face + std::map> apa_face_live_indices; + + for (const Blob* blob : children()) { + const Grouping* grouping = this->grouping(); + auto wpid = blob->wpid(); + int time_slice = blob->slice_index_min(); + + // Create ApaFace key for this blob + ApaFace apa_face_key = std::make_pair(wpid.apa(), wpid.face()); + + int wire_min, wire_max; + switch (plane) { + case 0: // U plane + wire_min = blob->u_wire_index_min(); + wire_max = blob->u_wire_index_max(); + break; + case 1: // V plane + wire_min = blob->v_wire_index_min(); + wire_max = blob->v_wire_index_max(); + break; + case 2: // W plane + wire_min = blob->w_wire_index_min(); + wire_max = blob->w_wire_index_max(); + break; + default: + continue; + } + + // Check for bad planes using charge error threshold + bool plane_is_bad = false; + int dead_wire_count = 0; + int total_wire_count = wire_max - wire_min; + + for (int wire_index = wire_min; wire_index < wire_max; wire_index++) { + if 
(grouping->is_wire_dead(wpid.apa(), wpid.face(), plane, wire_index, time_slice) || + blob->get_wire_charge_error(plane, wire_index) > 1e10) { + dead_wire_count++; + } + } + + // If more than half the wires are dead, consider the plane bad + if (dead_wire_count > total_wire_count / 2) { + plane_is_bad = true; + } + + if (!plane_is_bad) { + for (int wire_index = wire_min; wire_index < wire_max; wire_index++) { + if (!grouping->is_wire_dead(wpid.apa(), wpid.face(), plane, wire_index, time_slice)) { + apa_face_live_indices[apa_face_key].insert(wire_index); + } + } + } + } + + return apa_face_live_indices; } -void Facade::sort_clusters(std::vector& clusters) + +// Helper function to count live channels between two wire indices (assuming everything are already with one APA/face) +int Cluster::count_live_channels_between(int wire_min, int wire_max, const std::set& live_indices) const { - std::sort(clusters.rbegin(), clusters.rend(), cluster_less); + int count = 0; + for (int wire_index = wire_min; wire_index < wire_max; wire_index++) { + if (live_indices.find(wire_index) != live_indices.end()) { + count++; + } + } + return count; } - -Facade::Cluster::Flash Facade::Cluster::get_flash() const +// Helper function to calculate boundary metric between two points (assuming everything are already with one APA/face) +double Cluster::calculate_boundary_metric( + int point_idx1, int point_idx2, + const std::set& live_u_index, + const std::set& live_v_index, + const std::set& live_w_index, + double distance_norm, bool flag_cosmic) const { - Flash flash; // starts invalid + geo_point_t p1 = point3d(point_idx1); + geo_point_t p2 = point3d(point_idx2); + + // Get wire indices for both points + int p1_u = wire_index(point_idx1, 0); + int p1_v = wire_index(point_idx1, 1); + int p1_w = wire_index(point_idx1, 2); + + int p2_u = wire_index(point_idx2, 0); + int p2_v = wire_index(point_idx2, 1); + int p2_w = wire_index(point_idx2, 2); + + // Count live channels between points + int 
ncount_live_u = count_live_channels_between( + std::min(p1_u, p2_u), std::max(p1_u, p2_u), live_u_index); + int ncount_live_v = count_live_channels_between( + std::min(p1_v, p2_v), std::max(p1_v, p2_v), live_v_index); + int ncount_live_w = count_live_channels_between( + std::min(p1_w, p2_w), std::max(p1_w, p2_w), live_w_index); + + // Calculate boundary metric + double value; + if (flag_cosmic) { + value = fabs(p1.x() - p2.x()) / units::mm + + std::abs(p1_u - p2_u) * 1.0 + ncount_live_u * 1.0 + + std::abs(p1_v - p2_v) * 1.0 + ncount_live_v * 1.0 + + std::abs(p1_w - p2_w) * 1.0 + ncount_live_w * 1.0 + + sqrt(pow(p1.x() - p2.x(), 2) + pow(p1.y() - p2.y(), 2) + pow(p1.z() - p2.z(), 2)) / units::mm; + } else { + value = std::abs(p1.x() - p2.x()) / distance_norm + + std::abs(p1_u - p2_u) * 0.0 + ncount_live_u * 1.0 + + std::abs(p1_v - p2_v) * 0.0 + ncount_live_v * 1.0 + + std::abs(p1_w - p2_w) * 0.0 + ncount_live_w * 1.0; + } + + return value; +} - const auto* p = node()->parent; - if (!p) return flash; - const auto* g = p->value.facade(); - if (!g) return flash; - const int flash_index = get_scalar("flash", -1); +void Cluster::build_steiner_kd_cache(const std::string& steiner_pc_name) const +{ + // Get the steiner point cloud + if (!has_pc(steiner_pc_name)) { + raise("Steiner point cloud '%s' not found", steiner_pc_name); + } + + auto& steiner_pc = get_pc(steiner_pc_name); + auto& cache_ref = const_cast(cache()); + + // Create a MultiQuery from the dataset - this builds the k-d tree internally + // Note: const_cast is needed because MultiQuery constructor requires non-const reference + // but query operations don't modify the dataset + cache_ref.steiner_kd = std::make_unique(const_cast(steiner_pc)); + + const auto& coords = get_default_scope().coords; + // Get a 3D query object for x,y,z coordinates and cache it + cache_ref.steiner_query3d = cache_ref.steiner_kd->get(coords); + + // Cache the name and mark as built + cache_ref.cached_steiner_pc_name = steiner_pc_name; 
+ cache_ref.steiner_kd_built = true; +} - //std::cout << "Test3 " << flash_index << std::endl; +void Cluster::ensure_steiner_kd_cache(const std::string& steiner_pc_name) const +{ + const auto& cache_ref = cache(); - if (flash_index < 0) { - return flash; + // Check if cache is valid for this steiner_pc_name + if (cache_ref.steiner_kd_built && cache_ref.steiner_kd && cache_ref.steiner_query3d && + cache_ref.cached_steiner_pc_name == steiner_pc_name) { + return; // Cache is valid } - if (! g->has_pc("flash")) { - return flash; + + // Rebuild the cache + build_steiner_kd_cache(steiner_pc_name); +} + +Cluster::steiner_kd_results_t Cluster::kd_steiner_radius(double radius_not_squared, + const geo_point_t& query_point, + const std::string& steiner_pc_name) const +{ + ensure_steiner_kd_cache(steiner_pc_name); + + const auto& cache_ref = cache(); + + // Convert geo_point_t to std::vector for the query + std::vector query_vec = {query_point.x(), query_point.y(), query_point.z()}; + + // Perform radius query using cached query3d (note: radius expects squared radius) + auto kd_results = cache_ref.steiner_query3d->radius(radius_not_squared * radius_not_squared, query_vec); + + // Convert Results to std::vector> + steiner_kd_results_t results; + results.reserve(kd_results.index.size()); + for (size_t i = 0; i < kd_results.index.size(); ++i) { + results.emplace_back(kd_results.index[i], kd_results.distance[i]); } - flash.m_valid = true; - - // These are kind of inefficient as we get the "flash" PC each time. 
- flash.m_time = g->get_element("flash", "time", flash_index, 0); - flash.m_value = g->get_element("flash", "value", flash_index, 0); - flash.m_ident = g->get_element("flash", "ident", flash_index, -1); - flash.m_type = g->get_element("flash", "type", flash_index, -1); + + return results; +} - // std::cout << "Test3: " << g->has_pc("flash") << " " << g->has_pc("light") << " " << g->has_pc("flashlight") << " " << flash_index << " " << flash.m_time << std::endl; +Cluster::steiner_kd_results_t Cluster::kd_steiner_knn(int nnearest, + const geo_point_t& query_point, + const std::string& steiner_pc_name) const +{ + ensure_steiner_kd_cache(steiner_pc_name); + + const auto& cache_ref = cache(); + + // Convert geo_point_t to std::vector for the query + std::vector query_vec = {query_point.x(), query_point.y(), query_point.z()}; + + // std::cout << "Performing k-NN query for " << nnearest << " nearest neighbors to point: " + // << query_vec[0] << ", " << query_vec[1] << ", " << query_vec[2] << std::endl; - if (!(g->has_pc("light") && g->has_pc("flashlight"))) { - return flash; // valid, but no vector info. + // Perform k-NN query using cached query3d + auto kd_results = cache_ref.steiner_query3d->knn(nnearest, query_vec); + + // Convert Results to std::vector> + steiner_kd_results_t results; + results.reserve(kd_results.index.size()); + for (size_t i = 0; i < kd_results.index.size(); ++i) { + results.emplace_back(kd_results.index[i], kd_results.distance[i]); } - // These are spans. We walk the fl to look up in the l. 
- const auto fl_flash = g->get_pcarray("flash", "flashlight"); - const auto fl_light = g->get_pcarray("light", "flashlight"); - const auto l_times = g->get_pcarray("time", "light"); - const auto l_values = g->get_pcarray("value", "light"); - const auto l_errors = g->get_pcarray("error", "light"); + return results; +} - // std::cout << "Test3: " << fl_flash.size() << " " << fl_light.size() << std::endl; +std::vector >> Cluster::kd_steiner_points(const steiner_kd_results_t& res, + const std::string& steiner_pc_name) const +{ + if (!has_pc(steiner_pc_name)) { + raise("Steiner point cloud '%s' not found", steiner_pc_name); + } + + auto& steiner_pc = get_pc(steiner_pc_name); + const auto& scope = get_default_scope(); + auto x_array = steiner_pc.get(scope.coords.at(0)); + auto y_array = steiner_pc.get(scope.coords.at(1)); + auto z_array = steiner_pc.get(scope.coords.at(2)); - const size_t nfl = fl_light.size(); - for (size_t ifl = 0; ifl < nfl; ++ifl) { - if (fl_flash[ifl] != flash_index) continue; - const int light_index = fl_light[ifl]; - - flash.m_times.push_back(l_times[light_index]); - flash.m_values.push_back(l_values[light_index]); - flash.m_errors.push_back(l_errors[light_index]); + auto wpid_array = steiner_pc.get("wpid"); + auto flag_steiner_terminal_array = steiner_pc.get("flag_steiner_terminal"); + + std::vector >> points; + points.reserve(res.size()); + + // std::cout << x_array->size_major() << " " << wpid_array->size_major() << " " << flag_steiner_terminal_array->size_major() << std::endl; + + for (const auto& [index, distance] : res) { + double x = x_array->element(index); + double y = y_array->element(index); + double z = z_array->element(index); + auto wpid = wpid_array->element(index); + int flag_steiner_terminal = flag_steiner_terminal_array->element(index); + + points.emplace_back(geo_point_t{x, y, z}, std::make_pair(wpid, flag_steiner_terminal)); } - return flash; + + return points; } + // Local Variables: // mode: c++ // c-basic-offset: 4 diff 
--git a/clus/src/Facade_Ensemble.cxx b/clus/src/Facade_Ensemble.cxx new file mode 100644 index 000000000..abb48dc5c --- /dev/null +++ b/clus/src/Facade_Ensemble.cxx @@ -0,0 +1,85 @@ +#include "WireCellClus/Facade_Grouping.h" + +#include "WireCellClus/Facade_Ensemble.h" + +using namespace WireCell::Clus::Facade; + +bool Ensemble::has(const std::string& name) const +{ + std::vector ret; + for (const auto* child : children()) { + if (name == child->get_name()) { + return true; + } + } + return false; +} + +std::vector Ensemble::names() const +{ + std::vector ret; + for (const auto* child : children()) { + ret.push_back(child->get_name()); + } + return ret; +} + +std::set Ensemble::unique_names() const +{ + std::set ret; + for (const auto* child : children()) { + ret.insert(child->get_name()); + } + return ret; +} + + +std::vector Ensemble::with_name(const std::string& name) const +{ + std::vector ret; + for (const auto* child : children()) { + if (child->get_name() == name) { + ret.push_back(child); + } + } + return ret; +} +std::vector Ensemble::with_name(const std::string& name) +{ + std::vector ret; + for (auto* child : children()) { + if (child->get_name() == name) { + ret.push_back(child); + } + } + return ret; +} + +Grouping& Ensemble::make_grouping(const std::string& name) +{ + auto* pnode = m_node->insert(); + Grouping* grouping = pnode->value.facade(); + grouping->set_name(name); + return *grouping; +} + +Grouping& Ensemble::add_grouping_node(const std::string& name, points_t::node_ptr&& gnode) +{ + auto* pnode = m_node->insert(std::move(gnode)); + Grouping* grouping = pnode->value.facade(); + grouping->set_name(name); + return *grouping; +} + +std::map Ensemble::groupings_by_name() +{ + std::map ret; + for (auto* child : children()) { + auto name = child->get_name(); + if (ret.find(name) == ret.end()) { + ret[name] = child; + } + } + return ret; +} + diff --git a/clus/src/Facade_Grouping.cxx b/clus/src/Facade_Grouping.cxx index dc1e8b0e4..0ea3f8ba6 100644 
--- a/clus/src/Facade_Grouping.cxx +++ b/clus/src/Facade_Grouping.cxx @@ -1,11 +1,13 @@ #include "WireCellClus/Facade_Blob.h" #include "WireCellClus/Facade_Cluster.h" #include "WireCellClus/Facade_Grouping.h" +#include "WireCellAux/PlaneTools.h" #include using namespace WireCell; +using namespace WireCell::Clus; +using namespace WireCell::Clus::Facade; using namespace WireCell::PointCloud; -using namespace WireCell::PointCloud::Facade; // using WireCell::PointCloud::Dataset; using namespace WireCell::PointCloud::Tree; // for "Points" node value type // using WireCell::PointCloud::Tree::named_pointclouds_t; @@ -50,104 +52,246 @@ std::string Facade::dump(const Facade::Grouping& grouping, int level) } +// static std::tuple parse_dead_winds(const std::string& ds_name) { +// int apa, face; +// char plane; +// // Use sscanf to extract the numbers and the plane letter. +// // The format string must match the structure of ds_name. +// if (std::sscanf(ds_name.c_str(), "dead_winds_a%df%dp%c", &apa, &face, &plane) != 3) { +// throw std::runtime_error("Failed to parse string: " + ds_name); +// } +// // Convert the plane letter to an index. +// int plane_index = -1; +// switch (plane) { +// case 'U': plane_index = 0; break; +// case 'V': plane_index = 1; break; +// case 'W': plane_index = 2; break; +// default: +// throw std::runtime_error("Unexpected plane letter in: " + ds_name); +// } +// return std::make_tuple(apa, face, plane_index); +// } + void Grouping::on_construct(node_type* node) { this->NaryTree::Facade::on_construct(node); - const auto& lpcs = m_node->value.local_pcs(); - /// FIXME: use fixed numbers? 
- std::set faces = {0, 1}; - std::set planes = {0, 1, 2}; - for (const int face : faces) { - for (const int plane : planes) { - const std::string ds_name = String::format("dead_winds_f%dp%d", face, plane); - if (lpcs.find(ds_name) == lpcs.end()) continue; - const auto& pc_dead_winds = lpcs.at(ds_name); - const auto& xbeg = pc_dead_winds.get("xbeg")->elements(); - const auto& xend = pc_dead_winds.get("xend")->elements(); - const auto& wind = pc_dead_winds.get("wind")->elements(); - for (size_t i = 0; i < xbeg.size(); ++i) { - m_dead_winds[face][plane][wind[i]] = {xbeg[i], xend[i]}; - } - } + + + // const auto& lpcs = m_node->value.local_pcs(); + // for (const auto& [name, pc_dead_winds] : lpcs) { + // // std::cout << "Grouping::on_construct: name=" << name << std::endl; + // if (name.find("dead_winds") != std::string::npos) { + // const auto& xbeg = pc_dead_winds.get("xbeg")->elements(); + // const auto& xend = pc_dead_winds.get("xend")->elements(); + // const auto& wind = pc_dead_winds.get("wind")->elements(); + // auto [apa, face, plane] = parse_dead_winds(name); + // for (size_t i = 0; i < xbeg.size(); ++i) { + // m_dead_winds[apa][face][plane][wind[i]] = {xbeg[i], xend[i]}; + // } + // // std::cout << "Xin on construct " << nchildren() << " " << apa << " " << face << " " << plane << " " + // // << xbeg.size() << " " << xend.size() << " " << wind.size() << std::endl; + // } + // } + + // for (const int face : faces) { + // for (const int plane : planes) { + // const std::string ds_name = String::format("dead_winds_f%dp%d", face, plane); + // if (lpcs.find(ds_name) == lpcs.end()) continue; + // const auto& pc_dead_winds = lpcs.at(ds_name); + // const auto& xbeg = pc_dead_winds.get("xbeg")->elements(); + // const auto& xend = pc_dead_winds.get("xend")->elements(); + // const auto& wind = pc_dead_winds.get("wind")->elements(); + // for (size_t i = 0; i < xbeg.size(); ++i) { + // m_dead_winds[face][plane][wind[i]] = {xbeg[i], xend[i]}; + // } + // } + // } +} + + 
+void Facade::Grouping::from(const Grouping& other) +{ + m_anodes = other.m_anodes; + m_dv = other.m_dv; +} + + +void Facade::Grouping::enumerate_idents(const std::string& sort_order, int id) +{ + if (sort_order.empty() or sort_order == "none") { + return; + } + + auto clusters = children(); + + if (sort_order == "size") { + sort_clusters(clusters); + std::reverse(clusters.begin(), clusters.end()); // largest first + } + // Other order is "tree" which means, leave as-is. + + // Count IDs starting with initial value of "id". + for (auto* cluster : children()) { + cluster->set_ident(id++); } } +std::map Grouping::separate( + Cluster*& cluster, + const std::vector groups, + bool remove, + bool notify_value) +{ + const int ident = cluster->ident(); + auto ret = this->NaryTree::FacadeParent::separate(cluster, groups, false, notify_value); + + // Clear cache of original cluster after separation + if (!remove) { + cluster->invalidate_cache(); + } + + for (auto& [_, c] : ret) { + c->set_ident(ident); + c->from(*cluster); + } + + if(remove){ + // Remove the original cluster from the grouping. 
+ this->destroy_child(cluster, notify_value); + } + return ret; +} + void Grouping::fill_cache(GroupingCache& gc) const { { // In pre-cached code this was Grouping::fill_proj_centers_pitch_mags() const const int ndummy_layers = 2; - if (!m_anode) { + if (m_anodes.size()==0) { raise("anode is null"); } - for (const auto& face : m_anode->faces()) { - // std::cout<< "fill_cache: anode ident" << m_anode->ident() << " face ident " << face->ident() << " face which " << face->which() << std::endl; - const auto& coords = face->raygrid(); - // skip dummy layers so the vector matches 0, 1, 2 plane order - for (int layer=ndummy_layers; layerident()][layer-ndummy_layers] = proj_center; - gc.pitch_mags[face->ident()][layer-ndummy_layers] = coords.pitch_mags()[layer]; + for (const auto& [ident, anode] : m_anodes) { + for (const auto& face : anode->faces()) { + const auto& coords = face->raygrid(); + // skip dummy layers so the vector matches 0, 1, 2 plane order + for (int layer=ndummy_layers; layerident()][face->ident()][layer - ndummy_layers] = proj_center; + gc.pitch_mags[anode->ident()][face->ident()][layer - ndummy_layers] = coords.pitch_mags()[layer]; + } } } } + + { + for (size_t iclus = 0; iclus != children().size(); iclus++) { + const Cluster* cluster = children().at(iclus); + const auto& wpids = cluster->wpids_blob(); + gc.cluster_wpids.insert(wpids.begin(), wpids.end()); + } + // for (const auto wpid : gc.cluster_wpids) { + // std::cout << "Grouping::fill_cache wpid: " << wpid.name() << std::endl; + // } + } + + // fill cache related to the detector volume + // for (const auto wpid : gc.dv_wpids) { + // std::cout << "DEBUG Grouping::fill_cache wpid: " << wpid.name() << std::endl; + // } + fill_dv_cache(gc); + // for (const auto wpid : gc.dv_wpids) { + // std::cout << "DEBUG Grouping::fill_cache wpid: " << wpid.name() << std::endl; + // } + } +void Grouping::fill_dv_cache(GroupingCache& gc) const +{ + if (m_dv != nullptr) { + for (auto& [wpid_ident, iface] : 
m_dv->wpident_faces()) { + const WirePlaneId wpid(wpid_ident); + // std::cout << "DEBUG Grouping::fill_dv_cache wpid: " << wpid.name() << std::endl; + gc.dv_wpids.insert(wpid); + // std::cout << "DEBUG Grouping::fill_dv_cache gc.dv_wpids.size() " << gc.dv_wpids.size() << std::endl; + int face = wpid.face(); + int apa = wpid.apa(); + // int plane = wpid.index(); + // std::cout << "Test: " << apa << " " << face << " " << plane << " " << kAllLayers << " " << m_dv << std::endl; + WirePlaneId wpid_all(kAllLayers, face, apa); + gc.map_time_offset[apa][face] = m_dv->metadata(wpid_all)["time_offset"].asDouble(); + gc.map_drift_speed[apa][face] = m_dv->metadata(wpid_all)["drift_speed"].asDouble(); + gc.map_tick[apa][face] = m_dv->metadata(wpid_all)["tick"].asDouble(); + + // Create wpids for all three planes with the same APA and face + WirePlaneId wpid_u(kUlayer, face, apa); + WirePlaneId wpid_v(kVlayer, face, apa); + WirePlaneId wpid_w(kWlayer, face, apa); + + // Get wire directions for all planes + Vector wire_dir_u = m_dv->wire_direction(wpid_u); + Vector wire_dir_v = m_dv->wire_direction(wpid_v); + Vector wire_dir_w = m_dv->wire_direction(wpid_w); + + // Calculate angles + gc.map_wire_angles[apa][face][0] = std::atan2(wire_dir_u.z(), wire_dir_u.y()); + gc.map_wire_angles[apa][face][1] = std::atan2(wire_dir_v.z(), wire_dir_v.y()); + gc.map_wire_angles[apa][face][2] = std::atan2(wire_dir_w.z(), wire_dir_w.y()); + + gc.map_drift_dir[apa][face] = m_dv->face_dirx(wpid); + + gc.map_nticks_per_slice[apa][face] = m_dv->metadata(wpid_all)["nticks_live_slice"].asInt(); + + // std::cout << "Test: " << gc.map_time_offset[apa][face] << " " << gc.map_drift_speed[apa][face] << " " << gc.map_tick[apa][face] << " " << gc.map_drift_dir[apa][face] << std::endl; + } + // for (auto wpid : gc.dv_wpids) { + // std::cout << "DEBUG Grouping::fill_dv_cache gc.dv_wpids wpid: " << wpid.name() << std::endl; + // } + // double time_offset = m_dv->metadata(wpid_all)["time_offset"].asDouble(); + // 
std::map > > map_time_offset; + // std::map > > map_drift_speed; + // std::map > > map_tick; + } +} -void Grouping::set_params(const WireCell::Configuration& cfg) { - m_tp.face = get(cfg, "face", m_tp.face); - m_tp.pitch_u = get(cfg, "pitch_u", m_tp.pitch_u); - m_tp.pitch_v = get(cfg, "pitch_v", m_tp.pitch_v); - m_tp.pitch_w = get(cfg, "pitch_w", m_tp.pitch_w); - m_tp.angle_u = get(cfg, "angle_u", m_tp.angle_u); - m_tp.angle_v = get(cfg, "angle_v", m_tp.angle_v); - m_tp.angle_w = get(cfg, "angle_w", m_tp.angle_w); - m_tp.drift_speed = get(cfg, "drift_speed", m_tp.drift_speed); - m_tp.tick = get(cfg, "tick", m_tp.tick); - m_tp.tick_drift = get(cfg, "tick_drift", m_tp.tick_drift); - m_tp.time_offset = get(cfg, "time_offset", m_tp.time_offset); - m_tp.nticks_live_slice = get(cfg, "nticks_live_slice", m_tp.nticks_live_slice); - m_tp.FV_xmin = get(cfg, "FV_xmin", m_tp.FV_xmin); - m_tp.FV_xmax = get(cfg, "FV_xmax", m_tp.FV_xmax); - m_tp.FV_ymin = get(cfg, "FV_ymin", m_tp.FV_ymin); - m_tp.FV_ymax = get(cfg, "FV_ymax", m_tp.FV_ymax); - m_tp.FV_zmin = get(cfg, "FV_zmin", m_tp.FV_zmin); - m_tp.FV_zmax = get(cfg, "FV_zmax", m_tp.FV_zmax); - m_tp.FV_xmin_margin = get(cfg, "FV_xmin_margin", m_tp.FV_xmin_margin); - m_tp.FV_xmax_margin = get(cfg, "FV_xmax_margin", m_tp.FV_xmax_margin); - m_tp.FV_ymin_margin = get(cfg, "FV_ymin_margin", m_tp.FV_ymin_margin); - m_tp.FV_ymax_margin = get(cfg, "FV_ymax_margin", m_tp.FV_ymax_margin); - m_tp.FV_zmin_margin = get(cfg, "FV_zmin_margin", m_tp.FV_zmin_margin); - m_tp.FV_zmax_margin = get(cfg, "FV_zmax_margin", m_tp.FV_zmax_margin); + +void Grouping::set_anodes(const std::vector& anodes) { + for (auto anode : anodes) { + m_anodes[anode->ident()] = anode; + } +} + +const IAnodePlane::pointer Grouping::get_anode(const int ident) const { + if (m_anodes.find(ident) == m_anodes.end()) { + raise("anode %d not found", ident); + } + return m_anodes.at(ident); } size_t Grouping::hash() const { std::size_t h = 0; - boost::hash_combine(h, 
m_tp.pitch_u); - boost::hash_combine(h, m_tp.pitch_v); - boost::hash_combine(h, m_tp.pitch_w); - boost::hash_combine(h, m_tp.angle_u); - boost::hash_combine(h, m_tp.angle_v); - boost::hash_combine(h, m_tp.angle_w); - boost::hash_combine(h, m_tp.tick_drift); + for (auto wpid : cache().dv_wpids) { + boost::hash_combine(h, wpid.ident()); + } auto clusters = children(); // copy vector - sort_clusters(clusters); + // sort_clusters(clusters); for (const Cluster* cluster : clusters) { boost::hash_combine(h, cluster->hash()); } return h; } -const Grouping::kd2d_t& Grouping::kd2d(const int face, const int pind) const +const Grouping::kd2d_t& Grouping::kd2d(const int apa, const int face, const int pind) const { - const auto sname = String::format("ctpc_f%dp%d", face, pind); + std::vector plane_names = {"U", "V", "W"}; + const auto sname = String::format("ctpc_a%df%dp%d",apa, face, plane_names[pind]); + // const auto sname = String::format("ctpc_f%dp%d", face, pind); Tree::Scope scope = {sname, {"x", "y"}, 1}; const auto& sv = m_node->value.scoped_view(scope); // std::cout << "sname: " << sname << " npoints: " << sv.kd().npoints() << std::endl; @@ -155,13 +299,13 @@ const Grouping::kd2d_t& Grouping::kd2d(const int face, const int pind) const } -bool Grouping::is_good_point(const geo_point_t& point, const int face, double radius, int ch_range, int allowed_bad) const { +bool Grouping::is_good_point(const geo_point_t& point, const int apa, const int face, double radius, int ch_range, int allowed_bad) const { const int nplanes = 3; int matched_planes = 0; for (int pind = 0; pind < nplanes; ++pind) { - if (get_closest_points(point, radius, face, pind).size() > 0) { + if (get_closest_points(point, radius, apa, face, pind).size() > 0) { matched_planes++; - } else if (get_closest_dead_chs(point, ch_range, face, pind)) { + } else if (get_closest_dead_chs(point, ch_range, apa, face, pind)) { matched_planes++; } } @@ -172,7 +316,7 @@ bool Grouping::is_good_point(const geo_point_t& 
point, const int face, double ra return false; } -bool Grouping::is_good_point_wc(const geo_point_t& point, const int face, double radius, int ch_range, int allowed_bad) const +bool Grouping::is_good_point_wc(const geo_point_t& point, const int apa, const int face, double radius, int ch_range, int allowed_bad) const { const int nplanes = 3; int matched_planes = 0; @@ -180,10 +324,10 @@ bool Grouping::is_good_point_wc(const geo_point_t& point, const int face, double // Loop through U,V,W planes for (int pind = 0; pind < nplanes; pind++) { int weight = (pind == 2) ? 2 : 1; // W plane counts double - if (get_closest_points(point, radius, face, pind).size() > 0) { + if (get_closest_points(point, radius, apa, face, pind).size() > 0) { matched_planes += weight; } - else if (get_closest_dead_chs(point, ch_range, face, pind)) { + else if (get_closest_dead_chs(point, ch_range, apa, face, pind)) { matched_planes += weight; } } @@ -191,7 +335,7 @@ bool Grouping::is_good_point_wc(const geo_point_t& point, const int face, double return matched_planes >= 4 - allowed_bad; } -std::vector Grouping::test_good_point(const geo_point_t& point, const int face, +std::vector Grouping::test_good_point(const geo_point_t& point, const int apa, const int face, double radius, int ch_range) const { std::vector num_planes(6, 0); // Initialize with 6 zeros @@ -199,7 +343,7 @@ std::vector Grouping::test_good_point(const geo_point_t& point, const int f // Check each plane (0,1,2) for (int pind = 0; pind < 3; ++pind) { // Get closest points for this plane - const auto closest_pts = get_closest_points(point, radius, face, pind); + const auto closest_pts = get_closest_points(point, radius, apa, face, pind); if (closest_pts.size() > 0) { // Has hits in this plane @@ -207,7 +351,7 @@ std::vector Grouping::test_good_point(const geo_point_t& point, const int f } else { // No hits, check if it's in dead region - if (get_closest_dead_chs(point, ch_range, face, pind)) { + if (get_closest_dead_chs(point, 
ch_range, apa, face, pind)) { num_planes[pind + 3]++; } } @@ -217,14 +361,14 @@ std::vector Grouping::test_good_point(const geo_point_t& point, const int f return num_planes; } -double Facade::Grouping::get_ave_3d_charge(const geo_point_t& point, const double radius, const int face) const { +double Facade::Grouping::get_ave_3d_charge(const geo_point_t& point, const int apa, const int face, const double radius) const { double charge = 0; int ncount = 0; const int nplanes = 3; // Check all three planes for (int pind = 0; pind < nplanes; ++pind) { - if (!get_closest_dead_chs(point, 1, face, pind)) { - charge += get_ave_charge(point, radius, face, pind); + if (!get_closest_dead_chs(point, 1, apa, face, pind)) { + charge += get_ave_charge(point, apa, face, pind, radius); ncount++; } } @@ -235,15 +379,17 @@ double Facade::Grouping::get_ave_3d_charge(const geo_point_t& point, const doubl return charge; } -double Facade::Grouping::get_ave_charge(const geo_point_t& point, const double radius, const int face, const int pind) const { +double Facade::Grouping::get_ave_charge(const geo_point_t& point, const int apa, const int face, const int pind, const double radius) const { double sum_charge = 0; double ncount = 0; // Get closest points within radius - auto nearby_points = get_closest_points(point, radius, face, pind); + auto nearby_points = get_closest_points(point, radius, apa, face, pind); // Access the charge information from ctpc dataset - const std::string ds_name = String::format("ctpc_f%dp%d", face, pind); + std::vector plane_names = {"U", "V", "W"}; + const std::string ds_name = String::format("ctpc_a%df%dp%d",apa, face, plane_names[pind]); + // const std::string ds_name = String::format("ctpc_f%dp%d", face, pind); const auto& local_pcs = m_node->value.local_pcs(); if (local_pcs.find(ds_name) == local_pcs.end()) { @@ -267,20 +413,20 @@ double Facade::Grouping::get_ave_charge(const geo_point_t& point, const double r -Grouping::kd_results_t 
Grouping::get_closest_points(const geo_point_t& point, const double radius, const int face, +Grouping::kd_results_t Grouping::get_closest_points(const geo_point_t& point, const double radius, const int apa, const int face, int pind) const { double x = point[0]; - const auto [angle_u,angle_v,angle_w] = wire_angles(); + const auto [angle_u,angle_v,angle_w] = wire_angles(apa, face); std::vector angles = {angle_u, angle_v, angle_w}; double y = cos(angles[pind]) * point[2] - sin(angles[pind]) * point[1]; - const auto& skd = kd2d(face, pind); + const auto& skd = kd2d(apa, face, pind); return skd.radius>(radius * radius, {x, y}); } -bool Grouping::get_closest_dead_chs(const geo_point_t& point, const int ch_range, const int face, int pind) const { - const auto [tind, wind] = convert_3Dpoint_time_ch(point, face, pind); - const auto& ch2xrange = get_dead_winds(face, pind); +bool Grouping::get_closest_dead_chs(const geo_point_t& point, const int ch_range, const int apa, const int face, int pind) const { + const auto [tind, wind] = convert_3Dpoint_time_ch(point, apa, face, pind); + const auto& ch2xrange = get_dead_winds(apa, face, pind); for (int ch = wind - ch_range; ch <= wind + ch_range; ++ch) { if (ch2xrange.find(ch) == ch2xrange.end()) continue; const auto [xmin, xmax] = ch2xrange.at(ch); @@ -292,57 +438,64 @@ bool Grouping::get_closest_dead_chs(const geo_point_t& point, const int ch_range return false; } -std::tuple Grouping::convert_3Dpoint_time_ch(const geo_point_t& point, const int face, const int pind) const { - if (m_anode == nullptr) { +std::tuple Grouping::convert_3Dpoint_time_ch(const geo_point_t& point, const int apa, const int face, const int pind) const { + if (m_anodes.size()==0) { raise("Anode is null"); } - const auto& iface = m_anode->face(face); + const auto iface = m_anodes.at(apa)->faces()[face]; if (iface == nullptr) { - raise("anode %d has no face %d", m_anode->ident(), face); + raise("anode %d has no face %d", m_anodes.at(apa)->ident(), face); } - 
const auto [angle_u,angle_v,angle_w] = wire_angles(); + const auto [angle_u,angle_v,angle_w] = wire_angles(apa, face); std::vector angles = {angle_u, angle_v, angle_w}; const double angle = angles[pind]; - const double pitch = pitch_mags().at(face).at(pind); - const double center = proj_centers().at(face).at(pind); + const double pitch = pitch_mags().at(apa).at(face).at(pind); + const double center = proj_centers().at(apa).at(face).at(pind); // std::cout << "Test: " << pitch/units::cm << " " << center/units::cm << std::endl; const int wind = point2wind(point, angle, pitch, center); - const auto params = get_params(); + // const auto params = get_params(); + double time_offset = cache().map_time_offset.at(apa).at(face); + double drift_speed = cache().map_drift_speed.at(apa).at(face); + double tick = cache().map_tick.at(apa).at(face); //std::cout << "Test: " << params.time_offset/units::us << " " << params.drift_speed/(units::mm/units::us) << " " << point[0] << std::endl; - const double time = drift2time(iface, params.time_offset, params.drift_speed, point[0]); - const int tind = std::round(time / params.tick); + const double time = drift2time(iface, time_offset, drift_speed, point[0]); + const int tind = std::round(time / tick); return {tind, wind}; } -std::pair Grouping::convert_time_ch_2Dpoint(const int timeslice, const int channel, const int face, const int plane) const +std::pair Grouping::convert_time_ch_2Dpoint(const int timeslice, const int channel, const int apa, const int face, const int plane) const { - if (m_anode == nullptr) { + if (m_anodes.size() == 0) { raise("Anode is null"); } - const auto& iface = m_anode->face(face); + const auto iface = m_anodes.at(apa)->faces()[face]; if (iface == nullptr) { - raise("anode %d has no face %d", m_anode->ident(), face); + raise("anode %d has no face %d", m_anodes.at(apa)->ident(), face); } const int nplanes = 3; - const auto params = get_params(); + // const auto params = get_params(); const auto& pitch_mags = 
this->pitch_mags(); const auto& proj_centers = this->proj_centers(); + double time_offset = cache().map_time_offset.at(apa).at(face); + double drift_speed = cache().map_drift_speed.at(apa).at(face); + double tick = cache().map_tick.at(apa).at(face); + // Convert time to x position - const double x = time2drift(iface, params.time_offset, params.drift_speed, timeslice * params.tick); + const double x = time2drift(iface, time_offset, drift_speed, timeslice * tick); // Get y position based on channel and plane double y; if (plane >= 0 && plane < nplanes) { - const double pitch = pitch_mags.at(face).at(plane); - const double center = proj_centers.at(face).at(plane); + const double pitch = pitch_mags.at(apa).at(face).at(plane); + const double center = proj_centers.at(apa).at(face).at(plane); y = pitch * (channel+0.5) + center; } else { @@ -353,100 +506,111 @@ std::pair Grouping::convert_time_ch_2Dpoint(const int timeslice, } -size_t Grouping::get_num_points(const int face, const int pind) const { - const auto sname = String::format("ctpc_f%dp%d", face, pind); +size_t Grouping::get_num_points(const int apa, const int face, const int pind) const { + std::vector plane_names = {"U", "V", "W"}; + const auto sname = String::format("ctpc_a%df%dp%d",apa, face, plane_names[pind]); + // const auto sname = String::format("ctpc_f%dp%d", face, pind); Tree::Scope scope = {sname, {"x", "y"}, 1}; const auto& sv = m_node->value.scoped_view(scope); return sv.npoints(); } -std::vector> Facade::Grouping::get_overlap_dead_chs(const int min_time, const int max_time, - const int min_ch, const int max_ch, const int face, const int pind, const bool flag_ignore_time) const +std::vector > Facade::Grouping::get_overlap_dead_chs(const int min_time, const int max_time, + const int min_ch, const int max_ch, const int apa, const int face, const int pind, const bool flag_ignore_time) const { - if (!m_anode) { - raise("anode is null"); - } - const auto& params = get_params(); - - // Convert time to 
position - const double min_xpos = time2drift(m_anode->face(face), params.time_offset, params.drift_speed, min_time); - const double max_xpos = time2drift(m_anode->face(face), params.time_offset, params.drift_speed, max_time); - - std::set dead_chs; - const auto& dead_winds = get_dead_winds(face, pind); + auto results = get_all_dead_chs(apa, face, pind); + std::set overlap_results; - // Find overlapping dead channels - for (const auto& [wind, xrange] : dead_winds) { - const int temp_ch = wind; - const double temp_min_xpos = xrange.first; - const double temp_max_xpos = xrange.second; + for (auto& [ch, xrange] : results) { + int min_time_ch = xrange.first; + int max_time_ch = xrange.second; - if (flag_ignore_time) { - if (temp_ch >= min_ch && temp_ch <= max_ch) { - dead_chs.insert(temp_ch); - } + // Check if the channel is within the specified range + if (ch < min_ch || ch >= max_ch) { + continue; } - else { - if (temp_ch >= min_ch && temp_ch <= max_ch && - max_xpos >= temp_min_xpos && min_xpos <= temp_max_xpos) { - dead_chs.insert(temp_ch); - } + + // Check if the time range overlaps with the specified time range + if (flag_ignore_time || (min_time_ch < max_time && max_time_ch > min_time)) { + // Adjust time range to be within the specified bounds + // int overlap_min = std::max(min_time, min_time_ch); + // int overlap_max = std::min(max_time, max_time_ch); + // if (flag_ignore_time) { + // // If ignoring time, just use the channel range + // overlap_min = min_time; + // overlap_max = max_time; + // } + // overlap_results[ch] = std::make_pair(overlap_min, overlap_max); + overlap_results.insert(ch); // Store the channel that overlaps } } - // Convert set of channels to ranges - std::vector> dead_ch_range; - for (const auto ch : dead_chs) { - if (dead_ch_range.empty()) { - dead_ch_range.push_back(std::make_pair(ch, ch)); - } - else { - if (ch - dead_ch_range.back().second == 1) { - dead_ch_range.back().second = ch; + std::vector> overlap_ranges; + if 
(!overlap_results.empty()) { + int range_start = -1; + int prev_ch = -2; + for (int ch : overlap_results) { + if (range_start == -1) { + range_start = ch; } - else { - dead_ch_range.push_back(std::make_pair(ch, ch)); + else if (ch != prev_ch + 1) { + overlap_ranges.emplace_back(range_start, prev_ch + 1); + range_start = ch; } + prev_ch = ch; } + overlap_ranges.emplace_back(range_start, prev_ch + 1); } + return overlap_ranges; - return dead_ch_range; } -std::map> Facade::Grouping::get_all_dead_chs(const int face, const int pind, int expand) const +// channel -> [min_time, max_time) +std::map> Facade::Grouping::get_all_dead_chs(const int apa, const int face, const int pind, int expand) const { std::map> results; - const auto& dead_winds = get_dead_winds(face, pind); + const auto& dead_winds = get_dead_winds(apa, face, pind); + + double time_offset = cache().map_time_offset.at(apa).at(face); + double drift_speed = cache().map_drift_speed.at(apa).at(face); + + double tick = cache().map_tick.at(apa).at(face); // Add entries for this face/plane's dead channels for (const auto& [wind, xrange] : dead_winds) { int temp_ch = wind; // Convert position range to time ticks using drift parameters - int min_time = std::round(drift2time(m_anode->face(face), - m_tp.time_offset, - m_tp.drift_speed, - xrange.first)) - expand; - int max_time = std::round(drift2time(m_anode->face(face), - m_tp.time_offset, - m_tp.drift_speed, - xrange.second)) + expand; + int min_time = std::round(drift2time(m_anodes.at(apa)->faces()[face], + time_offset, + drift_speed, + xrange.first)/tick); + int max_time = std::round(drift2time(m_anodes.at(apa)->faces()[face], + time_offset, + drift_speed, + xrange.second)/tick); - results[temp_ch] = std::make_pair(std::min(min_time, max_time), std::max(min_time, max_time)); + results[temp_ch] = std::make_pair(std::min(min_time, max_time)-expand, std::max(min_time, max_time)+1+expand); } return results; } std::map, std::pair> 
Facade::Grouping::get_overlap_good_ch_charge( - int min_time, int max_time, int min_ch, int max_ch, + int min_time, int max_time, int min_ch, int max_ch, const int apa, const int face, const int pind) const { std::map, std::pair> map_time_ch_charge; // Get the point cloud for this face/plane - const std::string ds_name = String::format("ctpc_f%dp%d", face, pind); + std::vector plane_names = {"U", "V", "W"}; + const std::string ds_name = String::format("ctpc_a%df%dp%d",apa, face, plane_names[pind]); + + // std::cout << "Xin1: " << apa << " " << face << " " << ds_name << std::endl; + + + // const std::string ds_name = String::format("ctpc_f%dp%d", face, pind); if (m_node->value.local_pcs().find(ds_name) == m_node->value.local_pcs().end()) { return map_time_ch_charge; // Return empty if dataset not found } @@ -457,10 +621,13 @@ std::map, std::pair> Facade::Grouping::get_ove const auto& charge = ctpc.get("charge")->elements(); const auto& charge_err = ctpc.get("charge_err")->elements(); + // std::cout << "Xin1: " << slice_index.size() << " " << wind.size() << " " + // << charge.size() << " " << charge_err.size() << std::endl; + // Fill the map for points within the specified window for (size_t i = 0; i < slice_index.size(); ++i) { - if (slice_index[i] >= min_time && slice_index[i] <= max_time && - wind[i] >= min_ch && wind[i] <= max_ch) { + if (slice_index[i] >= min_time && slice_index[i] < max_time && + wind[i] >= min_ch && wind[i] < max_ch) { map_time_ch_charge[std::make_pair(slice_index[i], wind[i])] = std::make_pair(charge[i], charge_err[i]); } @@ -470,17 +637,226 @@ std::map, std::pair> Facade::Grouping::get_ove } +void Grouping::build_wire_cache(int apa, int face, int plane) const { + auto& gc = this->cache(); + auto& cache = gc.wire_caches[apa][face]; + + if (cache.cached[plane]) return; // Already built + + // Build charge cache from CTPC data + std::vector plane_names = {"U", "V", "W"}; + const std::string ctpc_name = String::format("ctpc_a%df%dp%d", apa, 
face, plane_names[plane]); + + // std::cout << "Xin: " << apa << " " << face << " " << plane << " " << ctpc_name << std::endl; + + const auto& local_pcs = m_node->value.local_pcs(); + if (local_pcs.find(ctpc_name) != local_pcs.end()) { + const auto& ctpc = local_pcs.at(ctpc_name); + const auto& slice_indices = ctpc.get("slice_index")->elements(); + const auto& wire_indices = ctpc.get("wind")->elements(); + const auto& charges = ctpc.get("charge")->elements(); + const auto& charge_errs = ctpc.get("charge_err")->elements(); + + // std::cout << "Xin: " << slice_indices.size() << " " << wire_indices.size() << " " << charges.size() << " " << charge_errs.size() << std::endl; + + // Populate charge cache + for (size_t i = 0; i < slice_indices.size(); ++i) { + int time_slice = slice_indices[i]; + int wire_index = wire_indices[i]; + double charge = charges[i]; + double uncertainty = charge_errs[i]; + + cache.charge_data[plane][time_slice][wire_index] = {charge, uncertainty}; + } + } + + // Build dead wires cache from dead_winds data using x positions + std::vector plane_chars = {"U", "V", "W"}; + const std::string dead_name = String::format("dead_winds_a%df%dp%d", apa, face, plane_chars[plane]); + + if (local_pcs.find(dead_name) != local_pcs.end()) { + const auto& dead_winds = local_pcs.at(dead_name); + const auto& xbeg = dead_winds.get("xbeg")->elements(); + const auto& xend = dead_winds.get("xend")->elements(); + const auto& wind = dead_winds.get("wind")->elements(); + + + // std::cout << "Xin: " << dead_name << " " << xbeg.size() << " " << xend.size() << " " << wind.size() << std::endl; + + // Populate dead wires cache with x positions + for (size_t i = 0; i < xbeg.size(); ++i) { + int wire_index = wind[i]; + double start_x = xbeg[i]; + double end_x = xend[i]; + cache.dead_wires[plane][wire_index] = {start_x, end_x}; + } + } + + cache.cached[plane] = true; +} + +std::pair Grouping::get_wire_charge(int apa, int face, int plane, + int wire_index, int time_slice) const { + 
// Ensure cache is built for this APA/face/plane + build_wire_cache(apa, face, plane); + + auto& gc = this->cache(); + const auto& cache = gc.wire_caches[apa][face]; + + // Look up charge data + auto time_it = cache.charge_data[plane].find(time_slice); + if (time_it == cache.charge_data[plane].end()) { + return {0.0, 1e12}; // No data for this time slice + } + + auto wire_it = time_it->second.find(wire_index); + if (wire_it == time_it->second.end()) { + return {0.0, 1e12}; // No data for this wire + } + + return wire_it->second; +} + +bool Grouping::is_wire_dead(int apa, int face, int plane, + int wire_index, int time_slice) const { + // Ensure cache is built for this APA/face/plane + build_wire_cache(apa, face, plane); + + auto& gc = this->cache(); + const auto& cache = gc.wire_caches[apa][face]; + + + + + // Look up dead wire x position range + auto wire_it = cache.dead_wires[plane].find(wire_index); + if (wire_it == cache.dead_wires[plane].end()) { + return false; // No dead x range for this wire + } + + // std::cout << "apa: " << apa << " face: " << face << " plane: " << plane << " wire_index: " << wire_index << " time_slice: " << time_slice << " " << cache.dead_wires[plane].size() << " " << cache.dead_wires[plane].begin()->first << " " << cache.dead_wires[plane].rbegin()->first << std::endl; + + // Convert time_slice to x position + double time_offset = gc.map_time_offset.at(apa).at(face); + double drift_speed = gc.map_drift_speed.at(apa).at(face); + double tick = gc.map_tick.at(apa).at(face); + auto iface = m_anodes.at(apa)->faces()[face]; + + double time = time_slice * tick; + double x_position = time2drift(iface, time_offset, drift_speed, time); + + // std::cout << "Wire dead check: apa=" << apa << ", face=" << face + // << ", plane=" << plane << ", wire_index=" << wire_index + // << ", time_slice=" << time_slice + // << ", x_position=" << x_position + // << ", range=(" << wire_it->second.first + // << ", " << wire_it->second.second << ")" << std::endl; + + 
// Check if x position falls within dead wire range + const auto& [start_x, end_x] = wire_it->second; + return (x_position >= start_x && x_position <= end_x); +} + + +const std::map>>>& Grouping::all_dead_winds() const { + // Since we can't return a reference to a temporary, we need a static/member variable + // Option 1: Use a mutable member to cache the reconstructed data + static thread_local std::map>>> reconstructed_dead_winds; + reconstructed_dead_winds.clear(); + + // Get all known APA/face combinations from existing cache data + auto& gc = this->cache(); + + // First, build cache for all known APA/face/plane combinations + // We can get these from cluster_wpids or dv_wpids + for (const auto& wpid : gc.cluster_wpids) { + int apa = wpid.apa(); + int face = wpid.face(); + int plane = wpid.index(); + if (plane < 3) { // Skip kAllLayers + build_wire_cache(apa, face, plane); + } + } + + // Now reconstruct the old format from cached data + for (const auto& [apa, face_map] : gc.wire_caches) { + for (const auto& [face, cache] : face_map) { + for (int plane = 0; plane < 3; ++plane) { + if (cache.cached[plane]) { + // Convert unordered_map to map for compatibility + for (const auto& [wire_idx, wire_range] : cache.dead_wires[plane]) { + reconstructed_dead_winds[apa][face][plane][wire_idx] = wire_range; + } + } + } + } + } + + return reconstructed_dead_winds; +} + +// Updated get_dead_winds() function +std::map>& Grouping::get_dead_winds(const int apa, const int face, const int pind) const { + // Build cache for this specific APA/face/plane + build_wire_cache(apa, face, pind); + + auto& gc = this->cache(); + auto& cache = gc.wire_caches[apa][face]; + + // Return reference to the cached dead wires for this plane + return cache.dead_wires[pind]; +} + void Grouping::clear_cache() const { - this->Mixin::clear_cache(); - + this->Mixins::Cached::clear_cache(); // This is utterly broken. #381. 
- m_dead_winds.clear(); + // m_dead_winds.clear(); + + +} +bool Grouping::is_blob_plane_bad(const Blob* blob, int plane, double cut_ratio) const { + const Cluster* cluster_ptr = blob->cluster(); + if (!cluster_ptr) return true; + + const Grouping* grouping = cluster_ptr->grouping(); + if (!grouping) return true; + + const auto wpid_val = blob->wpid(); + const int apa = wpid_val.apa(); + const int face = wpid_val.face(); + const int time_slice = blob->slice_index_min(); + + // Get wire ranges + int wire_min, wire_max; + switch (plane) { + case 0: wire_min = blob->u_wire_index_min(); wire_max = blob->u_wire_index_max(); break; + case 1: wire_min = blob->v_wire_index_min(); wire_max = blob->v_wire_index_max(); break; + case 2: wire_min = blob->w_wire_index_min(); wire_max = blob->w_wire_index_max(); break; + default: return true; + } + + if (wire_min >= wire_max) return true; + + // Count dead wires + int num_dead_wire = 0; + for (int wire_index = wire_min; wire_index < wire_max; wire_index++) { + if (grouping->is_wire_dead(apa, face, plane, wire_index, time_slice)) { + num_dead_wire++; + if (num_dead_wire > 1 && num_dead_wire >= cut_ratio * (wire_max - wire_min)) { + // If too many dead wires, consider the plane bad + return true; + } + } + } + + return false; } + // Local Variables: // mode: c++ // c-basic-offset: 4 diff --git a/clus/src/Facade_Mixins.cxx b/clus/src/Facade_Mixins.cxx new file mode 100644 index 000000000..16011671e --- /dev/null +++ b/clus/src/Facade_Mixins.cxx @@ -0,0 +1,60 @@ +#include "WireCellClus/Facade_Mixins.h" + +using namespace WireCell::Clus::Facade; + +bool Mixins::Graphs::has_graph(const std::string& name) const +{ + return m_graph_store.find(name) != m_graph_store.end(); +} + +Mixins::Graphs::graph_type& Mixins::Graphs::make_graph(const std::string& name, size_t nvertices) +{ + m_graph_store[name] = graph_type(nvertices); + return m_graph_store[name]; +} + +Mixins::Graphs::graph_type& Mixins::Graphs::give_graph(const std::string& name, 
Mixins::Graphs::graph_type&& gr) +{ + auto it = m_graph_store.find(name); + if (it != m_graph_store.end()) { + m_graph_store.erase(it); + } + auto it2 = m_graph_store.emplace(name, std::move(gr)); + return it2.first->second; +} + +Mixins::Graphs::graph_type& Mixins::Graphs::get_graph(const std::string& name) +{ + auto it = m_graph_store.find(name); + if (it == m_graph_store.end()) { + return make_graph(name); + } + return it->second; +} + +const Mixins::Graphs::graph_type& Mixins::Graphs::get_graph(const std::string& name) const +{ + auto it = m_graph_store.find(name); + if (it == m_graph_store.end()) { + raise("no graph with name " + name); + } + return it->second; +} + +Mixins::Graphs::graph_type Mixins::Graphs::take_graph(const std::string& name) +{ + // not actually in C++17 for GCC at least? + // auto entry = m_graph_store.extract(name); + // if (entry) { + // return std::move(entry.value()); + // } + auto it = m_graph_store.find(name); + if (it == m_graph_store.end()) { + return graph_type{}; + } + auto g = std::move(it->second); + m_graph_store.erase(it); + return g; +} + + diff --git a/clus/src/Facade_Summary.cxx b/clus/src/Facade_Summary.cxx index 37a9e2eba..a84a9bb9a 100644 --- a/clus/src/Facade_Summary.cxx +++ b/clus/src/Facade_Summary.cxx @@ -5,7 +5,7 @@ using namespace WireCell; using WireCell::PointCloud::json_summary; Configuration -PointCloud::Facade::json_summary(const PointCloud::Facade::Grouping& grp) +Clus::Facade::json_summary(const Clus::Facade::Grouping& grp) { Configuration ret; ret["type"] = "Grouping"; @@ -20,14 +20,14 @@ PointCloud::Facade::json_summary(const PointCloud::Facade::Grouping& grp) return ret; } -Configuration PointCloud::Facade::json_summary(const PointCloud::Facade::Cluster& cls) +Configuration Clus::Facade::json_summary(const Clus::Facade::Cluster& cls) { Configuration ret; // this is too huge to be exhaustive ret["type"] = "Cluster"; ret["hash"] = cls.hash(); ret["length"] = cls.get_length(); - ret["num_slices"] = 
cls.get_num_time_slices(); + // ret["num_slices"] = cls.get_num_time_slices(); ret["value"] = WireCell::PointCloud::json_summary(cls.value(), false); for (const auto* cf : cls.children()) { ret["clusters"].append(json_summary(*cf)); @@ -35,12 +35,12 @@ Configuration PointCloud::Facade::json_summary(const PointCloud::Facade::Cluster return ret; } -Configuration PointCloud::Facade::json_summary(const PointCloud::Facade::Blob& blb) +Configuration Clus::Facade::json_summary(const Clus::Facade::Blob& blb) { Configuration ret; ret["type"] = "Blob"; ret["hash"] = blb.hash(); - ret["face"] = blb.face(); + ret["face"] = blb.wpid().face(); ret["npoints"] = blb.npoints(); ret["charge"] = blb.charge(); ret["center_x"] = blb.center_x(); diff --git a/clus/src/Facade_Util.cxx b/clus/src/Facade_Util.cxx index 2caae0517..620f1aced 100644 --- a/clus/src/Facade_Util.cxx +++ b/clus/src/Facade_Util.cxx @@ -4,7 +4,10 @@ using namespace WireCell; using namespace WireCell::PointCloud; -using namespace WireCell::PointCloud::Facade; +using namespace WireCell::Clus; +using namespace WireCell::Clus::Graphs; + +using namespace WireCell::Clus::Facade; // using WireCell::PointCloud::Dataset; using namespace WireCell::PointCloud::Tree; // for "Points" node value type // using WireCell::PointCloud::Tree::named_pointclouds_t; @@ -12,6 +15,7 @@ using namespace WireCell::PointCloud::Tree; // for "Points" node value type #include "WireCellUtil/Logging.h" using spdlog::debug; + // int global_counter_get_closest_wcpoint = 0; // #define __DEBUG__ @@ -47,9 +51,7 @@ namespace { } #endif -void Facade::process_mst_deterministically( - const boost::adjacency_list>& temp_graph, +void Facade::process_mst_deterministically(const Weighted::Graph& temp_graph, std::vector>>& index_index_dis, std::vector>>& index_index_dis_mst) { @@ -87,7 +89,7 @@ void Facade::process_mst_deterministically( // Use minimum vertex as root size_t root_vertex = comp_vertices[0]; - std::vector::vertex_descriptor> 
predecessors(num_vertices(temp_graph)); + std::vector predecessors(num_vertices(temp_graph)); prim_minimum_spanning_tree(temp_graph, &predecessors[0], boost::root_vertex(root_vertex)); @@ -291,290 +293,220 @@ std::ostream& Facade::operator<<(std::ostream& os, const Multi2DPointCloud& m2dp return os; } -Facade::DynamicPointCloud::DynamicPointCloud(const double angle_u, const double angle_v, const double angle_w) - : m_pc2d(angle_u, angle_v, angle_w) -{ -} - -void Facade::DynamicPointCloud::add_points(const Cluster* cluster, const int flag, const double step) -{ - // size_t current_size = get_num_points(); - const auto& winds = cluster->wire_indices(); - - if (flag == 0) { - // add actual points in - // WCP::WCPointCloud& pcloud = cluster->get_point_cloud()->get_cloud(); - // WCP::WC2DPointCloud& pcloud_u = cluster->get_point_cloud()->get_cloud_u(); - // WCP::WC2DPointCloud& pcloud_v = cluster->get_point_cloud()->get_cloud_v(); - // WCP::WC2DPointCloud& pcloud_w = cluster->get_point_cloud()->get_cloud_w(); - - // cloud.pts.resize(current_size + pcloud.pts.size()); - // cloud_u.pts.resize(current_size + pcloud.pts.size()); - // cloud_v.pts.resize(current_size + pcloud.pts.size()); - // cloud_w.pts.resize(current_size + pcloud.pts.size()); - // vec_index_cluster.resize(current_size + pcloud.pts.size()); - - for (int i = 0; i != cluster->npoints(); i++) { - // vec_index_cluster.at(current_size + i) = cluster; - m_clusters.push_back(cluster); - m_pc3d.add({cluster->point3d(i).x(), cluster->point3d(i).y(), cluster->point3d(i).z()}); - m_pc2d.add(cluster->point3d(i)); - for (size_t plane = 0; plane < 3; ++plane) { - m_winds[plane].push_back(winds[plane][i]); - } - m_blobs.push_back(cluster->blob_with_point(i)); - - // cloud.pts[current_size + i].x = pcloud.pts.at(i).x; - // cloud.pts[current_size + i].y = pcloud.pts.at(i).y; - // cloud.pts[current_size + i].z = pcloud.pts.at(i).z; - // cloud.pts[current_size + i].index_u = pcloud.pts.at(i).index_u; - // 
cloud.pts[current_size + i].index_v = pcloud.pts.at(i).index_v; - // cloud.pts[current_size + i].index_w = pcloud.pts.at(i).index_w; - // cloud.pts[current_size + i].mcell = pcloud.pts.at(i).mcell; - // cloud.pts[current_size + i].index = current_size + i; - // cloud_u.pts[current_size + i].x = pcloud_u.pts.at(i).x; - // cloud_u.pts[current_size + i].y = pcloud_u.pts.at(i).y; - // cloud_u.pts[current_size + i].index = current_size + i; - // cloud_v.pts[current_size + i].x = pcloud_v.pts.at(i).x; - // cloud_v.pts[current_size + i].y = pcloud_v.pts.at(i).y; - // cloud_v.pts[current_size + i].index = current_size + i; - // cloud_w.pts[current_size + i].x = pcloud_w.pts.at(i).x; - // cloud_w.pts[current_size + i].y = pcloud_w.pts.at(i).y; - // cloud_w.pts[current_size + i].index = current_size + i; - } - // if (pcloud.pts.size() > 0) { - // index->addPoints(current_size, current_size + pcloud.pts.size() - 1); - // index_u->addPoints(current_size, current_size + pcloud.pts.size() - 1); - // index_v->addPoints(current_size, current_size + pcloud.pts.size() - 1); - // index_w->addPoints(current_size, current_size + pcloud.pts.size() - 1); - // } - } - else { - // add skeleton points in - const std::list& path_wcps = cluster->get_path_wcps(); - - std::vector pts; - geo_point_t prev_wcp = cluster->point3d(path_wcps.front()); - for (auto it = path_wcps.begin(); it != path_wcps.end(); it++) { - geo_point_t test_point = cluster->point3d(*it); - double dis = - sqrt(pow(test_point.x() - prev_wcp.x(), 2) + pow(test_point.y() - prev_wcp.y(), 2) + pow(test_point.z() - prev_wcp.z(), 2)); - if (dis <= step) { - // geo_point_t current_pt((*it).x(), (*it).y(), (*it).z()); - pts.push_back(test_point); - } - else { - int num_points = int(dis / (step)) + 1; - // double dis_seg = dis / num_points; - for (int k = 0; k != num_points; k++) { - geo_point_t current_pt(prev_wcp.x() + (k + 1.) / num_points * (test_point.x() - prev_wcp.x()), - prev_wcp.y() + (k + 1.) 
/ num_points * (test_point.y() - prev_wcp.y()), - prev_wcp.z() + (k + 1.) / num_points * (test_point.z() - prev_wcp.z())); - pts.push_back(current_pt); - } - } - prev_wcp = test_point; - } - - // cloud.pts.resize(current_size + pts.size()); - // cloud_u.pts.resize(current_size + pts.size()); - // cloud_v.pts.resize(current_size + pts.size()); - // cloud_w.pts.resize(current_size + pts.size()); - // vec_index_cluster.resize(current_size + pts.size()); - // int i = 0; - for (auto it = pts.begin(); it != pts.end(); it++) { - m_clusters.push_back(cluster); - m_blobs.push_back(nullptr); - m_pc3d.add({(*it).x(), (*it).y(), (*it).z()}); - m_pc2d.add((*it)); - for (size_t plane = 0; plane < 3; ++plane) { - m_winds[plane].push_back(2.4 * units::cm); - } +// Facade::DynamicPointCloudLegacy::DynamicPointCloudLegacy(const double angle_u, const double angle_v, const double angle_w) +// : m_pc2d(angle_u, angle_v, angle_w) +// { +// } - // vec_index_cluster.at(current_size + i) = cluster; - // cloud.pts[current_size + i].x = (*it).x; - // cloud.pts[current_size + i].y = (*it).y; - // cloud.pts[current_size + i].z = (*it).z; - // cloud.pts[current_size + i].index_u = 2.4 * units::cm; - // cloud.pts[current_size + i].index_v = 2.4 * units::cm; - // cloud.pts[current_size + i].index_w = 2.4 * units::cm; - // cloud.pts[current_size + i].mcell = 0; - // cloud.pts[current_size + i].index = current_size + i; - // cloud_u.pts[current_size + i].x = (*it).x; - // cloud_u.pts[current_size + i].y = cos(angle_u) * (*it).z - sin(angle_u) * (*it).y; - // cloud_u.pts[current_size + i].index = current_size + i; - // cloud_v.pts[current_size + i].x = (*it).x; - // cloud_v.pts[current_size + i].y = cos(angle_v) * (*it).z - sin(angle_v) * (*it).y; - // cloud_v.pts[current_size + i].index = current_size + i; - // cloud_w.pts[current_size + i].x = (*it).x; - // cloud_w.pts[current_size + i].y = cos(angle_w) * (*it).z - sin(angle_w) * (*it).y; - // cloud_w.pts[current_size + i].index = current_size + 
i; - - // i++; - } - // if (pts.size() > 0) { - // index->addPoints(current_size, current_size + pts.size() - 1); - // index_u->addPoints(current_size, current_size + pts.size() - 1); - // index_v->addPoints(current_size, current_size + pts.size() - 1); - // index_w->addPoints(current_size, current_size + pts.size() - 1); - // } - } - LogDebug("add_points: " << m_pc3d.get_num_points() << " " << m_pc2d.get_num_points() << " " << m_clusters.size() << " " << m_blobs.size() << " " << m_winds[0].size()); -} +// void Facade::DynamicPointCloudLegacy::add_points(const Cluster* cluster, const int flag, const double step) +// { +// // size_t current_size = get_num_points(); +// const auto& winds = cluster->wire_indices(); + +// if (flag == 0) { +// for (int i = 0; i != cluster->npoints(); i++) { +// // vec_index_cluster.at(current_size + i) = cluster; +// m_clusters.push_back(cluster); +// m_pc3d.add({cluster->point3d(i).x(), cluster->point3d(i).y(), cluster->point3d(i).z()}); +// m_pc2d.add(cluster->point3d(i)); +// for (size_t plane = 0; plane < 3; ++plane) { +// m_winds[plane].push_back(winds[plane][i]); +// } +// m_blobs.push_back(cluster->blob_with_point(i)); +// } +// } +// else { +// // add skeleton points in +// const std::list& path_wcps = cluster->get_path_wcps(); + +// std::vector pts; +// geo_point_t prev_wcp = cluster->point3d(path_wcps.front()); +// for (auto it = path_wcps.begin(); it != path_wcps.end(); it++) { +// geo_point_t test_point = cluster->point3d(*it); +// double dis = +// sqrt(pow(test_point.x() - prev_wcp.x(), 2) + pow(test_point.y() - prev_wcp.y(), 2) + pow(test_point.z() - prev_wcp.z(), 2)); +// if (dis <= step) { +// // geo_point_t current_pt((*it).x(), (*it).y(), (*it).z()); +// pts.push_back(test_point); +// } +// else { +// int num_points = int(dis / (step)) + 1; +// // double dis_seg = dis / num_points; +// for (int k = 0; k != num_points; k++) { +// geo_point_t current_pt(prev_wcp.x() + (k + 1.) 
/ num_points * (test_point.x() - prev_wcp.x()), +// prev_wcp.y() + (k + 1.) / num_points * (test_point.y() - prev_wcp.y()), +// prev_wcp.z() + (k + 1.) / num_points * (test_point.z() - prev_wcp.z())); +// pts.push_back(current_pt); +// } +// } +// prev_wcp = test_point; +// } +// for (auto it = pts.begin(); it != pts.end(); it++) { +// m_clusters.push_back(cluster); +// m_blobs.push_back(nullptr); +// m_pc3d.add({(*it).x(), (*it).y(), (*it).z()}); +// m_pc2d.add((*it)); +// for (size_t plane = 0; plane < 3; ++plane) { +// m_winds[plane].push_back(2.4 * units::cm); +// } +// } +// } +// LogDebug("add_points: " << m_pc3d.get_num_points() << " " << m_pc2d.get_num_points() << " " << m_clusters.size() << " " << m_blobs.size() << " " << m_winds[0].size()); +// } -void Facade::DynamicPointCloud::add_points(const Cluster* cluster, const geo_point_t& p_test, - const geo_point_t& dir_unmorm, const double range, - const double step, const double angle) -{ - // size_t current_size = get_num_points(); - geo_point_t dir = dir_unmorm.norm(); - - int num_points = int(range / (step)) + 1; - double dis_seg = range / num_points; - - /// TODO: resize is faster, but needs more interface implementation - for (int k = 0; k != num_points; k++) { - // 13 cm = 75 * sin(10/180.*3.1415926) - double dis_cut = - std::min(std::max(2.4 * units::cm, k * dis_seg * sin(angle / 180. 
* 3.1415926)), 13 * units::cm); - m_clusters.push_back(cluster); - m_blobs.push_back(nullptr); - m_pc3d.add({p_test.x() + k * dir.x() * dis_seg, p_test.y() + k * dir.y() * dis_seg, - p_test.z() + k * dir.z() * dis_seg}); - m_winds[0].push_back(int(dis_cut)); - m_winds[1].push_back(int(dis_cut)); - m_winds[2].push_back(int(dis_cut)); - m_pc2d.add({p_test.x() + k * dir.x() * dis_seg, p_test.y() + k * dir.y() * dis_seg, - p_test.z() + k * dir.z() * dis_seg}); - } -} +// void Facade::DynamicPointCloudLegacy::add_points(const Cluster* cluster, const geo_point_t& p_test, +// const geo_point_t& dir_unmorm, const double range, +// const double step, const double angle) +// { +// // size_t current_size = get_num_points(); +// geo_point_t dir = dir_unmorm.norm(); + +// int num_points = int(range / (step)) + 1; +// double dis_seg = range / num_points; + +// /// TODO: resize is faster, but needs more interface implementation +// for (int k = 0; k != num_points; k++) { +// // 13 cm = 75 * sin(10/180.*3.1415926) +// double dis_cut = +// std::min(std::max(2.4 * units::cm, k * dis_seg * sin(angle / 180. 
* 3.1415926)), 13 * units::cm); +// m_clusters.push_back(cluster); +// m_blobs.push_back(nullptr); +// m_pc3d.add({p_test.x() + k * dir.x() * dis_seg, p_test.y() + k * dir.y() * dis_seg, +// p_test.z() + k * dir.z() * dis_seg}); +// m_winds[0].push_back(int(dis_cut)); +// m_winds[1].push_back(int(dis_cut)); +// m_winds[2].push_back(int(dis_cut)); +// m_pc2d.add({p_test.x() + k * dir.x() * dis_seg, p_test.y() + k * dir.y() * dis_seg, +// p_test.z() + k * dir.z() * dis_seg}); +// } +// } -std::vector> Facade::DynamicPointCloud::get_2d_points_info( - const geo_point_t& p, const double radius, const int plane) -{ - std::vector> results = m_pc2d.get_closest_2d_index_radius(p, radius, plane); - std::vector> return_results; +// std::vector> Facade::DynamicPointCloudLegacy::get_2d_points_info( +// const geo_point_t& p, const double radius, const int plane) +// { +// std::vector> results = m_pc2d.get_closest_2d_index_radius(p, radius, plane); +// std::vector> return_results; - for (size_t i = 0; i != results.size(); i++) { - return_results.push_back(std::make_tuple(sqrt(results.at(i).second), m_clusters.at(results.at(i).first), - (size_t)results.at(i).first)); - } +// for (size_t i = 0; i != results.size(); i++) { +// return_results.push_back(std::make_tuple(sqrt(results.at(i).second), m_clusters.at(results.at(i).first), +// (size_t)results.at(i).first)); +// } - return return_results; -} +// return return_results; +// } -std::tuple Facade::DynamicPointCloud::get_closest_2d_point_info( - const geo_point_t& p, const int plane) -{ - std::vector> results = m_pc2d.get_closest_2d_index_knn(p, 1, plane); - std::vector> return_results; - if (results.size() != 1) { - return std::make_tuple(1e9, nullptr, -1); - } - // const auto p3d = m_pc3d.point(results.at(0).first); - // const auto cluster = m_clusters.at(results.at(0).first); - // LogDebug(" 3d " << p3d << " " << results.at(0).second); - // LogDebug(" cluster.npoints() " << cluster->npoints()); - return 
std::make_tuple(sqrt(results.at(0).second), m_clusters.at(results.at(0).first), (size_t)results.at(0).first); -} +// std::tuple Facade::DynamicPointCloudLegacy::get_closest_2d_point_info( +// const geo_point_t& p, const int plane) +// { +// std::vector> results = m_pc2d.get_closest_2d_index_knn(p, 1, plane); +// std::vector> return_results; +// if (results.size() != 1) { +// return std::make_tuple(1e9, nullptr, -1); +// } +// // const auto p3d = m_pc3d.point(results.at(0).first); +// // const auto cluster = m_clusters.at(results.at(0).first); +// // LogDebug(" 3d " << p3d << " " << results.at(0).second); +// // LogDebug(" cluster.npoints() " << cluster->npoints()); +// return std::make_tuple(sqrt(results.at(0).second), m_clusters.at(results.at(0).first), (size_t)results.at(0).first); +// } -#include -#include -namespace bh = boost::histogram; -namespace bha = boost::histogram::algorithm; +// #include +// #include +// namespace bh = boost::histogram; +// namespace bha = boost::histogram::algorithm; -// Example parameter calculating functions used by directional hough -// transforms. -static double theta_angle(const Vector& dir) -{ - const Vector Z(0, 0, 1); - return acos(Z.dot(dir)); -} -// static double theta_cosine(const Vector& dir) +// // Example parameter calculating functions used by directional hough +// // transforms. 
+// static double theta_angle(const Vector& dir) // { // const Vector Z(0, 0, 1); -// return Z.dot(dir); +// return acos(Z.dot(dir)); +// } +// // static double theta_cosine(const Vector& dir) +// // { +// // const Vector Z(0, 0, 1); +// // return Z.dot(dir); +// // } +// static double phi_angle(const Vector& dir) +// { +// const Vector X(1, 0, 0); +// const Vector Y(0, 1, 0); +// return atan2(Y.dot(dir), X.dot(dir)); // } -static double phi_angle(const Vector& dir) -{ - const Vector X(1, 0, 0); - const Vector Y(0, 1, 0); - return atan2(Y.dot(dir), X.dot(dir)); -} - -std::pair Facade::DynamicPointCloud::hough_transform(const geo_point_t& origin, const double dis) const -{ - std::vector pts; - std::vector blobs; - auto results = m_pc3d.kd().radius(dis * dis, origin); - for (const auto& [point_index, _] : results) { - pts.push_back(m_pc3d.point(point_index)); - blobs.push_back(m_blobs.at(point_index)); - } - - constexpr double pi = 3.141592653589793; - - using direction_parameter_function_f = std::function; - - // Parameter axis 1 is some measure of theta angle (angle or cosine) - const int nbins1 = 180; - // param_space == costh_phi - direction_parameter_function_f theta_param = theta_angle; - double min1 = 0, max1 = pi; - - // Parameter axis 2 is only supported by phi angle - const int nbins2 = 360; - const double min2 = -pi; - const double max2 = +pi; - direction_parameter_function_f phi_param = phi_angle; - - auto hist = bh::make_histogram(bh::axis::regular<>(nbins1, min1, max1), bh::axis::regular<>(nbins2, min2, max2)); - - for (size_t ind = 0; ind < blobs.size(); ++ind) { - const auto* blob = blobs[ind]; - auto charge = blob->charge(); - // protection against the charge=0 case ... 
- // if (charge == 0) charge = 1; - if (charge <= 0) continue; - - const auto npoints = blob->npoints(); - const auto& pt = pts[ind]; - - const Vector dir = (pt - origin).norm(); - const double r = (pt - origin).magnitude(); - - const double p1 = theta_param(dir); - const double p2 = phi_param(dir); - if (r < 10 * units::cm) { - hist(p1, p2, bh::weight(charge / npoints)); - } - else { - // hough->Fill(vec.Theta(), vec.Phi(), q * pow(10 * units::cm / r, 2)); - hist(p1, p2, bh::weight(charge / npoints * pow(10 * units::cm / r, 2))); - } - } - auto indexed = bh::indexed(hist); - auto it = std::max_element(indexed.begin(), indexed.end()); - const auto& cell = *it; - return {cell.bin(0).center(), cell.bin(1).center()}; -} +// std::pair Facade::DynamicPointCloudLegacy::hough_transform(const geo_point_t& origin, const double dis) const +// { +// std::vector pts; +// std::vector blobs; +// auto results = m_pc3d.kd().radius(dis * dis, origin); +// for (const auto& [point_index, _] : results) { +// pts.push_back(m_pc3d.point(point_index)); +// blobs.push_back(m_blobs.at(point_index)); +// } + +// constexpr double pi = 3.141592653589793; + +// using direction_parameter_function_f = std::function; + +// // Parameter axis 1 is some measure of theta angle (angle or cosine) +// const int nbins1 = 180; +// // param_space == costh_phi +// direction_parameter_function_f theta_param = theta_angle; +// double min1 = 0, max1 = pi; + +// // Parameter axis 2 is only supported by phi angle +// const int nbins2 = 360; +// const double min2 = -pi; +// const double max2 = +pi; +// direction_parameter_function_f phi_param = phi_angle; + +// auto hist = bh::make_histogram(bh::axis::regular<>(nbins1, min1, max1), bh::axis::regular<>(nbins2, min2, max2)); + +// for (size_t ind = 0; ind < blobs.size(); ++ind) { +// const auto* blob = blobs[ind]; +// auto charge = blob->charge(); +// // protection against the charge=0 case ... 
+// // if (charge == 0) charge = 1; +// if (charge <= 0) continue; + +// const auto npoints = blob->npoints(); +// const auto& pt = pts[ind]; + +// const Vector dir = (pt - origin).norm(); +// const double r = (pt - origin).magnitude(); + +// const double p1 = theta_param(dir); +// const double p2 = phi_param(dir); +// if (r < 10 * units::cm) { +// hist(p1, p2, bh::weight(charge / npoints)); +// } +// else { +// // hough->Fill(vec.Theta(), vec.Phi(), q * pow(10 * units::cm / r, 2)); +// hist(p1, p2, bh::weight(charge / npoints * pow(10 * units::cm / r, 2))); +// } +// } + +// auto indexed = bh::indexed(hist); +// auto it = std::max_element(indexed.begin(), indexed.end()); +// const auto& cell = *it; +// return {cell.bin(0).center(), cell.bin(1).center()}; +// } -geo_point_t Facade::DynamicPointCloud::vhough_transform(const geo_point_t& origin, const double dis) const -{ - // TODO: only support theta_phi - const auto [th, phi] = hough_transform(origin, dis); - return {sin(th) * cos(phi), sin(th) * sin(phi), cos(th)}; -} +// geo_point_t Facade::DynamicPointCloudLegacy::vhough_transform(const geo_point_t& origin, const double dis) const +// { +// // TODO: only support theta_phi +// const auto [th, phi] = hough_transform(origin, dis); +// return {sin(th) * cos(phi), sin(th) * sin(phi), cos(th)}; +// } // dirft = xorig + xsign * (time + m_time_offset) * m_drift_speed double Facade::time2drift(const IAnodeFace::pointer anodeface, const double time_offset, const double drift_speed, double time) { // std::cout << "time2drift: " << time << " " << time_offset << " " << drift_speed << std::endl; - const Pimpos* colpimpos = anodeface->planes()[2]->pimpos(); - double xsign = colpimpos->axis(0)[0]; + // const Pimpos* colpimpos = anodeface->planes()[2]->pimpos(); + double xsign = anodeface->dirx(); double xorig = anodeface->planes()[2]->wires().front()->center().x(); const double drift = (time + time_offset)*drift_speed; /// TODO: how to determine xsign? 
@@ -584,12 +516,11 @@ double Facade::time2drift(const IAnodeFace::pointer anodeface, const double time // time = (drift - xorig) / (xsign * m_drift_speed) - m_time_offset double Facade::drift2time(const IAnodeFace::pointer anodeface, const double time_offset, const double drift_speed, double drift) { - const Pimpos* colpimpos = anodeface->planes()[2]->pimpos(); - double xsign = colpimpos->axis(0)[0]; + // const Pimpos* colpimpos = anodeface->planes()[2]->pimpos(); + double xsign = anodeface->dirx(); double xorig = anodeface->planes()[2]->wires().front()->center().x(); return (drift - xorig) / (xsign * drift_speed) - time_offset; } - int Facade::point2wind(const geo_point_t& point, const double angle, const double pitch, const double center) { // double y = cos(angles[pind]) * point[2] - sin(angles[pind]) * point[1]; @@ -599,6 +530,55 @@ int Facade::point2wind(const geo_point_t& point, const double angle, const doubl return std::round(wind); } +double Facade::wind2point2dproj(const int wind, const double angle, const double pitch, const double center) +{ + // Reverse the calculation in point2wind: + return (wind + 0.5) * pitch + center; +} + + +WirePlaneId Facade::get_wireplaneid(const geo_point_t& point, const WirePlaneId& wpid1, const WirePlaneId& wpid2, IDetectorVolumes::pointer dv){ + if (wpid1 == wpid2) return wpid1; + + auto wpid = dv->contained_by(point); + + return wpid; +} + +WirePlaneId Facade::get_wireplaneid(const geo_point_t& p1, const WirePlaneId& wpid1, const geo_point_t& p2, const WirePlaneId& wpid2, IDetectorVolumes::pointer dv){ + if (wpid1 == wpid2) return wpid1; + // if the wpid1 != wpid2, find out the line p1-p2 intersects with wpid1, and wpid2, return the wpid for the longer one + + // Convert geo_point_t to WireCell::Point if needed + // Assuming geo_point_t is compatible with or can be converted to WireCell::Point + WireCell::Point point1 = p1; + WireCell::Point point2 = p2; + + // Create ray from p1 to p2 + WireCell::Ray ray(point1, point2); 
+ + // Get bounding boxes for each wpid + WireCell::BoundingBox bb1 = dv->inner_bounds(wpid1); + WireCell::BoundingBox bb2 = dv->inner_bounds(wpid2); + + // Find intersections of ray with each bounding box + WireCell::Ray intersect1 = bb1.crop(ray); + WireCell::Ray intersect2 = bb2.crop(ray); + + // Calculate lengths of intersection segments + double length1 = WireCell::ray_length(intersect1); + double length2 = WireCell::ray_length(intersect2); + + // Return wpid corresponding to longer intersection + if (length1 >= length2) { + return wpid1; + } else { + return wpid2; + } +} + + + // Local Variables: // mode: c++ // c-basic-offset: 4 diff --git a/clus/src/FiducialUtils.cxx b/clus/src/FiducialUtils.cxx new file mode 100644 index 000000000..2638015b4 --- /dev/null +++ b/clus/src/FiducialUtils.cxx @@ -0,0 +1,177 @@ +#include "WireCellClus/FiducialUtils.h" +#include "WireCellClus/Facade_Cluster.h" + +namespace WireCell::Clus { + + + FiducialUtils::FiducialUtils(FiducialUtils::StaticData sd) + : m_sd(sd) + { + } + + void FiducialUtils::feed_static(FiducialUtils::StaticData sd) + { + m_internal = InternalData{}; // clear any previous internal data + m_sd = sd; + } + + void FiducialUtils::feed_dynamic(const FiducialUtils::DynamicData& dd) { + m_internal = InternalData{}; // clear any previous internal data + + // After the above reset, the rest of this method should be filled with + // code to derive whatever InternalData values from the static (m_sd) + // and dynamic data (dd) as needed to by the query methods. + + // For now, we simply set our place holder "dummy" to some meaningless value. 
+ //m_internal.dummy = dd.live.nchildren() + dd.dead.nchildren(); + + m_internal.live = const_cast(&dd.live); + } + + + // query methods + + + bool FiducialUtils::inside_dead_region(const Point& p_raw, const int apa, const int face, const int minimal_views) const + { + // Convert 3D point to time and wire indices + const auto [tind_u, wind_u] = m_internal.live->convert_3Dpoint_time_ch(p_raw, apa, face, 0); + const auto [tind_v, wind_v] = m_internal.live->convert_3Dpoint_time_ch(p_raw, apa, face, 1); + const auto [tind_w, wind_w] = m_internal.live->convert_3Dpoint_time_ch(p_raw, apa, face, 2); + + int dead_view_count = 0; + + // Check each plane (U=0, V=1, W=2) + // Check if this wire at this time is dead + if (m_internal.live->is_wire_dead(apa, face, 0, wind_u, tind_u)) dead_view_count++; + if (m_internal.live->is_wire_dead(apa, face, 1, wind_v, tind_v)) dead_view_count++; + if (m_internal.live->is_wire_dead(apa, face, 2, wind_w, tind_w)) dead_view_count++; + + // std::map> dead_chs_u = m_internal.live->get_all_dead_chs(0,0,0); + // std::map> dead_chs_v = m_internal.live->get_all_dead_chs(0,0,1); + // std::map> dead_chs_w = m_internal.live->get_all_dead_chs(0,0,2); + + // std::cout << tind_u << " " << wind_u << " " << wind_v << " " << wind_w << " " << m_internal.live->is_wire_dead(apa, face, 0, wind_u, tind_u) << " " << m_internal.live->is_wire_dead(apa, face, 1, wind_v, tind_v) << " " << m_internal.live->is_wire_dead(apa, face, 2, wind_w, tind_w) << " " << dead_view_count << " " << dead_chs_u.size() << " " << dead_chs_v.size() << " " << dead_chs_w.size() << std::endl; + + // // Print dead channels for each view + // for (const auto& [ch, range] : dead_chs_u) { + // std::cout << "Dead channel (U) " << ch << ": " << range.first << " - " << range.second << std::endl; + // } + // for (const auto& [ch, range] : dead_chs_v) { + // std::cout << "Dead channel (V) " << ch << ": " << range.first << " - " << range.second << std::endl; + // } + // for (const auto& [ch, range] 
: dead_chs_w) { + // std::cout << "Dead channel (W) " << ch << ": " << range.first << " - " << range.second << std::endl; + // } + + // Return true if number of dead views >= minimal_views + return dead_view_count >= minimal_views; + } + + + bool FiducialUtils::inside_fiducial_volume(const Point& p, + const std::vector& tolerance_vec) const + { + // currently tolerance vector is not used ... + return m_sd.fiducial->contained(p); + } + + + bool FiducialUtils::check_dead_volume(const Facade::Cluster& main_cluster, const Point& p, const Vector& dir, double step, const double cut_ratio, const int cut_value) const + { + if (!inside_fiducial_volume(p)){ + return false; + }else{ + if (dir.magnitude()==0){ + return true; + }else{ + Vector normalized_dir = dir; + normalized_dir *= 1./dir.magnitude(); + Point temp_p = p; + int num_points = 0; + int num_points_dead = 0; + while(inside_fiducial_volume(temp_p)){ + + num_points ++; + // for the temp_p, find its apa, face, and raw_temp_p ... + auto test_wpid = m_sd.dv->contained_by(temp_p); + + if (test_wpid.apa() < 0 || test_wpid.face() < 0) { + num_points_dead ++; + }else{ + const auto transform = m_sd.pcts->pc_transform(main_cluster.get_scope_transform(main_cluster.get_default_scope())); + double cluster_t0 = main_cluster.get_cluster_t0(); + auto temp_p_raw = transform->backward(temp_p, cluster_t0, test_wpid.face(), test_wpid.apa()); + + if (inside_dead_region(temp_p_raw, test_wpid.apa(), test_wpid.face())) num_points_dead ++; + } + if (num_points - num_points_dead >=cut_value) return true; + + temp_p.x(temp_p.x() + normalized_dir.x() * step); + temp_p.y(temp_p.y() + normalized_dir.y() * step); + temp_p.z(temp_p.z() + normalized_dir.z() * step); + + } + + if (num_points_dead > cut_ratio * num_points){ + return false; + }else{ + return true; + } + } + + } + } + + + + bool FiducialUtils::check_signal_processing(const Facade::Cluster& main_cluster, const Point& p, const Vector& dir, double step, const double cut_ratio, const int 
cut_value) const + { + if (dir.magnitude()==0){ + return true; + }else{ + Vector normalized_dir = dir; + normalized_dir *= 1./dir.magnitude(); + Point temp_p = p; + + int num_points = 0; + int num_points_dead = 0; + + while(inside_fiducial_volume(temp_p)){ + num_points ++; + auto test_wpid = m_sd.dv->contained_by(temp_p); + if (test_wpid.apa() < 0 || test_wpid.face() < 0) { // not in true volume ... + num_points_dead ++; + }else{ + // convert temp_p to raw point and find apa etc ... + auto transform = m_sd.pcts->pc_transform(main_cluster.get_scope_transform(main_cluster.get_default_scope())); + double cluster_t0 = main_cluster.get_cluster_t0(); + auto temp_p_raw = transform->backward(temp_p, cluster_t0, test_wpid.face(), test_wpid.apa()); + + auto result_u = m_internal.live->get_closest_points(temp_p_raw,1.2*units::cm,test_wpid.apa(), test_wpid.face(), 0); + auto result_v = m_internal.live->get_closest_points(temp_p_raw,1.2*units::cm,test_wpid.apa(), test_wpid.face(), 1); + auto result_w = m_internal.live->get_closest_points(temp_p_raw,1.2*units::cm,test_wpid.apa(), test_wpid.face(), 2); + if (result_u.size() > 0 || result_v.size() > 0 || result_w.size() > 0 || inside_dead_region(temp_p_raw, test_wpid.apa(), test_wpid.face())) { + num_points_dead ++; + } + } + + if (num_points - num_points_dead >=cut_value) return true; + + temp_p.x(temp_p.x() + normalized_dir.x() * step); + temp_p.y(temp_p.y() + normalized_dir.y() * step); + temp_p.z(temp_p.z() + normalized_dir.z() * step); + } + + + if (num_points_dead > cut_ratio * num_points){ + return false; + }else{ + return true; + } + } + + return true; + } + +} diff --git a/clus/src/Graphs.cxx b/clus/src/Graphs.cxx new file mode 100644 index 000000000..89f85560f --- /dev/null +++ b/clus/src/Graphs.cxx @@ -0,0 +1,358 @@ +#include "WireCellUtil/GraphTools.h" +#include "WireCellClus/Graphs.h" +#include "PAAL.h" +#include + + +using namespace WireCell; +using namespace WireCell::Clus; +using namespace WireCell::Clus::Graphs; 
+using WireCell::GraphTools::edge_range; + +Weighted::ShortestPaths::ShortestPaths(size_t source, const std::vector predecessors) + : m_source(source) + , m_predecessors(predecessors) +{ +} + +const std::vector& +Weighted::ShortestPaths::path(size_t destination) const +{ + auto& path = m_paths[destination]; // construct and hold + if (path.size()) { + return path; + } + + path.push_back(destination); + size_t prev = destination; + for (size_t vertex = m_predecessors[destination]; vertex != m_source; vertex = m_predecessors[vertex]) + { + path.push_back(vertex); + if (vertex == prev) { + break; + } + prev = vertex; + } + path.push_back(m_source); + std::reverse(path.begin(), path.end()); + + return path; +} + +std::vector Weighted::terminal_path( + const Weighted::graph_type& graph, + const Weighted::Voronoi& vor, + Weighted::vertex_type vtx) +{ + std::vector ret; + const vertex_type myterm = vor.terminal[vtx]; + while (true) { + ret.push_back(vtx); + if (myterm == vtx) { + break; + } + vtx = boost::source(vor.last_edge[vtx], graph); + } + return ret; +} + +Weighted::vertex_pair Weighted::make_vertex_pair(Weighted::vertex_type a, Weighted::vertex_type b) +{ + if (a fine_distances; + + // Find the shortest path between terminals along graph paths. 
+ std::map shortest_paths; + for (auto fine_edge : edge_range(graph)) { + const vertex_type fine_tail = boost::source(fine_edge, graph); + const vertex_type fine_head = boost::target(fine_edge, graph); + + const vertex_pair fine_vp = make_vertex_pair(fine_tail, fine_head); + + const double fine_distance = edge_weight[fine_edge]; + fine_distances[fine_vp] = fine_distance; // for later by vertex pair + + const vertex_type term_tail = vor.terminal[fine_tail]; + const vertex_type term_head = vor.terminal[fine_head]; + if (term_tail == term_head) { + continue; + } + + const vertex_pair term_vp = make_vertex_pair(term_tail, term_head); + const double term_distance = vor.distance[fine_tail] + fine_distance + vor.distance[fine_head]; + + auto it = shortest_paths.find(term_vp); + if (it == shortest_paths.end()) { + shortest_paths.emplace(term_vp, TerminalPath{term_distance, fine_vp}); + continue; + } + if (it->second.path_distance <= term_distance) { + continue; + } + it->second.seed_vp = fine_vp; + it->second.path_distance = term_distance; + } + + // Find unique edges on all voronoi paths from the vertices of the seed edge + // to each of their nearest terminals. 
+ std::set fine_edges; + for (const auto& [term_vp, term_path] : shortest_paths) { + const auto& vp = term_path.seed_vp; + fine_edges.insert(vp); + auto [tail, head] = vp; + for (auto vtx : {tail, head}) { + auto p = terminal_path(graph, vor, vtx); + for (size_t step = 1; step& terminals) +{ + Voronoi result; + const size_t npoints = boost::num_vertices(graph); + auto index = get(boost::vertex_index, graph); + + result.terminal.resize(npoints); // nearest_terminal + auto nearest_terminal_map = boost::make_iterator_property_map(result.terminal.begin(), index); + for (auto terminal : terminals) { + nearest_terminal_map[terminal] = terminal; + } + + auto edge_weight = get(boost::edge_weight, graph); + + result.distance.resize(npoints); + auto distance_map = boost::make_iterator_property_map(result.distance.begin(), index); + + result.last_edge.resize(npoints); + auto last_edge = boost::make_iterator_property_map(result.last_edge.begin(), index); + + boost::dijkstra_shortest_paths( + graph, terminals.begin(), terminals.end(), + boost::dummy_property_map(), + distance_map, + edge_weight, + index, + PAAL::less(), + boost::closed_plus(), + std::numeric_limits::max(), 0, + boost::make_dijkstra_visitor( + PAAL::make_nearest_recorder( + nearest_terminal_map, last_edge, boost::on_edge_relaxed{}))); + return result; +} + +Weighted::GraphAlgorithms::GraphAlgorithms(const Graph& graph, size_t max_cache_size) + : m_graph(graph), m_max_cache_size(max_cache_size) +{ + if (m_max_cache_size == 0) { + m_max_cache_size = 1; // Ensure at least 1 entry can be cached + } +} + +void Weighted::GraphAlgorithms::update_cache_access(size_t source) const +{ + auto it = m_sps.find(source); + if (it != m_sps.end()) { + // Move to front of access order list (most recently used) + m_access_order.erase(it->second.first); + m_access_order.push_front(source); + it->second.first = m_access_order.begin(); + } +} + +void Weighted::GraphAlgorithms::evict_oldest_if_needed() const +{ + while (m_sps.size() 
>= m_max_cache_size) { + // Remove least recently used (back of list) + size_t oldest = m_access_order.back(); + m_access_order.pop_back(); + m_sps.erase(oldest); + } +} + +const Weighted::ShortestPaths& +Weighted::GraphAlgorithms::shortest_paths(size_t source) const +{ + auto it = m_sps.find(source); + if (it != m_sps.end()) { + // Cache hit - update access order + update_cache_access(source); + return it->second.second; + } + + // Cache miss - need to evict if cache is full + evict_oldest_if_needed(); + + // Calculate shortest paths using Dijkstra + const size_t nvtx = boost::num_vertices(m_graph); + std::vector predecessors(nvtx); + std::vector distances(nvtx); + + const auto& param = weight_map(get(boost::edge_weight, m_graph)) + .predecessor_map(&predecessors[0]) + .distance_map(&distances[0]); + boost::dijkstra_shortest_paths(m_graph, source, param); + + // Add to front of access order list + m_access_order.push_front(source); + + // Insert into cache with iterator to list position + auto result = m_sps.emplace(source, + std::make_pair(m_access_order.begin(), + Weighted::ShortestPaths(source, predecessors))); + + return result.first->second.second; +} + +const std::vector& +Weighted::GraphAlgorithms::shortest_path(size_t source, size_t destination) const +{ + return shortest_paths(source).path(destination); +} + +const std::vector& +Weighted::GraphAlgorithms::connected_components() const +{ + if (m_cc.empty()) { + m_cc.resize(boost::num_vertices(m_graph)); + boost::connected_components(m_graph, &m_cc[0]); + } + return m_cc; +} + + +void Weighted::GraphAlgorithms::clear_cache() const +{ + m_sps.clear(); + m_access_order.clear(); +} + + +Weighted::filtered_graph_type Weighted::GraphAlgorithms::reduce(const vertex_set& vertices, bool accept) const +{ + auto filter = [&](vertex_type vtx) { + return accept == (vertices.find(vtx) != vertices.end()); + }; + return Weighted::filtered_graph_type(m_graph, boost::keep_all(), filter); +} + +Weighted::filtered_graph_type 
Weighted::GraphAlgorithms::reduce(const edge_set& edges, bool accept) const +{ + auto filter = [&](edge_type edge) { + return accept == (edges.find(edge) != edges.end()); + }; + return Weighted::filtered_graph_type(m_graph, filter, boost::keep_all()); +} +Weighted::filtered_graph_type Weighted::GraphAlgorithms::weight_threshold(double threshold, bool accept) const +{ + auto weight_map = get(boost::edge_weight, m_graph); + auto filter = [&](edge_type edge) { + return accept == (get(weight_map, edge) >= threshold); + }; + return Weighted::filtered_graph_type(m_graph, filter, boost::keep_all()); +} + +Weighted::vertex_set +Weighted::GraphAlgorithms::find_neighbors_nlevel(size_t index, int nlevel, bool include_self) const +{ + vertex_set result; + + // Input validation + if (nlevel < 0) { + return result; // Return empty set for invalid nlevel + } + + // Check if the vertex index is valid + if (index >= boost::num_vertices(m_graph)) { + return result; // Return empty set for invalid vertex index + } + + // Convert size_t to vertex_type + vertex_type start_vertex = boost::vertex(index, m_graph); + + // Special case: if nlevel is 0, only return the original vertex if include_self is true + if (nlevel == 0) { + if (include_self) { + result.insert(start_vertex); + } + return result; + } + + // std::cout << "Level " << 0 << " " << start_vertex << std::endl; + + // Use BFS to find neighbors level by level + std::queue current_level; + std::queue next_level; + std::set visited; + + // Initialize with the starting vertex + current_level.push(start_vertex); + if (include_self) { + result.insert(start_vertex); + } + visited.insert(start_vertex); + + // Process each level + for (int level = 1; level <= nlevel; ++level) { + // Process all vertices at the current level + while (!current_level.empty()) { + vertex_type current_vertex = current_level.front(); + current_level.pop(); + + // std::cout << "Level " << level-1 << " " << current_vertex << std::endl; + + // Examine all 
adjacent vertices + auto adjacent_vertices = boost::adjacent_vertices(current_vertex, m_graph); + for (auto vi = adjacent_vertices.first; vi != adjacent_vertices.second; ++vi) { + vertex_type neighbor = *vi; + + // If we haven't visited this neighbor yet + if (visited.find(neighbor) == visited.end()) { + visited.insert(neighbor); + result.insert(neighbor); + next_level.push(neighbor); + } + } + } + + // Move to the next level + current_level = std::move(next_level); + next_level = std::queue(); // Clear next_level + } + + return result; +} \ No newline at end of file diff --git a/clus/src/GroupingHelper.cxx b/clus/src/GroupingHelper.cxx index 3c77659d4..eb0f82b13 100644 --- a/clus/src/GroupingHelper.cxx +++ b/clus/src/GroupingHelper.cxx @@ -1,9 +1,9 @@ #include "WireCellClus/GroupingHelper.h" -std::map> -WireCell::PointCloud::Facade::process_groupings_helper( - WireCell::PointCloud::Facade::Grouping& original, - WireCell::PointCloud::Facade::Grouping& shadow, +std::map> +WireCell::Clus::Facade::process_groupings_helper( + WireCell::Clus::Facade::Grouping& original, + WireCell::Clus::Facade::Grouping& shadow, const std::string& aname, const std::string& pname) // Removed const here { @@ -25,14 +25,19 @@ WireCell::PointCloud::Facade::process_groupings_helper( // Step 2: Process each pair for (const auto& [orig_cluster, shad_cluster] : orig_to_shadow) { - std::cout << orig_cluster << " " << shad_cluster << std::endl; + // std::cout << orig_cluster << " " << shad_cluster << std::endl; // Get cluster index array auto cc = orig_cluster->get_pcarray(aname, pname); std::vector cc_vec(cc.begin(), cc.end()); // Create a non-const pointer for separate() Cluster* mutable_cluster = orig_cluster; // Separate clusters + auto scope_transform = mutable_cluster->get_scope_transform(mutable_cluster->get_default_scope()); + auto& scope = mutable_cluster->get_default_scope(); + mutable_cluster->get_scope_filter(scope); auto orig_splits = original.separate(mutable_cluster, cc_vec); + + 
// Get cluster index array auto shad_cc = shad_cluster->get_pcarray(aname, pname); @@ -40,7 +45,9 @@ WireCell::PointCloud::Facade::process_groupings_helper( // Create a non-const pointer for separate() Cluster* mutable_shad_cluster = shad_cluster; // Separate clusters + mutable_shad_cluster->get_scope_filter(scope); auto shad_splits = shadow.separate(mutable_shad_cluster, shad_cc_vec); + // fill in the main cluster information ... result[mutable_cluster] = std::make_tuple(mutable_shad_cluster, -1, mutable_cluster); @@ -56,4 +63,4 @@ WireCell::PointCloud::Facade::process_groupings_helper( } return result; -} \ No newline at end of file +} diff --git a/clus/src/MultiAlgBlobClustering.cxx b/clus/src/MultiAlgBlobClustering.cxx index 21274c2a5..47490cce6 100644 --- a/clus/src/MultiAlgBlobClustering.cxx +++ b/clus/src/MultiAlgBlobClustering.cxx @@ -1,6 +1,4 @@ #include "WireCellClus/MultiAlgBlobClustering.h" -#include "WireCellClus/Facade.h" -#include #include "WireCellClus/Facade_Summary.h" @@ -15,8 +13,9 @@ #include "WireCellUtil/ExecMon.h" #include "WireCellUtil/String.h" #include "WireCellUtil/Exceptions.h" -#include "WireCellUtil/Graph.h" +#include "WireCellUtil/NamedFactory.h" +#include #include WIRECELL_FACTORY(MultiAlgBlobClustering, WireCell::Clus::MultiAlgBlobClustering, WireCell::INamed, @@ -26,24 +25,69 @@ using namespace WireCell; using namespace WireCell::Clus; using namespace WireCell::Aux; using namespace WireCell::Aux::TensorDM; -using namespace WireCell::PointCloud::Facade; +using namespace WireCell::Clus::Facade; using namespace WireCell::PointCloud::Tree; MultiAlgBlobClustering::MultiAlgBlobClustering() : Aux::Logger("MultiAlgBlobClustering", "clus") - , m_bee_img("uboone", "img") - , m_bee_ld("uboone", "clustering") - , m_bee_dead("channel-deadarea", 1*units::mm, 3) // tolerance, minpts +// , m_bee_dead("channel-deadarea", 1*units::mm, 3) // tolerance, minpts +{ +} + + +static +std::string format_path( + std::string path, + const std::string& name, + 
int ident, + const std::map subpaths) +{ + auto it = subpaths.find(name); + if (it == subpaths.end()) { + path += "/" + name; + } + else { + path += it->second; + } + if (path.find("%") == std::string::npos) { + return path; + } + return String::format(path, ident); +} + +std::string MultiAlgBlobClustering::inpath(const std::string& name, int ident) { + return format_path(m_inpath, name, ident, m_insubpaths); } +std::string MultiAlgBlobClustering::outpath(const std::string& name, int ident) +{ + return format_path(m_outpath, name, ident, m_outsubpaths); +} + void MultiAlgBlobClustering::configure(const WireCell::Configuration& cfg) { + m_groupings = convert(cfg["groupings"], m_groupings); + m_inpath = get(cfg, "inpath", m_inpath); m_outpath = get(cfg, "outpath", m_outpath); + for (const auto& jsp : cfg["insubpaths"]) { + m_insubpaths[jsp["name"].asString()] = jsp["subpath"].asString(); + } + for (const auto& jsp : cfg["outsubpaths"]) { + m_outsubpaths[jsp["name"].asString()] = jsp["subpath"].asString(); + } + + { + auto jcid = cfg["cluster_id_order"]; + if (jcid.isString()) { + m_clusters_id_order = jcid.asString(); + } + } + if (cfg.isMember("bee_dir")) { - log->warn("the 'bee_dir' option is no longer supported, instead use 'bee_zip' to name a .zip file"); + log->debug("the 'bee_dir' option is no longer supported, instead use 'bee_zip' to name a .zip file"); } std::string bee_zip = get(cfg, "bee_zip", "mabc.zip"); // Add new configuration option for initial index @@ -69,35 +113,119 @@ void MultiAlgBlobClustering::configure(const WireCell::Configuration& cfg) m_save_deadarea = get(cfg, "save_deadarea", m_save_deadarea); m_dead_live_overlap_offset = get(cfg, "dead_live_overlap_offset", m_dead_live_overlap_offset); - // m_x_boundary_low_limit = get(cfg, "x_boundary_low_limit", m_x_boundary_low_limit); - // m_x_boundary_high_limit = get(cfg, "x_boundary_high_limit", m_x_boundary_high_limit); - - m_func_cfgs = cfg["func_cfgs"]; + for (auto jtn : cfg["pipeline"]) { + 
std::string tn = jtn.asString(); + log->debug("configuring clustering method: {}", tn); + auto imeth = Factory::find_tn(tn); + m_pipeline.emplace_back(EnsembleVisitor{tn, imeth}); + } m_perf = get(cfg, "perf", m_perf); - m_anode = Factory::find_tn(cfg["anode"].asString()); + for (const auto& aname : cfg["anodes"]) { + auto anode = Factory::find_tn(aname.asString()); + m_anodes.push_back(anode); + } - m_face = get(cfg, "face", 0); + m_dv = Factory::find_tn(cfg["detector_volumes"].asString()); - m_bee_img.detector(get(cfg, "bee_detector", "uboone")); - m_bee_img.algorithm(String::format("%s-%d-%d", m_bee_img.algorithm().c_str(), m_anode->ident(), m_face)); - log->debug("m_bee_img.algorithm: {}", m_bee_img.algorithm()); - m_bee_ld.detector(get(cfg, "bee_detector", "uboone")); - m_bee_ld.algorithm(String::format("%s-%d-%d", m_bee_ld.algorithm().c_str(), m_anode->ident(), m_face)); - log->debug("m_bee_ld.algorithm: {}", m_bee_ld.algorithm()); + m_dump_json = get(cfg, "dump_json", false); - m_geomhelper = Factory::find_tn(cfg["geom_helper"].asString()); + // Configure bee points sets + if (cfg.isMember("bee_points_sets")) { + auto bee_points_sets = cfg["bee_points_sets"]; + for (const auto& bps : bee_points_sets) { + BeePointsConfig bpc; + bpc.name = get(bps, "name", ""); + bpc.detector = get(bps, "detector", "uboone"); + bpc.algorithm = get(bps, "algorithm", bpc.name); + bpc.pcname = get(bps, "pcname", "3d"); + bpc.grouping = get(bps, "grouping", "live"); + bpc.visitor = get(bps, "visitor", ""); + bpc.filter = get(bps, "filter", 1); // 1 for on, 0 for off, -1 for inverse filter + + // Get coordinates + if (bps.isMember("coords")) { + for (const auto& coord : bps["coords"]) { + bpc.coords.push_back(coord.asString()); + } + } else { + // Default coordinates + bpc.coords = {"x", "y", "z"}; + } + + bpc.individual = get(bps, "individual", false); + + m_bee_points_configs.push_back(bpc); + + + // If individual, also initialize bee points for each APA and face + if 
(bpc.individual) { + for (const auto& anode : m_anodes) { + int apa = anode->ident(); + // Initialize the outer map if it doesn't exist + if (m_bee_points[bpc.name].by_apa_face.find(apa) == + m_bee_points[bpc.name].by_apa_face.end()) { + m_bee_points[bpc.name].by_apa_face[apa] = std::map(); + } + + // Initialize bee points for each face + for (size_t face_index = 0; face_index < anode->faces().size(); ++face_index) { + int face = anode->faces()[face_index]->which(); + std::string algo_name = String::format("%s-apa%d-face%d", bpc.algorithm.c_str(), apa, face); + // std::cout << "Test: Individual: " << algo_name << std::endl; + m_bee_points[bpc.name].by_apa_face[apa][face] = Bee::Points(bpc.detector, algo_name); + } + } + }else{ + m_bee_points[bpc.name].global.detector(bpc.detector); + m_bee_points[bpc.name].global.algorithm(String::format("%s-global", bpc.name)); + // std::cout << "Test: Global: " << m_bee_points[bpc.name].global.algorithm() << std::endl; + } + + log->debug("Configured bee points set: {}, algorithm: {}, individual: {}", + bpc.name, bpc.algorithm, bpc.individual ? 
"true" : "false"); + } + } - m_dump_json = get(cfg, "dump_json", false); + // Initialize patches for each APA and face + if (m_save_deadarea) { + for (const auto& anode : m_anodes) { + int apa = anode->ident(); + + // Initialize the outer map if it doesn't exist + if (m_bee_dead_patches.find(apa) == + m_bee_dead_patches.end()) { + m_bee_dead_patches[apa] = std::map(); + } + + // Initialize patches for each face + for (size_t face_index = 0; face_index < anode->faces().size(); ++face_index) { + int face = anode->faces()[face_index]->which(); + std::string name = String::format("channel-deadarea-apa%d-face%d", apa, face); + m_bee_dead_patches[apa].insert({face,Bee::Patches(name, 1*units::mm, 3)}); // Same parameters as the global one + } + } + } } WireCell::Configuration MultiAlgBlobClustering::default_configuration() const { Configuration cfg; + + assign(cfg["groupings"], m_groupings); + cfg["inpath"] = m_inpath; cfg["outpath"] = m_outpath; + + // repeat defaults as literals just incase some "clever" person tries to + // call this method AFTER configure() as that method mutates m_inlive, etc. + cfg["inlive"] = "/live"; + cfg["outlive"] = "/live"; + cfg["indead"] = "/dead"; + cfg["outdead"] = "/dead"; + // cfg["bee_dir"] = m_bee_dir; cfg["bee_zip"] = "mabc.zip"; cfg["save_deadarea"] = m_save_deadarea; @@ -141,68 +269,361 @@ void MultiAlgBlobClustering::flush(WireCell::Bee::Points& bpts, int ident) void MultiAlgBlobClustering::flush(int ident) { - flush(m_bee_img, ident); - flush(m_bee_ld, ident); - if (m_save_deadarea && m_bee_dead.size()) { - m_bee_dead.flush(); - m_sink.write(m_bee_dead); - m_bee_dead.clear(); + // flush(m_bee_img, ident); + // flush(m_bee_ld, ident); + // Flush all bee points sets + + for (auto& [name, apa_bpts] : m_bee_points) { + // C++17 can not use structured bindings in lambda capture list. 
+ const std::string the_name = name; + + // Find the configuration for this name to check if it's individual + auto it = std::find_if(m_bee_points_configs.begin(), m_bee_points_configs.end(), + [&the_name](const BeePointsConfig& cfg) { return cfg.name == the_name; }); + + bool individual = (it != m_bee_points_configs.end()) ? it->individual : false; + + if (individual) { + // Write individual bee points + for (auto& [anode_id, face_map] : apa_bpts.by_apa_face) { + for (auto& [face, bpts] : face_map) { + if (!bpts.empty()) { + m_sink.write(bpts); + // Clear after writing + int run = 0, evt = 0; + if (ident > 0) { + run = (ident >> 16) & 0x7fff; + evt = (ident) & 0xffff; + } + bpts.reset(evt, 0, run); + } + } + } + } else { + // Write global bee points + if (!apa_bpts.global.empty()) { + m_sink.write(apa_bpts.global); + // Clear after writing + int run = 0, evt = 0; + if (ident > 0) { + run = (ident >> 16) & 0x7fff; + evt = (ident) & 0xffff; + } + apa_bpts.global.reset(evt, 0, run); + } + } + } + + + // if (m_save_deadarea && m_bee_dead.size()) { + // m_bee_dead.flush(); + // m_sink.write(m_bee_dead); + // m_bee_dead.clear(); + // } + if (m_save_deadarea) { + + // Flush individual patches + for (auto& [apa, face_map] : m_bee_dead_patches) { + for (auto& [face, patches] : face_map) { + if (patches.size()) { + patches.flush(); + m_sink.write(patches); + patches.clear(); + } + } + } } + m_last_ident = ident; } -// There are equivalent functions in Aux::Bee:: but the pc tree is subject to -// many schema and there is no "standard". So we keep this dumper here, since -// it is here we know the pc tree schema. 
-static -void fill_bee_points(WireCell::Bee::Points& bpts, const Points::node_t& root) + + +// Helper function remains the same as in the previous response + +void MultiAlgBlobClustering::fill_bee_points(const std::string& name, const Grouping& grouping) { - int clid = bpts.back_cluster_id(); - const double charge = 0; - for (const auto cnode : root.children()) { // this is a loop through all clusters ... - ++clid; - - Scope scope = {"3d", {"x", "y", "z"}}; - const auto& sv = cnode->value.scoped_view(scope); - - const auto& spcs = sv.pcs(); // spcs 'contains' all blobs in this cluster ... - - for (const auto& spc : spcs) { // each little 3D pc --> (blobs) spc represents x,y,z in a blob - auto x = spc.get().get("x")->elements(); - auto y = spc.get().get("y")->elements(); - auto z = spc.get().get("z")->elements(); - const size_t size = x.size(); - // fixme: add to Bee::Points a method to append vector-like things... - for (size_t ind = 0 ; indwarn("Bee points set '{}' not found, skipping", name); + return; + } + + auto& apa_bpts = m_bee_points[name]; + + // Find the configuration for this name + auto it = std::find_if(m_bee_points_configs.begin(), m_bee_points_configs.end(), + [&name](const BeePointsConfig& cfg) { return cfg.name == name; }); + + if (it == m_bee_points_configs.end()) { + log->warn("Configuration for bee points set '{}' not found, skipping", name); + return; + } + + const auto& config = *it; + + // Reset RSE values for all points objects + if (m_use_config_rse) { + apa_bpts.global.rse(m_runNo, m_subRunNo, m_eventNo); + for (auto& [apa, face_map] : apa_bpts.by_apa_face) { + for (auto& [face, bpts] : face_map) { + bpts.rse(m_runNo, m_subRunNo, m_eventNo); } } + } else { + // Use the default approach with ident + int run = 0, evt = 0; + if (m_last_ident > 0) { + run = (m_last_ident >> 16) & 0x7fff; + evt = (m_last_ident) & 0xffff; + } + apa_bpts.global.reset(evt, 0, run); + for (auto& [anode_id, face_map] : apa_bpts.by_apa_face) { + for (auto& [face, 
bpts] : face_map) { + bpts.reset(evt, 0, run); + } + } + } + + auto wpids = grouping.wpids(); + + + + if (config.individual){ // fill in the individual APA + for (auto wpid: wpids) { + int apa = wpid.apa(); + int face = wpid.face(); + auto it = apa_bpts.by_apa_face.find(apa); + if (it != apa_bpts.by_apa_face.end()) { + auto it2 = it->second.find(face); + if (it2 != it->second.end()) { + for (const auto* cluster : grouping.children()) { + fill_bee_points_from_cluster(it2->second, *cluster, config.pcname, config.coords, config.filter); + } + } + } + } + }else{ // fill in the global + // std::cout << "Test: " << name << " " << grouping.wpids().size() << " " << grouping.nchildren() << std::endl; + + for (const auto* cluster : grouping.children()) { + fill_bee_points_from_cluster(apa_bpts.global, *cluster, config.pcname, config.coords, config.filter); + } } } -static -void fill_bee_patches(WireCell::Bee::Patches& bee, const Points::node_t& root) + +// Helper function to fill bee points from a single cluster +void MultiAlgBlobClustering::fill_bee_points_from_cluster( + Bee::Points& bpts, const Cluster& cluster, + const std::string& pcname, const std::vector& coords, int filter) { - int first_slice = -1; - for (const auto cnode : root.children()) { - for (const auto bnode : cnode->children()) { - const auto& lpcs = bnode->value.local_pcs(); - - const auto& pc_scalar = lpcs.at("scalar"); - int slice_index_min = pc_scalar.get("slice_index_min")->elements()[0]; - if (first_slice < 0) { - first_slice = slice_index_min; + int clid = cluster.get_cluster_id(); //bpts.back_cluster_id() + 1; + + // std::cout << "Test: " << bpts.size() << " " << bpts.back_cluster_id() << " " << clid << std::endl; + + if (pcname == "steiner_pc"){ + // Export Steiner points ... 
+ // std::cout << "Exporting Steiner points for cluster ID: " << clid << " " << cluster.nchildren() << std::endl; + + auto& steiner_pc = cluster.get_pc(pcname); + if (steiner_pc.empty()) { + return; + } + // Get coordinate arrays from the point cloud + const auto& x_coords = steiner_pc.get(coords.at(0))->elements(); + const auto& y_coords = steiner_pc.get(coords.at(1))->elements(); + const auto& z_coords = steiner_pc.get(coords.at(2))->elements(); + const auto& flag_steiner_terminal = steiner_pc.get("flag_steiner_terminal")->elements(); + + std::cout << "Steiner Test: " << x_coords.size() << " " << y_coords.size() << " " << z_coords.size() << std::endl; + + for (size_t i = 0; i < x_coords.size(); ++i) { + // Create point from steiner point cloud + Point vtx(x_coords[i], y_coords[i], z_coords[i]); + + // Get the point index from the default scope + auto point_index = cluster.get_closest_point_index(vtx); + + auto charge_result = cluster.calc_charge_wcp(point_index, 4000, true); + double point_charge = charge_result.second; // Extract the charge value from the pair + + if (flag_steiner_terminal[i]) { + bpts.append(Point(x_coords[i], y_coords[i], z_coords[i]), point_charge, 1, 1); // terminals ... + }else{ + bpts.append(Point(x_coords[i], y_coords[i], z_coords[i]), point_charge, 0, 0); // non-terminals ... 
+ } + } + + + }else{ + // Get the scope + Scope scope = {pcname, coords}; + + auto filter_scope = cluster.get_scope_filter(scope); + + // std::cout << "Test: " << cluster.get_cluster_id() << " " << clid << " " << scope << " " << filter_scope << std::endl; + + bool use_scope = true; + if (filter == 1) { + use_scope = filter_scope; + } + else if (filter == 0) { + use_scope = true; // ignore filter_scope, always true + } + else if (filter == -1) { + use_scope = !filter_scope; + } + + if (use_scope) { + // Access the points through the cluster's scoped view + const WireCell::PointCloud::Tree::ScopedView& sv = cluster.sv(scope); + const auto& spcs = sv.pcs(); + const auto& nodes = sv.nodes(); // Get the nodes in the scoped view + + // Create a map to cache blob information to avoid recalculating for points in the same blob + std::unordered_map> blob_info; + + // std::cout << "Test: " << cluster.get_cluster_id() << " " << spcs.size() << std::endl; + + // For each scoped pointcloud (each corresponds to a blob) + for (size_t spc_idx = 0; spc_idx < spcs.size(); ++spc_idx) { + const auto& spc = spcs[spc_idx]; + auto x = spc.get().get(coords[0])->elements(); + auto y = spc.get().get(coords[1])->elements(); + auto z = spc.get().get(coords[2])->elements(); + + // Get the blob associated with this spc + // The node_with_major() function gets the node for this major index (blob) + const auto* node = nodes[spc_idx]; + const auto* blob = node->value.facade(); + + // Calculate blob information if not already cached + if (blob_info.find(blob) == blob_info.end()) { + double blob_charge = blob->charge(); + size_t blob_npoints = blob->npoints(); + blob_info[blob] = {blob_charge, blob_npoints}; + } + + // Get cached blob info + const auto& [blob_charge, blob_npoints] = blob_info[blob]; + + // Calculate charge per point + double point_charge = 0.0; + if (blob_npoints > 0) { + point_charge = blob_charge / blob_npoints; + } + + const size_t size = x.size(); + for (size_t ind = 0; ind < 
size; ++ind) { + // Use the calculated point_charge instead of the original charge + bpts.append(Point(x[ind], y[ind], z[ind]), point_charge, clid, clid); + } } - if (slice_index_min != first_slice) continue; - const auto& pc_corner = lpcs.at("corner"); - const auto& y = pc_corner.get("y")->elements(); - const auto& z = pc_corner.get("z")->elements(); - bee.append(y.begin(), y.end(), z.begin(), z.end()); } } + +} + + + + +void MultiAlgBlobClustering::fill_bee_patches_from_grouping( + const WireCell::Clus::Facade::Grouping& grouping) +{ + // auto wpids = grouping.wpids(); + + // For each cluster in the grouping + for (const auto* cluster : grouping.children()) { + // Get the wpids to determine which APA and face this cluster belongs to + + fill_bee_patches_from_cluster(*cluster); + + + // if (!wpids.empty()) { + // // Store patches by APA and face + // for (auto wpid : wpids) { + // int apa = wpid.apa(); + // int face = wpid.face(); + // + // } + // } + } +} + + +// Helper function to fill patches from a single cluster +void MultiAlgBlobClustering::fill_bee_patches_from_cluster( + const WireCell::Clus::Facade::Cluster& cluster) +{ + int first_slice = -1; + + // Get the underlying node that contains this cluster + const auto* cluster_node = cluster.node(); + if (!cluster_node) { + log->warn("Cannot access node for cluster"); + return; + } + + // Iterate through child nodes (blobs) + for (const auto* bnode : cluster_node->children()) { + auto wpid = bnode->value.facade()->wpid(); + int apa = wpid.apa(); + int face = wpid.face(); + + + auto it_apa = m_bee_dead_patches.find(apa); + if (it_apa != m_bee_dead_patches.end()) { + auto it_face = it_apa->second.find(face); + if (it_face != it_apa->second.end()) { + auto & patches = it_face->second; + + // Access the local point clouds in the node + const auto& lpcs = bnode->value.local_pcs(); + + // Get the scalar PC to find the slice index + if (lpcs.find("scalar") == lpcs.end()) { + continue; // Skip if no scalar PC + } + 
const auto& pc_scalar = lpcs.at("scalar"); + + // Get slice_index_min + if (!pc_scalar.get("slice_index_min")) { + continue; // Skip if no slice_index_min + } + int slice_index_min = pc_scalar.get("slice_index_min")->elements()[0]; + + // Set first_slice if not already set + if (first_slice < 0) { + first_slice = slice_index_min; + } + + // Skip blobs not on the first slice + if (slice_index_min != first_slice) continue; + + // Access the corner point cloud + if (lpcs.find("corner") == lpcs.end()) { + continue; // Skip if no corner PC + } + const auto& pc_corner = lpcs.at("corner"); + + // Get y and z coordinates + if (!pc_corner.get("y") || !pc_corner.get("z")) { + continue; // Skip if missing y or z + } + const auto& y = pc_corner.get("y")->elements(); + const auto& z = pc_corner.get("z")->elements(); + + // Add to patches + patches.append(y.begin(), y.end(), z.begin(), z.end()); + } + } + } } + struct Perf { bool enable; Log::logptr_t log; @@ -227,22 +648,78 @@ struct Perf { em(ctx); } - void dump(const std::string& ctx, const Grouping& grouping, bool shallow = true, bool mon = true) + void dump(const std::string& ctx, const Ensemble& ensemble, bool shallow = true, bool mon = true) { if (!enable) return; if (mon) (*this)(ctx); - log->debug("{} grouping {}", ctx, grouping); - if (shallow) return; - auto children = grouping.children(); // copy - sort_clusters(children); - size_t count = 0; - for (const auto* cluster : children) { - bool sane = cluster->sanity(log); - log->debug("{} cluster {} {} sane:{}", ctx, count++, *cluster, sane); + + log->debug("{} ensemble with {} groupings:", ctx, ensemble.nchildren()); + + for (const auto* grouping : ensemble.children()) { + + { + auto name = grouping->get_name(); + size_t npoints_total = 0; + size_t nzero = 0; + size_t count = 0; + for (const auto* cluster : grouping->children()) { + int n = cluster->npoints(); + if (n == 0) { + ++nzero; + } + npoints_total += n; + // log->debug("loaded cluster {} with {} points out of 
{}", count, n, npoints_total); + ++count; + // std::cout << "Xin: " << name << " loaded cluster " << count << " with " << n << "points and " << cluster->nchildren() << "blobs" << std::endl; + } + + + + log->debug("\tgrouping \"{}\": {}, {} points and {} clusters with no points", + name, *grouping, npoints_total, nzero); + (void)count; + } + + if (shallow) continue; + + auto children = grouping->children(); // copy + sort_clusters(children); + size_t count = 0; + for (const auto* cluster : children) { + bool sane = cluster->sanity(log); + log->debug("\t\tcluster {} {} sane:{}", count++, *cluster, sane); + } } } }; + +Grouping& MultiAlgBlobClustering::load_grouping( + Ensemble& ensemble, + const std::string& name, + const std::string& path, + const ITensorSet::pointer ints) +{ + const auto& tens = *ints->tensors(); + try { + ensemble.add_grouping_node(name, as_pctree(tens, path)); + } + catch (WireCell::KeyError& err) { + log->warn("No pc-tree at tensor datapath {}, making empty", path); + ensemble.make_grouping(name); + } + + Grouping* grouping = ensemble.with_name(name).at(0); + if (!grouping) { + raise("failed to make grouping node %s at %s", name, path); + } + + grouping->enumerate_idents(); + grouping->set_anodes(m_anodes); + grouping->set_detector_volumes(m_dv); + return *grouping; +} + bool MultiAlgBlobClustering::operator()(const input_pointer& ints, output_pointer& outts) { outts = nullptr; @@ -262,8 +739,8 @@ bool MultiAlgBlobClustering::operator()(const input_pointer& ints, output_pointe m_sink.set_rse(m_runNo, m_subRunNo, m_eventNo); } // Use default behavior - reset_bee(ident, m_bee_img); - reset_bee(ident, m_bee_ld); + // reset_bee(ident, m_bee_img); + // reset_bee(ident, m_bee_ld); m_last_ident = ident; } else if (m_last_ident != ident) { @@ -277,115 +754,116 @@ bool MultiAlgBlobClustering::operator()(const input_pointer& ints, output_pointe } // else do nothing when ident is unchanged. 
- std::string inpath = m_inpath; - if (inpath.find("%") != std::string::npos) { - inpath = String::format(inpath, ident); - } - - const auto& intens = *ints->tensors(); - auto root_live = std::move(as_pctree(intens, inpath + "/live")); - if (!root_live) { - log->error("Failed to get dead point cloud tree from \"{}\"", inpath); - raise("Failed to get live point cloud tree from \"%s\"", inpath); - } - auto grouping = root_live->value.facade(); - grouping->set_anode(m_anode); - grouping->set_params(m_geomhelper->get_params(m_anode->ident(), m_face)); - perf("loaded live clusters"); - { - size_t npoints_total = 0; - size_t nzero = 0; - for (const auto* cluster : grouping->children()) { - int n = cluster->npoints(); - if (n == 0) { - ++nzero; - } - npoints_total += n; - } - log->debug("loaded live grouping with {} clusters, {} points, and {} clusters with no points", - grouping->nchildren(), npoints_total, nzero); - // It is probably an error if nzero is not zero. - } + Points::node_t root; + Ensemble& ensemble = *root.value.facade(); - // log->debug("Got live pctree with {} children", root_live->nchildren()); - // log->debug(em("got live pctree")); - log->debug("as_pctree from \"{}\"", inpath + "/dead"); - const std::string deadinpath = inpath + "/dead"; - Points::node_ptr root_dead; - try { - root_dead = as_pctree(intens, deadinpath); - perf("loaded dead clusters"); - } - catch (WireCell::KeyError& err) { - log->warn("No pc-tree at datapath {}, assuming no 'dead' clusters", deadinpath); - root_dead = std::make_unique(); - } + for (const auto& gname : m_groupings) { + const auto datapath = inpath(gname, ident); + load_grouping(ensemble, gname, datapath, ints); + perf.dump("loaded " + gname, ensemble); + } - fill_bee_points(m_bee_img, *root_live.get()); - perf("loaded dump live clusters to bee"); if (m_save_deadarea) { - fill_bee_patches(m_bee_dead, *root_dead.get()); - perf("loaded dump dead regions to bee"); + auto gs = ensemble.with_name("live"); + if (gs.size()) { + 
// Fill patches from the dead grouping + fill_bee_patches_from_grouping(*gs[0]); + perf("dump dead regions to bee"); + } } - log->debug("will {} {} dead patches", m_save_deadarea ? "save" : "not save", m_bee_dead.size()); - cluster_set_t cluster_connected_dead; + perf.dump("pre clustering", ensemble); - // initialize clusters ... - Grouping& live_grouping = *root_live->value.facade(); + for (const auto& config : m_bee_points_configs) { + if (config.name != "img") { + continue; + } + auto gs = ensemble.with_name("live"); + if (gs.empty()) { + continue; + } + fill_bee_points(config.name, *gs[0]); + } - Grouping& dead_grouping = *root_dead->value.facade(); - dead_grouping.set_anode(m_anode); - dead_grouping.set_params(m_geomhelper->get_params(m_anode->ident(), m_face)); - + perf.dump("start clustering", ensemble); - //perf.dump("original live clusters", live_grouping, false, false); - //perf.dump("original dead clusters", dead_grouping, false, false); + // THE MAIN LOOP + for (const auto& cmeth : m_pipeline) { + cmeth.meth->visit(ensemble); + perf.dump(cmeth.name, ensemble); - perf.dump("pre clustering", live_grouping); + for (auto* grouping : ensemble.children()) { + grouping->enumerate_idents(m_clusters_id_order); + } - std::map>& dead_u_index = live_grouping.get_dead_winds(0, 0); - std::map>& dead_v_index = live_grouping.get_dead_winds(0, 1); - std::map>& dead_w_index = live_grouping.get_dead_winds(0, 2); - log->debug("dead_u_index size {}", dead_u_index.size()); - log->debug("dead_v_index size {}", dead_v_index.size()); - log->debug("dead_w_index size {}", dead_w_index.size()); + // for (const auto& config : m_bee_points_configs) { + // if (config.name == "img") continue; + // if (config.visitor != cmeth.name) continue; + // auto gs = ensemble.with_name(config.grouping); + // if (gs.empty()) { + // continue; + // } + // fill_bee_points(config.name, *gs[0]); + // } + } - for (const auto& func_cfg : m_func_cfgs) { - // std::cout << "func_cfg: " << func_cfg << 
std::endl; - auto func = getClusteringFunction(func_cfg); + // + // At this point, the ensemble may have more or fewer groupings just "live" + // and "dead" including no groupings at all. But for now, we assume the + // original "live" and "dead" still exist and with their original facades. + // Famous last words.... + // + - func(live_grouping, dead_grouping, cluster_connected_dead); + // Fill all configured bee points sets + for (const auto& config : m_bee_points_configs) { + if(config.name == "img") continue; - perf.dump(func_cfg["name"].asString(), live_grouping); - } + auto gs = ensemble.with_name(config.grouping); + if (gs.empty()) { + continue; + } + fill_bee_points(config.name, *gs[0]); - fill_bee_points(m_bee_ld, *root_live.get()); + } perf("dump live clusters to bee"); + auto grouping_names = ensemble.names(); + if (m_dump_json) { - Persist::dump(String::format("live-summary-%d.json", ident), json_summary(live_grouping), true); - Persist::dump(String::format("dead-summary-%d.json", ident), json_summary(dead_grouping), true); + for (const auto& name : grouping_names) { + auto gs = ensemble.with_name(name); + Persist::dump(String::format("%s-summary-%d.json", name, ident), + json_summary(*gs[0]), true); + } } - - std::string outpath = m_outpath; - if (outpath.find("%") != std::string::npos) { - outpath = String::format(outpath, ident); + log->debug("Produce pctrees with {} groupings", grouping_names.size()); + + ITensor::vector outtens; + for (const auto& name : grouping_names) { + + // This next bit may look a little weird and it is so some explanation + // is warranted. Originally, we had disembodied "root" grouping nodes, + // live and dead. To clean up the clustering api we added the + // "ensemble" as root node with children consisting of grouping nodes. + // At the time of writing, the as_tensors() does not like serializing + // non-root nodes I do not want to debug right now. 
And, I do not want + // the "ensemble" concept to leak out from the MABC+clustering context. + // So, I remove each grouping child node from the ensemble prior to + // serializing. The remove gives an auto_ptr so the node is destructed + // as this loop progresses. + auto gs = ensemble.with_name(name); + auto& grouping = *gs[0]; + auto node = ensemble.remove_child(grouping); + auto tens = as_tensors(*node, outpath(name, ident)); + outtens.insert(outtens.end(), tens.begin(), tens.end()); + log->debug("Produce {} tensors for grouping {}", tens.size(), name); } - auto outtens = as_tensors(*root_live.get(), outpath + "/live"); - perf("output live clusters to tensors"); - auto outtens_dead = as_tensors(*root_dead.get(), outpath + "/dead"); - perf("output dead clusters to tensors"); - // Merge - outtens.insert(outtens.end(), outtens_dead.begin(), outtens_dead.end()); outts = as_tensorset(outtens, ident); - perf("combine tensors"); - root_live = nullptr; - root_dead = nullptr; - perf("clear pc tree memory"); + perf("done"); return true; } diff --git a/clus/src/PAAL.h b/clus/src/PAAL.h new file mode 100644 index 000000000..61dfce06c --- /dev/null +++ b/clus/src/PAAL.h @@ -0,0 +1,81 @@ +/** This holds an small extract from paal. + + PAAL is by Piotr Wygocki (wygos at mimuw.edu.pl). + With a page at https://paal.mimuw.edu.pl/ + And repo URL: http://siekiera.mimuw.edu.pl:8082/paal + It is licensed under the BOOST License 1.0. 
+ +Boost Software License - Version 1.0 - August 17th, 2003 + +Permission is hereby granted, free of charge, to any person or organization +obtaining a copy of the software and accompanying documentation covered by +this license (the "Software") to use, reproduce, display, distribute, +execute, and transmit the Software, and to prepare derivative works of the +Software, and to permit third-parties to whom the Software is furnished to +do so, all subject to the following: + +The copyright notices in the Software and this entire statement, including +the above license grant, this restriction and the following disclaimer, +must be included in all copies of the Software, in whole or in part, and +all derivative works of the Software, unless such copies or derivative +works are solely in the form of machine-executable object code generated by +a source language processor. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
+ */ + + +#ifndef WIRECELLCLUS_PAAL +#define WIRECELLCLUS_PAAL + +namespace WireCell::Clus::PAAL { + + struct less { + /** + * @brief operator() + * + * @tparam T + * @param x + * @param y + * + * @return + */ + template + auto operator()(const T &x, const T &y) const->decltype(x < y) { + return x < y; + } + }; + + template + class nearest_recorder + : boost::base_visitor> { + public: + using event_filter = Tag; + nearest_recorder(NearestMap &nearest_map, LastEdgeMap &vpred) + : m_nearest_map(nearest_map), m_vpred(vpred) {}; + template + void operator()(Edge const e, Graph const &g) { + m_nearest_map[target(e, g)] = m_nearest_map[source(e, g)]; + m_vpred[target(e, g)] = e; + } + + private: + NearestMap &m_nearest_map; + LastEdgeMap &m_vpred; + }; + + template + nearest_recorder + make_nearest_recorder(NearestMap &nearest_map, LastEdgeMap &vpred, Tag) { + return nearest_recorder{ nearest_map, vpred }; + } + +} + +#endif diff --git a/clus/src/PCTransforms.cxx b/clus/src/PCTransforms.cxx new file mode 100644 index 000000000..bd2d995df --- /dev/null +++ b/clus/src/PCTransforms.cxx @@ -0,0 +1,181 @@ +// A Clus::IPCTransform that does T0 correction. +// +// It takes a single configuration parameter: +// +// detector_volumes which defaults to "DetectorVolumes". +// + +#include "WireCellClus/IPCTransform.h" +#include "WireCellIface/IDetectorVolumes.h" + +#include "WireCellIface/IConfigurable.h" + +#include "WireCellUtil/NamedFactory.h" + +#include +#include +#include + +class PCTransformSet; + +WIRECELL_FACTORY(PCTransformSet, PCTransformSet, + WireCell::Clus::IPCTransformSet, + WireCell::IConfigurable) + + +// Note, we do not register any of the individual IPCTransforms because the +// crazy clus code evolved to reinvent this pattern and it's too damn ugly at +// this point to try to refactor properly. 
+ + +using namespace WireCell; +using namespace WireCell::Clus; + +class T0Correction : public WireCell::Clus::IPCTransform +{ +public: + + virtual ~T0Correction() = default; + + T0Correction(IDetectorVolumes::pointer dv) + : m_dv(dv) { + + for (const auto& [wfid, _] : m_dv->wpident_faces()) { + WirePlaneId wpid(wfid); + m_time_global_offsets[wpid.apa()][wpid.face()] = m_dv->metadata(wpid)["time_offset"].asDouble(); + m_drift_speeds[wpid.apa()][wpid.face()] = m_dv->metadata(wpid)["drift_speed"].asDouble(); + } + } + + /** + * From time2drift in Facade_Util.cxx + * x_raw = xorig + face->dirx * (time_read_out + time_global_offset) * abs_drift_speed; + * x_corr = xorig + face->dirx * (time_read_out - clustser_t0) * abs_drift_speed; + *x_corr - x_raw = face->dirx * (- clustser_t0 - time_global_offset) * abs_drift_speed; + */ + + // get x_corr from x_raw + virtual Point forward(const Point &pos_raw, double cluster_t0, int face, + int apa) const override { + Point pos_corr(pos_raw); + pos_corr[0] -= m_dv->face_dirx(WirePlaneId(kAllLayers, face, apa)) * (cluster_t0 ) * + m_drift_speeds.at(apa).at(face); + return pos_corr; + } + + virtual Point backward(const Point &pos_corr, double cluster_t0, int face, + int apa) const override { + Point pos_raw(pos_corr); + pos_raw[0] += m_dv->face_dirx(WirePlaneId(kAllLayers, face, apa)) * (cluster_t0 ) * + m_drift_speeds.at(apa).at(face); + return pos_raw; + } + + virtual bool filter(const Point &pos_corr, double clustser_t0, int face, + int apa) const override { + auto wpid = m_dv->contained_by(pos_corr); + if (!wpid.valid()) return false; + if (wpid.apa() != apa || wpid.face() != face) return false; + return true; + // return ().valid() ? 
true : false; + } + + virtual Dataset forward(const Dataset &pc_raw, const std::vector& arr_raw_names, const std::vector& arr_cor_names, double cluster_t0, int face, + int apa) const override { + // std::cout << "Test: " << m_time_global_offsets.at(apa).at(face) << " " << cluster_t0 << std::endl; + + const auto &arr_x = pc_raw.get(arr_raw_names[0])->elements(); + const auto &arr_y = pc_raw.get(arr_raw_names[1])->elements(); + const auto &arr_z = pc_raw.get(arr_raw_names[2])->elements(); + std::vector arr_x_corr(arr_x.size()); + for (size_t i = 0; i < arr_x.size(); ++i) { + arr_x_corr[i] = arr_x[i] - m_dv->face_dirx(WirePlaneId(kAllLayers, face, apa)) * (cluster_t0 ) * + m_drift_speeds.at(apa).at(face); + } + Dataset ds_corr; + ds_corr.add(arr_cor_names[0], Array(arr_x_corr)); + ds_corr.add(arr_cor_names[1], Array(arr_y)); + ds_corr.add(arr_cor_names[2], Array(arr_z)); + + // ds_corr.add("x_corr", Array(arr_x_corr)); + // ds_corr.add("y_corr", Array(arr_y)); + // ds_corr.add("z_corr", Array(arr_z)); + return ds_corr; + } + + virtual Dataset backward(const Dataset &pc_corr, const std::vector& arr_cor_names, const std::vector& arr_raw_names, double cluster_t0, int face, + int apa) const override { + const auto &arr_x = pc_corr.get(arr_cor_names[0])->elements(); + const auto &arr_y = pc_corr.get(arr_cor_names[1])->elements(); + const auto &arr_z = pc_corr.get(arr_cor_names[2])->elements(); + std::vector arr_x_corr(arr_x.size()); + for (size_t i = 0; i < arr_x.size(); ++i) { + arr_x_corr[i] = arr_x[i] + m_dv->face_dirx(WirePlaneId(kAllLayers, face, apa)) * (cluster_t0 ) * + m_drift_speeds.at(apa).at(face); + } + Dataset ds_raw; + ds_raw.add("x", Array(arr_x_corr)); + ds_raw.add("y", Array(arr_y)); + ds_raw.add("z", Array(arr_z)); + return ds_raw; + } + + virtual Dataset filter(const Dataset &pc_corr, const std::vector& arr_cor_names, double clustser_t0, int face, + int apa) const override { + std::vector arr_filter(pc_corr.size_major()); + const auto &arr_x = 
pc_corr.get(arr_cor_names[0])->elements(); + const auto &arr_y = pc_corr.get(arr_cor_names[1])->elements(); + const auto &arr_z = pc_corr.get(arr_cor_names[2])->elements(); + for (size_t i = 0; i < arr_x.size(); ++i) { + arr_filter[i] = false; + auto wpid = m_dv->contained_by(Point(arr_x[i], arr_y[i], arr_z[i])); + if (wpid.valid()) { + if (wpid.apa() == apa && wpid.face() == face) { + arr_filter[i] = true; + } + } + // if (wpid.apa() != apa || wpid.face() != face) return false; + // ().valid() ? 1 : 0; + } + Dataset ds; + ds.add("filter", Array(arr_filter)); + return ds; + } + +private: + IDetectorVolumes::pointer m_dv; // do not own + + // // m_time_global_offsets.at(apa).at(face) = time_global_offset + std::map> m_time_global_offsets; + std::map> m_drift_speeds; +}; + +class PCTransformSet : public WireCell::Clus::IPCTransformSet, + public WireCell::IConfigurable +{ +public: + + PCTransformSet() {} + virtual ~PCTransformSet() {} + + virtual Configuration default_configuration() const { + Configuration cfg; + cfg["detector_volumes"] = "DetectorVolumes"; + return cfg; + } + virtual void configure(const Configuration& cfg) { + std::string dvtn = get(cfg, "detector_volumes", "DetectorVolumes"); + auto dv = Factory::find_tn(dvtn); + m_pcts["T0Correction"] = std::make_shared(dv); + // ... 
+ } + + virtual IPCTransform::pointer pc_transform(const std::string &name) const { + auto it = m_pcts.find(name); + if (it == m_pcts.end()) { return nullptr; } + return it->second; + } + +private: + std::map m_pcts; +}; diff --git a/clus/src/PRGraph.cxx b/clus/src/PRGraph.cxx new file mode 100644 index 000000000..0d858f7bc --- /dev/null +++ b/clus/src/PRGraph.cxx @@ -0,0 +1,96 @@ +#include "WireCellClus/PRGraph.h" + +namespace WireCell::Clus::PR { + + bool add_vertex(Graph& g, VertexPtr vtx) + { + if (vtx->descriptor_valid()) { + return false; + } + auto& gb = g[boost::graph_bundle]; + const size_t index = gb.num_node_indices; + auto desc = boost::add_vertex(NodeBundle{vtx, index}, g); + ++ gb.num_node_indices; + vtx->set_descriptor(desc); + return true; + } + + bool remove_vertex(Graph& graph, VertexPtr vtx) + { + if (! vtx->descriptor_valid()) { return false; } + auto desc = vtx->get_descriptor(); + boost::remove_vertex(desc, graph); + vtx->invalidate_descriptor(); + return true; + } + + bool add_segment(Graph& g, SegmentPtr seg, VertexPtr vtx1, VertexPtr vtx2) + { + bool changed = false; + changed = add_vertex(g, vtx1) || changed; + changed = add_vertex(g, vtx2) || changed; + + if (seg->descriptor_valid()) { + return changed; + } + + auto& gb = g[boost::graph_bundle]; + const size_t index = gb.num_edge_indices; + auto [desc,added] = boost::add_edge(vtx1->get_descriptor(), + vtx2->get_descriptor(), + EdgeBundle{seg, index}, g); + + seg->set_descriptor(desc); + + // Edge was added + if (added) { + ++ gb.num_edge_indices; + seg->set_descriptor(desc); + return true; + } + + // Edge already existed, assure its object is this one. + g[desc].segment = seg; + + return changed; + } + + bool remove_segment(Graph& graph, SegmentPtr seg) + { + if (! 
seg->descriptor_valid()) { return false; } + auto desc = seg->get_descriptor(); + boost::remove_edge(desc, graph); + seg->invalidate_descriptor(); + return true; + } + + + std::pair find_endpoints(Graph& graph, SegmentPtr seg) + { + if (! seg->descriptor_valid()) { return std::pair{}; } + + auto ed = seg->get_descriptor(); + + auto vd1 = boost::source(ed, graph); + auto vd2 = boost::target(ed, graph); + + auto [ed2,ingraph] = boost::edge(vd1, vd2, graph); + if (!ingraph) { return std::pair{}; } + + auto vtx1 = graph[vd1].vertex; + auto vtx2 = graph[vd2].vertex; + + auto ept = seg->wcpts().front().point; + + double d1 = ray_length(Ray{vtx1->wcpt().point, ept}); + double d2 = ray_length(Ray{vtx2->wcpt().point, ept}); + + if (d1 < d2) { + return std::make_pair(vtx1, vtx2); + } + return std::make_pair(vtx2, vtx1); + } + +} + + diff --git a/clus/src/PRGraphType.cxx b/clus/src/PRGraphType.cxx new file mode 100644 index 000000000..0fe2facac --- /dev/null +++ b/clus/src/PRGraphType.cxx @@ -0,0 +1,18 @@ +#include "WireCellClus/PRGraphType.h" +#include "WireCellUtil/GraphTools.h" + +namespace WireCell::Clus::PR { + + node_vector graph_nodes(Graph& g) { + return node_vector(boost::vertices(g).first, boost::vertices(g).second); + } + + node_vector ordered_nodes(Graph& g) { + auto nodes = graph_nodes(g); + std::sort(nodes.begin(), nodes.end(), [&g](const node_descriptor& a, const node_descriptor& b) { + return g[a].index < g[b].index; + }); + return nodes; + } + +} diff --git a/clus/src/PRSegment.cxx b/clus/src/PRSegment.cxx new file mode 100644 index 000000000..ffc6354b4 --- /dev/null +++ b/clus/src/PRSegment.cxx @@ -0,0 +1,68 @@ +#include "WireCellClus/PRSegment.h" +#include "WireCellClus/PRSegmentFunctions.h" + +namespace WireCell::Clus::PR { + + void Segment::reset_fit_prop(){ + for (auto& fit : m_fits) { + fit.reset(); + } + } + + int Segment::fit_index(int i){ + if (i < 0 || i >= m_fits.size()) { + throw std::out_of_range("Invalid fit index"); + } + return 
m_fits[i].index; + } + void Segment::fit_index(int i, int idx){ + if (i < 0 || i >= m_fits.size()) { + throw std::out_of_range("Invalid fit index"); + } + m_fits[i].index = idx; + } + bool Segment::fit_flag_skip(int i){ + if (i < 0 || i >= m_fits.size()) { + throw std::out_of_range("Invalid fit index"); + } + return m_fits[i].flag_fix; + } + void Segment::fit_flag_skip(int i, bool flag){ + if (i < 0 || i >= m_fits.size()) { + throw std::out_of_range("Invalid fit index"); + } + m_fits[i].flag_fix = flag; + } + + void Segment::set_fit_associate_vec(std::vector& tmp_fit_pt_vec, std::vector& tmp_fit_index, std::vector& tmp_fit_skip, const IDetectorVolumes::pointer& dv,const std::string& cloud_name){ + // Store fit points in m_fits vector + m_fits.clear(); + m_fits.reserve(tmp_fit_pt_vec.size()); + + for (size_t i = 0; i < tmp_fit_pt_vec.size(); ++i) { + Fit fit; + // Convert WCP::Point to WireCell::Point + fit.point = WireCell::Point(tmp_fit_pt_vec[i].x(), tmp_fit_pt_vec[i].y(), tmp_fit_pt_vec[i].z()); + if (i < tmp_fit_index.size()) { + fit.index = tmp_fit_index[i]; + } + if (i < tmp_fit_skip.size()) { + if (tmp_fit_skip[i]) { + fit.flag_fix = true; + } + } + m_fits.push_back(fit); + } + + // Create dynamic point cloud with the fit points + if (dv) { + create_segment_fit_point_cloud(shared_from_this(), dv, cloud_name); + } + + } + + + + +} + diff --git a/clus/src/PRSegmentFunctions.cxx b/clus/src/PRSegmentFunctions.cxx new file mode 100644 index 000000000..ba4dcdd30 --- /dev/null +++ b/clus/src/PRSegmentFunctions.cxx @@ -0,0 +1,2158 @@ +#include "WireCellClus/PRSegmentFunctions.h" +#include "WireCellClus/Facade_Cluster.h" +#include "WireCellClus/DynamicPointCloud.h" +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellUtil/Units.h" +#include "WireCellUtil/KSTest.h" +#include +#include +#include + +namespace WireCell::Clus::PR { + void create_segment_point_cloud(SegmentPtr segment, + const std::vector& path_points, + const IDetectorVolumes::pointer& dv, + 
const std::string& cloud_name) + { + if (!segment || !segment->cluster()) { + raise("create_segment_point_cloud: invalid segment or missing cluster"); + } + + auto& cluster = *segment->cluster(); + + // Create point-plane pairs + std::vector> point_plane_pairs; + for (const auto& point : path_points) { + WirePlaneId wpid = dv->contained_by(point); + point_plane_pairs.emplace_back(point, wpid); + } + + // Get wpid_params (from detector configuration) + const auto& wpids = cluster.grouping()->wpids(); + std::map> wpid_params; + std::map> wpid_U_dir; + std::map> wpid_V_dir; + std::map> wpid_W_dir; + std::set apas; + Facade::compute_wireplane_params(wpids, dv, wpid_params, wpid_U_dir, wpid_V_dir, wpid_W_dir, apas); + + // Create DynamicPointCloud + auto dpc = std::make_shared(wpid_params); + + // Create DPCPoints using factory function + auto dpc_points = Facade::make_points_direct(&cluster, dv, wpid_params, point_plane_pairs, true); + + // Add points to DynamicPointCloud + dpc->add_points(dpc_points); + + // Remove existing point cloud if it exists + if (segment->dpcloud(cloud_name)) { + segment->dpcloud(cloud_name, nullptr); + } + + // Associate with segment + segment->dpcloud(cloud_name, dpc); + } + + void create_segment_fit_point_cloud(SegmentPtr segment, + const IDetectorVolumes::pointer& dv, + const std::string& cloud_name){ + std::vector fit_points; + + if (!segment || !segment->cluster()) { + raise("create_segment_fit_point_cloud: invalid segment or missing cluster"); + } + + // Extract points from segment fits + const auto& fits = segment->fits(); + fit_points.reserve(fits.size()); + for (const auto& fit : fits) { + if (fit.valid()) { + fit_points.push_back(fit.point); + } + } + create_segment_point_cloud(segment, fit_points, dv, cloud_name); + + } + + + std::pair segment_get_closest_point(SegmentPtr seg, const WireCell::Point& point, const std::string& cloud_name){ + double min_dist = 1e9; + WireCell::Point closest_point(0,0,0); + + if (!seg) { + 
raise("get_closest_point: invalid segment"); + } + + auto dpc = seg->dpcloud(cloud_name); + if (!dpc) { + raise("get_closest_point: segment missing DynamicPointCloud with name " + cloud_name); + } + + const auto& points = dpc->get_points(); + if (points.empty()) { + raise("get_closest_point: DynamicPointCloud has no points"); + } + + // Use KD-tree to find the closest point + auto& kd_tree = dpc->kd3d(); + auto knn_results = kd_tree.knn(1, point); + + if (!knn_results.empty()) { + size_t closest_index = knn_results[0].first; + min_dist = std::sqrt(knn_results[0].second); // knn returns squared distance + + // Get the actual point from the DynamicPointCloud + const auto& dpc_point = points[closest_index]; + closest_point = WireCell::Point(dpc_point.x, dpc_point.y, dpc_point.z); + } + + return {min_dist, closest_point}; + } + + std::tuple segment_get_closest_2d_distances(SegmentPtr seg, const WireCell::Point& point, int apa, int face, const std::string& cloud_name) { + if (!seg) { + raise("segment_get_closest_2d_distances: invalid segment"); + } + + auto dpc = seg->dpcloud(cloud_name); + if (!dpc) { + raise("segment_get_closest_2d_distances: segment missing DynamicPointCloud with name 'fit'"); + } + + const auto& points = dpc->get_points(); + if (points.empty()) { + raise("segment_get_closest_2d_distances: DynamicPointCloud has no points"); + } + + // Use DynamicPointCloud's optimized method to get 2D distances for each plane + auto closest_2d_u = dpc->get_closest_2d_point_info(point, 0, face, apa); // U plane + auto closest_2d_v = dpc->get_closest_2d_point_info(point, 1, face, apa); // V plane + auto closest_2d_w = dpc->get_closest_2d_point_info(point, 2, face, apa); // W plane + + // Extract distances for each plane (U=0, V=1, W=2) + double min_dist_u = std::get<0>(closest_2d_u); + double min_dist_v = std::get<0>(closest_2d_v); + double min_dist_w = std::get<0>(closest_2d_w); + + return std::make_tuple(min_dist_u, min_dist_v, min_dist_w); + } + + std::tuple 
segment_search_kink(SegmentPtr seg, WireCell::Point& start_p, const std::string& cloud_name, double dQ_dx_threshold){ + auto tmp_results = segment_get_closest_point(seg, start_p, cloud_name); + WireCell::Point test_p = tmp_results.second; + + WireCell::Vector drift_dir_abs(1,0,0); + + const auto& fits = seg->fits(); + if (fits.empty()) { + WireCell::Point p1 = WireCell::Point(0,0,0); + WireCell::Vector dir(0,0,0); + return std::make_tuple(p1, dir, dir, false); + } + + std::vector refl_angles(fits.size(), 0); + std::vector para_angles(fits.size(), 0); + + // Start the angle search + for (size_t i = 0; i < fits.size(); i++) { + double angle1 = 0; + double angle2 = 0; + + for (int j = 0; j < 6; j++) { + WireCell::Vector v10(0,0,0); + WireCell::Vector v20(0,0,0); + + if (i >= (j+1)*2) { + v10 = fits[i].point - fits[i-(j+1)*2].point; + } else { + v10 = fits[i].point - fits.front().point; + } + + if (i+(j+1)*2 < fits.size()) { + v20 = fits[i+(j+1)*2].point - fits[i].point; + } else { + v20 = fits.back().point - fits[i].point; + } + + if (j == 0) { + double dot_product = v10.dot(v20); + double mag_product = v10.magnitude() * v20.magnitude(); + if (mag_product > 0) { + angle1 = std::acos(std::max(-1.0, std::min(1.0, dot_product / mag_product))) / M_PI * 180.0; + } + + double drift_dot1 = v10.dot(drift_dir_abs); + double drift_dot2 = v20.dot(drift_dir_abs); + double drift_mag1 = v10.magnitude(); + double drift_mag2 = v20.magnitude(); + + double drift_angle1 = 90.0, drift_angle2 = 90.0; + if (drift_mag1 > 0) drift_angle1 = std::acos(std::max(-1.0, std::min(1.0, drift_dot1 / drift_mag1))) / M_PI * 180.0; + if (drift_mag2 > 0) drift_angle2 = std::acos(std::max(-1.0, std::min(1.0, drift_dot2 / drift_mag2))) / M_PI * 180.0; + + angle2 = std::max(std::abs(drift_angle1 - 90.0), std::abs(drift_angle2 - 90.0)); + } else { + if (v10.magnitude() != 0 && v20.magnitude() != 0) { + double dot_product = v10.dot(v20); + double mag_product = v10.magnitude() * v20.magnitude(); + double 
current_angle1 = std::acos(std::max(-1.0, std::min(1.0, dot_product / mag_product))) / M_PI * 180.0; + angle1 = std::min(current_angle1, angle1); + + double drift_dot1 = v10.dot(drift_dir_abs); + double drift_dot2 = v20.dot(drift_dir_abs); + double drift_mag1 = v10.magnitude(); + double drift_mag2 = v20.magnitude(); + + double drift_angle1 = 90.0, drift_angle2 = 90.0; + if (drift_mag1 > 0) drift_angle1 = std::acos(std::max(-1.0, std::min(1.0, drift_dot1 / drift_mag1))) / M_PI * 180.0; + if (drift_mag2 > 0) drift_angle2 = std::acos(std::max(-1.0, std::min(1.0, drift_dot2 / drift_mag2))) / M_PI * 180.0; + + double current_angle2 = std::max(std::abs(drift_angle1 - 90.0), std::abs(drift_angle2 - 90.0)); + angle2 = std::min(current_angle2, angle2); + } + } + } + + refl_angles[i] = angle1; + para_angles[i] = angle2; + } + + bool flag_check = false; + int save_i = -1; + bool flag_switch = false; + bool flag_search = false; + + for (size_t i = 0; i < fits.size(); i++) { + // Check if close to test point + double dist_to_test = (test_p - fits[i].point).magnitude(); + if (dist_to_test < 0.1 * units::cm) flag_check = true; + + // Check distance constraints + double dist_to_front = (fits[i].point - fits.front().point).magnitude(); + double dist_to_back = (fits[i].point - fits.back().point).magnitude(); + double dist_to_start = (fits[i].point - start_p).magnitude(); + + if (dist_to_front < 1*units::cm || + dist_to_back < 1*units::cm || + dist_to_start < 1*units::cm) continue; + + if (flag_check) { + // Calculate average and max dQ/dx in local region + double ave_dQ_dx = 0; + int ave_count = 0; + double max_dQ_dx = fits[i].dQ / (fits[i].dx + 1e-9); + + for (int j = -2; j <= 2; j++) { + int idx = i + j; + if (idx >= 0 && idx < static_cast(fits.size())) { + double local_dQ_dx = fits[idx].dQ / (fits[idx].dx + 1e-9); + ave_dQ_dx += local_dQ_dx; + ave_count++; + if (local_dQ_dx > max_dQ_dx) max_dQ_dx = local_dQ_dx; + } + } + if (ave_count != 0) ave_dQ_dx /= ave_count; + + // 
Calculate angle sums + double sum_angles = 0; + double nsum = 0; + double sum_angles1 = 0; + double nsum1 = 0; + + for (int j = -2; j <= 2; j++) { + int idx = i + j; + if (idx >= 0 && idx < static_cast(fits.size())) { + if (para_angles[idx] > 10) { + sum_angles += pow(refl_angles[idx], 2); + nsum++; + } + if (para_angles[idx] > 7.5) { + sum_angles1 += pow(refl_angles[idx], 2); + nsum1++; + } + } + } + if (nsum != 0) sum_angles = sqrt(sum_angles / nsum); + if (nsum1 != 0) sum_angles1 = sqrt(sum_angles1 / nsum1); + + // Apply kink detection criteria + if (para_angles[i] > 10 && refl_angles[i] > 30 && sum_angles > 15) { + save_i = i; + break; + } else if (para_angles[i] > 7.5 && refl_angles[i] > 45 && sum_angles1 > 25) { + save_i = i; + break; + } else if (para_angles[i] > 15 && refl_angles[i] > 27 && sum_angles > 12.5) { + save_i = i; + break; + } else if (para_angles[i] > 15 && refl_angles[i] > 22 && sum_angles > 19 && + max_dQ_dx > dQ_dx_threshold*1.5 && ave_dQ_dx > dQ_dx_threshold) { + save_i = i; + flag_search = true; + break; + } + } + } + + // Return results + if (save_i > 0 && save_i+1 < static_cast(fits.size())) { + WireCell::Point p = fits[save_i].point; + + WireCell::Point prev_p(0,0,0); + WireCell::Point next_p(0,0,0); + int num_p = 0; + int num_p1 = 0; + + double length1 = 0; + double length2 = 0; + WireCell::Point last_p1, last_p2; + + // Calculate direction vectors by averaging nearby points + for (int i = 1; i < 10; i++) { + if (save_i >= i) { + length1 += (fits[save_i-i].point - fits[save_i-i+1].point).magnitude(); + prev_p = prev_p + fits[save_i-i].point; + last_p1 = fits[save_i-i].point; + num_p++; + } + if (save_i+i < static_cast(fits.size())) { + length2 += (fits[save_i+i].point - fits[save_i+i-1].point).magnitude(); + next_p = next_p + fits[save_i+i].point; + last_p2 = fits[save_i+i].point; + num_p1++; + } + } + + double length1_1 = (last_p1 - fits[save_i].point).magnitude(); + double length2_1 = (last_p2 - fits[save_i].point).magnitude(); + + // 
Check for direction switch + if (std::abs(length2 - length2_1) < 0.03 * length2_1 && length1 * length2_1 > 1.06 * length2 * length1_1) { + flag_switch = true; + flag_search = true; + } else if (std::abs(length1 - length1_1) < 0.03 * length1_1 && length2 * length1_1 > 1.06 * length1 * length2_1) { + flag_search = true; + } + + prev_p = prev_p * (1.0/num_p); + next_p = next_p * (1.0/num_p1); + + WireCell::Vector dir = (p - prev_p).norm(); + WireCell::Vector dir1 = (p - next_p).norm(); + + // Calculate local charge density + double sum_dQ = 0, sum_dx = 0; + for (int i = -2; i <= 2; i++) { + int idx = save_i + i; + if (idx >= 0 && idx < static_cast(fits.size())) { + sum_dQ += fits[idx].dQ; + sum_dx += fits[idx].dx; + } + } + + if (flag_search) { + if (flag_switch) { + return std::make_tuple(p, dir1, dir, true); + } else { + return std::make_tuple(p, dir, dir1, true); + } + } else if (sum_dQ / (sum_dx + 1e-9) > 25000/units::cm) { //not too low ... + if (flag_switch) { + return std::make_tuple(p, dir1, dir, false); + } else { + return std::make_tuple(p, dir, dir1, false); + } + } else { + if (flag_switch) { + return std::make_tuple(p, dir1, dir, true); + } else { + return std::make_tuple(p, dir, dir1, true); + } + } + } else { + WireCell::Point p1 = fits.back().point; + WireCell::Vector dir(0,0,0); + return std::make_tuple(p1, dir, dir, false); + } + } + + + + + bool break_segment(Graph& graph, SegmentPtr seg, Point point, double max_dist/*=1e9*/) + { + /// sanity checks + if (! seg->descriptor_valid()) { + raise("break_segment: segment has invalid descriptor\n"); + } + auto ed = seg->get_descriptor(); + auto vd1 = boost::source(ed, graph); + auto vd2 = boost::target(ed, graph); + auto [_, ingraph] = boost::edge(vd1, vd2, graph); + if (! ingraph) { + raise("break_segment: segment not in graph\n"); + } + + const auto& fits = seg->fits(); + auto itfits = closest_point(fits, point, owp_to_point); + + // reject if test point is at begin or end of fits. 
+ if (itfits == fits.begin() || itfits+1 == fits.end()) { + return false; + } + + const auto& wcpts = seg->wcpts(); + auto itwcpts = closest_point(wcpts, point, owp_to_point); + + // clamp the wcpts to not be first/last + if (itwcpts == wcpts.begin()) { + ++itwcpts; + } + else if (itwcpts+1 == wcpts.end()) { + --itwcpts; + } + + + // update graph + remove_segment(graph, seg); + + auto vtx1 = graph[vd1].vertex; + auto vtx2 = graph[vd2].vertex; + auto vtx = make_vertex(graph); + + // WARNING there is no "direction" in the graph. You can not assume the + // "source()" of a segment is closest to the segments first point. As + // of now, at least... + auto seg1 = make_segment(graph, vtx, vtx1); + auto seg2 = make_segment(graph, vtx, vtx2); + + + // fill in the new objects. All three get the middle thing + + seg1->wcpts(std::vector(wcpts.begin(), itwcpts+1)); + seg2->wcpts(std::vector(itwcpts, wcpts.end())); + vtx->wcpt(*itwcpts); + + seg1->fits(std::vector(fits.begin(), itfits+1)); + seg2->fits(std::vector(itfits, fits.end())); + vtx->fit(*itfits); + + //.... 
more for segment + // dir_weak + // flags (dir, shower traj, shower topo) + // particle type and mass and score + // points clouds + + return true; + } + + + double segment_track_length(SegmentPtr seg, int flag, int n1, int n2, WireCell::Vector dir_perp) + { + double length = 0; + + if (flag == 1) { + // Sum dx values from fits (equivalent to original flag==1 case) + auto& fits = seg->fits(); + if (n1>=0 && n2 >=0){ + n1 = std::max(0, n1); + n2 = std::min(static_cast(fits.size())-1, n2); + for (int i = n1; i+1 <= n2; ++i) { + auto& fit = fits[i]; + if (fit.valid() && fit.dx > 0) { + length += fit.dx; + } + } + }else{ + for (auto& fit : fits) { + if (fit.valid() && fit.dx > 0) { + length += fit.dx; + } + } + } + } else { + // Calculate geometric length from fits (equivalent to original flag==0 case) + const auto& fits = seg->fits(); + if (fits.size() < 2) { + return 0.0; + } + if (n1 >=0 && n2 >=0){ + n1 = std::max(0, n1); + n2 = std::min(static_cast(fits.size())-1, n2); + for (int i = n1; i + 1 <= n2; i++) { + const Point& p1 = fits[i].point; + const Point& p2 = fits[i + 1].point; + WireCell::Vector segment_vec = p2 - p1; + if (dir_perp.magnitude() > 0) { + double mag_sq = segment_vec.magnitude() * segment_vec.magnitude(); + double dot_sq = std::pow(segment_vec.dot(dir_perp.norm()), 2); + length += std::sqrt(mag_sq - dot_sq); + }else{ + length += segment_vec.magnitude(); + } + } + }else{ + for (size_t i = 0; i + 1 < fits.size(); i++) { + const Point& p1 = fits[i].point; + const Point& p2 = fits[i + 1].point; + WireCell::Vector segment_vec = p2 - p1; + if (dir_perp.magnitude() > 0) { + double mag_sq = segment_vec.magnitude() * segment_vec.magnitude(); + double dot_sq = std::pow(segment_vec.dot(dir_perp.norm()), 2); + length += std::sqrt(mag_sq - dot_sq); + }else{ + length += segment_vec.magnitude(); + } + } + } + } + + return length; + } + + double segment_track_direct_length(SegmentPtr seg, int n1, int n2, WireCell::Vector dir_perp){ + double length = 0; + + const 
auto& fits = seg->fits(); + if (fits.empty()) { + return 0.0; + } + + if (n1<0 && n2 <0){ + n1 = 0; + n2 = static_cast(fits.size()) - 1; + } + + // Clamp indices to valid range (following WCPPID logic) + if (n1 < 0) n1 = 0; + if (n1 >= static_cast(fits.size())) n1 = static_cast(fits.size()) - 1; + if (n2 < 0) n2 = 0; + if (n2 >= static_cast(fits.size())) n2 = static_cast(fits.size()) - 1; + + const Point& p1 = fits[n1].point; + const Point& p2 = fits[n2].point; + WireCell::Vector temp_dir = p1 - p2; + + if (dir_perp.magnitude() > 0) { + // Calculate length with perpendicular direction subtracted + double mag_sq = temp_dir.magnitude() * temp_dir.magnitude(); + double dot_sq = std::pow(temp_dir.dot(dir_perp.norm()), 2); + length = std::sqrt(mag_sq - dot_sq); + } else { + // Simple direct distance + length = temp_dir.magnitude(); + } + + return length; + } + + double segment_track_max_deviation(SegmentPtr seg, int n1, int n2){ + double max_deviation = 0.0; + + const auto& fits = seg->fits(); + if (fits.empty()) { + return 0.0; + } + + if (n1<0 && n2 <0){ + n1 = 0; + n2 = static_cast(fits.size()) - 1; + } + + // Handle default values and clamp indices (following WCPPID logic) + if (n1 < 0) n1 = 0; + if (n1 >= static_cast(fits.size())) n1 = static_cast(fits.size()) - 1; + if (n2 < 0) n2 = static_cast(fits.size()) - 1; + if (n2 >= static_cast(fits.size())) n2 = static_cast(fits.size()) - 1; + + // Ensure n1 <= n2 + if (n1 > n2) std::swap(n1, n2); + + if (n1 != n2) { + const Point& p1 = fits[n1].point; + const Point& p2 = fits[n2].point; + WireCell::Vector line_vec = p2 - p1; + double line_length_sq = line_vec.magnitude2(); + + for (int i = n1; i <= n2; i++) { + const Point& test_point = fits[i].point; + + if (line_length_sq > 0) { + // Calculate distance from point to line using projection + WireCell::Vector point_vec = test_point - p1; + double projection = point_vec.dot(line_vec) / line_length_sq; + + // Clamp projection to line segment bounds + projection = 
std::max(0.0, std::min(1.0, projection)); + + // Find closest point on line segment + Point closest_on_line = p1 + line_vec * projection; + + // Calculate distance + double distance = (test_point - closest_on_line).magnitude(); + + if (distance > max_deviation) { + max_deviation = distance; + } + } else { + // Line has zero length, distance is just point-to-point distance + double distance = (test_point - p1).magnitude(); + if (distance > max_deviation) { + max_deviation = distance; + } + } + } + } + + return max_deviation; + } + + + + + double segment_median_dQ_dx(SegmentPtr seg) + { + auto& fits = seg->fits(); + if (fits.empty()) { + return 0.0; + } + + std::vector vec_dQ_dx; + vec_dQ_dx.reserve(fits.size()); + + for (auto& fit : fits) { + if (fit.valid() && fit.dx > 0 && fit.dQ >= 0) { + // Add small epsilon to avoid division by zero (same as original) + vec_dQ_dx.push_back(fit.dQ / (fit.dx + 1e-9)); + } + } + + if (vec_dQ_dx.empty()) { + return 0.0; + } + + // Use nth_element to find median (same algorithm as original) + size_t median_index = vec_dQ_dx.size() / 2; + std::nth_element(vec_dQ_dx.begin(), + vec_dQ_dx.begin() + median_index, + vec_dQ_dx.end()); + + return vec_dQ_dx[median_index]; + } + + double segment_rms_dQ_dx(SegmentPtr seg) + { + auto& fits = seg->fits(); + if (fits.empty()) { + return 0.0; + } + + std::vector vec_dQ_dx; + vec_dQ_dx.reserve(fits.size()); + + for (auto& fit : fits) { + if (fit.valid() && fit.dx > 0 && fit.dQ >= 0) { + // Add small epsilon to avoid division by zero (same as original) + vec_dQ_dx.push_back(fit.dQ / (fit.dx + 1e-9)); + } + } + + if (vec_dQ_dx.empty()) { + return 0.0; + } + + // Calculate mean + double sum = std::accumulate(vec_dQ_dx.begin(), vec_dQ_dx.end(), 0.0); + double mean = sum / vec_dQ_dx.size(); + + // Calculate variance + double sq_sum = std::inner_product(vec_dQ_dx.begin(), vec_dQ_dx.end(), vec_dQ_dx.begin(), 0.0); + double variance = sq_sum / vec_dQ_dx.size() - mean * mean; + + return std::sqrt(variance); 
// NOTE(review): this span is diff-formatted text ('+' line prefixes) whose template
// arguments appear stripped by an extraction step (e.g. "std::vector>",
// "static_cast(fits.size())"). Code is kept byte-identical; comments only.
// Contents of the next line:
//  - closing '}' of a function cut off above this chunk;
//  - segment_track_length_threshold: sums fit.dx over valid fit points whose
//    dQ/dx = dQ/(dx+1e-9) exceeds `threshold`; threshold==0 accepts every valid point;
//  - segment_geometric_length: thin wrapper over segment_track_length;
//  - eval_ks_ratio: heuristic on two KS-like scores and charge ratios; returns true
//    when hypothesis 2 is clearly favored. The 0.06/1.4/0.09/1.5/0.2/0.3 constants
//    look like tuned cuts inherited from the Wire-Cell prototype — do not alter casually.
//  - segment_is_shower_trajectory (begins): splits a track (<50 cm) into ~step_size
//    sections; each section is later tested for shower-likeness.
+ } + + + double segment_track_length_threshold(SegmentPtr seg, double threshold) + { + auto& fits = seg->fits(); + if (fits.empty()) { + return 0.0; + } + + double length = 0; + for (auto& fit : fits) { + if (fit.valid() && fit.dx > 0 ) { + double dQ_dx = fit.dQ / (fit.dx + 1e-9); // Add epsilon to avoid division by zero + if (dQ_dx > threshold || threshold == 0) { + length += fit.dx; + } + } + } + + return length; + } + + + + + + + double segment_geometric_length(SegmentPtr seg, int n1, int n2, WireCell::Vector dir_perp) + { + return segment_track_length(seg, 0, n1, n2, dir_perp); // Always use geometric calculation + } + + + bool eval_ks_ratio(double ks1, double ks2, double ratio1, double ratio2){ + // std::cout << ks1 << " " << ks2 << " " << ratio1 << " " << ratio2 << " " << sqrt(pow(ks2/0.06,2)+pow((ratio2-1)/0.06,2)) << " " << ks1-ks2 + (fabs(ratio1-1)-fabs(ratio2-1))/1.5*0.3 << " " << ks1-ks2 + (fabs(ratio1-1)-fabs(ratio2-1))/1.5*0.3 << " " << std::endl; + if (ks1-ks2 >= 0.0) return false; + if (sqrt(pow(ks2/0.06,2)+pow((ratio2-1)/0.06,2))< 1.4 && ks1-ks2 + (fabs(ratio1-1)-fabs(ratio2-1))/1.5*0.3 > -0.02) return false; + + if (ks1 - ks2 < -0.02 && ((ks2 > 0.09 && fabs(ratio2-1) >0.1) || ratio2 > 1.5 || ks2 > 0.2)) return true; + if ( ks1-ks2 + (fabs(ratio1-1)-fabs(ratio2-1))/1.5*0.3 < 0) return true; + + return false; + } + + bool segment_is_shower_trajectory(SegmentPtr seg, double step_size, double mip_dQ_dx){ + bool flag_shower_trajectory = false; + double length = segment_track_length(seg, 0); + + // Too long + if (length > 50 * units::cm) return flag_shower_trajectory; + + const auto& fits = seg->fits(); + if (fits.empty()) return flag_shower_trajectory; + + int ncount = std::round(length / step_size); + if (ncount == 0) ncount = 1; + + std::vector> sections(ncount); + for (int i = 0; i < ncount; i++) { + sections[i] = std::make_pair( + std::round(fits.size() / ncount * i), + std::round(fits.size() / ncount * (i + 1)) + ); + } + sections.back().second = 
// NOTE(review): continuation of segment_is_shower_trajectory. Per section it compares
// direct vs integrated track length and max deviation; sections whose direction is
// near-perpendicular to the drift axis (x) use the non-parallel branch. A section
// counts as shower-like when dQ/dx is low (< 2 MIP) and the path is wiggly
// (length_ratio below cut). Note the loop mixes size_t j with int ncount — benign
// here since ncount >= 1, but a sign-compare warning.
fits.size() - 1; + + int n_shower_like = 0; + WireCell::Vector drift_dir(1, 0, 0); + + for (size_t j = 0; j < ncount; j++) { + int first_idx = sections[j].first; + int second_idx = sections[j].second; + + if (first_idx >= static_cast(fits.size())) first_idx = fits.size() - 1; + if (second_idx >= static_cast(fits.size())) second_idx = fits.size() - 1; + + WireCell::Vector dir_1 = fits[first_idx].point - fits[second_idx].point; + if (dir_1.magnitude() > 0) { + dir_1 = dir_1.norm(); + } + + double tmp_dQ_dx = segment_median_dQ_dx(seg) / (mip_dQ_dx); + + // Calculate angle difference + double dot_product = drift_dir.dot(dir_1); + double angle_rad = std::acos(std::max(-1.0, std::min(1.0, dot_product))); + double angle_diff = std::abs(angle_rad / M_PI * 180.0 - 90.0); + + if (angle_diff > 10) { // Not parallel case + double direct_length = segment_track_direct_length(seg, first_idx, second_idx, WireCell::Vector(0,0,0)); + double integrated_length = segment_track_length(seg, 0, first_idx, second_idx, WireCell::Vector(0,0,0)); + double max_dev = segment_track_max_deviation(seg, first_idx, second_idx); + + double length_ratio; + if (direct_length == 0) length_ratio = 1; + else length_ratio = direct_length / integrated_length; + + if (tmp_dQ_dx * 0.11 + 2 * length_ratio < 2.03 && + tmp_dQ_dx < 2 && + length_ratio < 0.95 && + (angle_diff < 60 || integrated_length < 10 * units::cm || + (integrated_length >= 10 * units::cm && max_dev > 0.75 * units::cm))) { + n_shower_like++; + } + } else { // Parallel case + WireCell::Vector dir_2 = drift_dir.cross(dir_1); + if (dir_2.magnitude() > 0) { + dir_2 = dir_2.norm(); + } + + double direct_length = segment_track_direct_length(seg, first_idx, second_idx, dir_2); + double integrated_length = segment_track_length(seg, 0, first_idx, second_idx, dir_2); + double max_dev = segment_track_max_deviation(seg, first_idx, second_idx); + + double length_ratio; + if (direct_length == 0) length_ratio = 1; + else length_ratio = direct_length / 
// NOTE(review): end of segment_is_shower_trajectory — a segment is flagged (and the
// kShowerTrajectory flag set) when >= half the sections are shower-like. Then two
// segment_cal_dir_3vector overloads: (seg) averages the first/last ~4 point offsets
// per seg->dirsign(); (seg, p, dis_cut) averages fit points within dis_cut of p.
// The backward-loop guard "(fits.size() - i - 1) < fits.size()" relies on unsigned
// wraparound to stop at i == fits.size()-1 — correct but obscure; left as-is.
integrated_length; + + if (tmp_dQ_dx * 0.11 + 2 * length_ratio < 2.06 && + tmp_dQ_dx < 2 && + length_ratio < 0.97 && + (integrated_length < 10 * units::cm || + (integrated_length >= 10 * units::cm && max_dev > 0.75 * units::cm))) { + n_shower_like++; + } + } + } + + if (n_shower_like >= 0.5 * sections.size()) { + flag_shower_trajectory = true; + } + + // Set the flag on the segment if it's identified as shower trajectory + if (flag_shower_trajectory) { + seg->set_flags(SegmentFlags::kShowerTrajectory); + } + + return flag_shower_trajectory; + } + + WireCell::Vector segment_cal_dir_3vector(SegmentPtr seg){ + const auto& fits = seg->fits(); + if (fits.size() < 2) { + return WireCell::Vector(0, 0, 0); + } + + WireCell::Point p(0, 0, 0); + int flag_dir = seg->dirsign(); + + if (flag_dir == 1) { + // Forward direction: from first point using next few points + for (size_t i = 1; i < 5 && i < fits.size(); i++) { + p = p + (fits[i].point - fits[0].point); + } + } else if (flag_dir == -1) { + // Backward direction: from last point using previous few points + for (size_t i = 1; i < 5 && (fits.size() - i - 1) < fits.size(); i++) { + if (fits.size() - i - 1 < fits.size()) { + p = p + (fits[fits.size() - i - 1].point - fits.back().point); + } + } + } else { + // Default case (flag_dir == 0): use forward direction + for (size_t i = 1; i < 5 && i < fits.size(); i++) { + p = p + (fits[i].point - fits[0].point); + } + } + + WireCell::Vector v1(p.x(), p.y(), p.z()); + if (v1.magnitude() > 0) { + v1 = v1.norm(); + } + return v1; + } + + WireCell::Vector segment_cal_dir_3vector(SegmentPtr seg, WireCell::Point& p, double dis_cut){ + const auto& fits = seg->fits(); + if (fits.empty()) { + return WireCell::Vector(0, 0, 0); + } + + WireCell::Point p1(0, 0, 0); + int ncount = 0; + + for (size_t i = 0; i < fits.size(); i++) { + double dis = (fits[i].point - p).magnitude(); + if (dis < dis_cut) { + p1 = p1 + fits[i].point; + ncount++; + } + } + + if (ncount == 0) { + return 
// NOTE(review): third segment_cal_dir_3vector overload: averaged direction over
// num_points fit points starting at index `start` (start must be in [1, size)).
// segment_cal_kine_dQdx: integrates dE over fit points via the recombination
// model; endpoint dx values are clamped to 1.5x the neighbor spacing; dQ is
// zeroed when dQ/dx exceeds 1000x a 43e3/cm scale (presumably a MIP-scale
// sanity cut — TODO confirm against the calling convention).
WireCell::Vector(0, 0, 0); + } + + WireCell::Point avg_point = p1 * (1.0 / ncount); + WireCell::Vector v1 = avg_point - p; + if (v1.magnitude() > 0) { + v1 = v1.norm(); + } + return v1; + } + + WireCell::Vector segment_cal_dir_3vector(SegmentPtr seg, int direction, int num_points, int start){ + const auto& fits = seg->fits(); + if (fits.empty() || start >= static_cast(fits.size()) || start <= 0) { + std::cout << "bad start point in segment_cal_dir_3vector" << std::endl; + return WireCell::Vector(0, 0, 0); + } + + WireCell::Point p(0, 0, 0); + + if (direction == 1) { + // Forward direction + for (int i = start; i < start + num_points - 1 && i < static_cast(fits.size()); i++) { + p = p + (fits[i].point - fits[start - 1].point); + } + } else if (direction == -1) { + // Backward direction + for (int i = start; i < start + num_points - 1 && (fits.size() - i - 1) < fits.size(); i++) { + if (fits.size() - start < fits.size()) { + p = p + (fits[fits.size() - i - 1].point - fits[fits.size() - start].point); + } + } + } + + WireCell::Vector v1(p.x(), p.y(), p.z()); + if (v1.magnitude() > 0) { + v1 = v1.norm(); + } + return v1; + } + + double segment_cal_kine_dQdx(SegmentPtr seg, const IRecombinationModel::pointer& recomb_model){ + if (!seg || !recomb_model) { + return 0.0; + } + + auto& fits = seg->fits(); + if (fits.empty()) { + return 0.0; + } + + double kine_energy = 0.0; + + + for (size_t i = 0; i < fits.size(); i++) { + if (!fits[i].valid() || fits[i].dx <= 0) continue; + + double dX = fits[i].dx; + double dQ = fits[i].dQ; + if (i == 0 && fits.size() > 1) { + // First point: check against distance to next point + double dis = (fits[1].point - fits[0].point).magnitude(); + if (dX> dis * 1.5) { + dX = dis; + } + } else if (i + 1 == fits.size() && fits.size() > 1) { + // Last point: check against distance to previous point + double dis = (fits[i].point - fits[i-1].point).magnitude(); + if (dX > dis * 1.5) { + dX = dis; + } + } + // std::cout << i << " " << fits[i].dQ << " 
// NOTE(review): tail of segment_cal_kine_dQdx (dE clamped to [0, 50 MeV/cm * dX]);
// cal_kine_dQdx: the same integration over plain dQ/dx vectors; do_track_comp
// (begins): collects the last `compare_range` of the (L, dQ/dx) profile, measured
// back from end_L = L.back() + 0.15 cm - offset_length.
" << fits[i].dx/units::cm << " " << dX/units::cm << std::endl; + // Filter out unreasonable values (same threshold as original) + if (dQ/dX / (43e3/units::cm) > 1000) dQ = 0; + + // Calculate dE/dx using Box model inverse formula from original code + double dE = recomb_model->dE(dQ, dX); + + // std::cout << dQ << " " << dX << " " << dE << std::endl; + + // Apply bounds (same as original) + if (dE < 0) dE = 0; + if (dE > 50 * units::MeV / units::cm * dX) dE = 50 * units::MeV / units::cm * dX; + + // Calculate path length with special handling for first and last points + kine_energy += dE; + } + + return kine_energy; + } + + double cal_kine_dQdx(std::vector& vec_dQ, std::vector& vec_dx, const IRecombinationModel::pointer& recomb_model){ + if (vec_dQ.size() != vec_dx.size() || vec_dQ.empty() || !recomb_model) { + return 0.0; + } + + double kine_energy = 0.0; + + for (size_t i = 0; i < vec_dQ.size(); i++) { + // Calculate dQ/dx with units conversion (same as original) + double dQ = vec_dQ[i]; + double dx = vec_dx[i]; + + // Filter out unreasonable values (same threshold as original) + if (dQ/dx / (43e3/units::cm) > 1000) dQ = 0; + + // Calculate dE/dx using Box model inverse formula from original code + double dE = recomb_model->dE(dQ, dx); + + // Apply bounds (same as original) + if (dE < 0) dE = 0; + if (dE > 50 * units::MeV / units::cm * dx) dE = 50 * units::MeV / units::cm * dx; + + // Calculate path length with special handling for first and last points + kine_energy += dE; + } + + return kine_energy; + } + + std::vector do_track_comp(std::vector& L , std::vector& dQ_dx, double compare_range, double offset_length, const Clus::ParticleDataSet::pointer& particle_data, double MIP_dQdx){ + + double end_L = L.back() + 0.15*units::cm - offset_length; + + int ncount = 0; + std::vector vec_x; + std::vector vec_y; + + for (size_t i = 0; i != L.size(); i++) { + if (end_L - L.at(i) < compare_range && end_L - L.at(i) > 0) { // check up to compared range + 
// NOTE(review): middle of do_track_comp — builds muon/constant-MIP/proton/electron
// dE/dx reference profiles from particle_data and runs a KS-like comparison plus a
// total-charge ratio (1e-9 guards the empty/zero-sum case) against each. Result
// vector layout: [0]=direction metric (eval_ks_ratio as 0/1), [1..3]=distance in
// (ks, ratio-1) space for muon/proton/electron.
vec_x.push_back(end_L - L.at(i)); + vec_y.push_back(dQ_dx.at(i)); + ncount++; + } + } + + // Create reference vectors for different particles + std::vector muon_ref(ncount); + std::vector const_ref(ncount, MIP_dQdx); // MIP-like constant + std::vector proton_ref(ncount); + std::vector electron_ref(ncount); + + for (size_t i = 0; i != ncount; i++) { + muon_ref[i] = particle_data->get_dEdx_function("muon")->scalar_function((vec_x[i])/units::cm) /units::cm; + proton_ref[i] = particle_data->get_dEdx_function("proton")->scalar_function((vec_x[i])/units::cm)/ units::cm; + electron_ref[i] = particle_data->get_dEdx_function("electron")->scalar_function((vec_x[i])/units::cm)/ units::cm; + } + + // Perform KS-like tests using kslike_compare + double ks1 = WireCell::kslike_compare(vec_y, muon_ref); + double ratio1 = std::accumulate(muon_ref.begin(), muon_ref.end(), 0.0) / + (std::accumulate(vec_y.begin(), vec_y.end(), 0.0) + 1e-9); + + double ks2 = WireCell::kslike_compare(vec_y, const_ref); + double ratio2 = std::accumulate(const_ref.begin(), const_ref.end(), 0.0) / + (std::accumulate(vec_y.begin(), vec_y.end(), 0.0) + 1e-9); + + double ks3 = WireCell::kslike_compare(vec_y, proton_ref); + double ratio3 = std::accumulate(proton_ref.begin(), proton_ref.end(), 0.0) / + (std::accumulate(vec_y.begin(), vec_y.end(), 0.0) + 1e-9); + + double ks4 = WireCell::kslike_compare(vec_y, electron_ref); + double ratio4 = std::accumulate(electron_ref.begin(), electron_ref.end(), 0.0) / + (std::accumulate(vec_y.begin(), vec_y.end(), 0.0) + 1e-9); + + std::vector results; + // Convert bool result to double (1.0 for true, 0.0 for false) + results.push_back(eval_ks_ratio(ks1, ks2, ratio1, ratio2) ? 
// NOTE(review): end of do_track_comp; cal_kine_range maps track length to kinetic
// energy via per-particle range tables (PDG |code|: 11 e, 13 mu, 211 pi, 321 K,
// 2212 p; unknown falls back to muon); segment_do_track_pid (begins): runs
// do_track_comp on the profile and on its reversal to vote on direction.
1.0 : 0.0); // direction metric + results.push_back(sqrt(pow(ks1, 2) + pow(ratio1-1, 2))); // muon information + results.push_back(sqrt(pow(ks3, 2) + pow(ratio3-1, 2))); // proton information + results.push_back(sqrt(pow(ks4, 2) + pow(ratio4-1, 2))); // electron information + + return results; + } + + double cal_kine_range(double L, int pdg_code, const Clus::ParticleDataSet::pointer& particle_data){ + + IScalarFunction::pointer range_function = nullptr; + + if (abs(pdg_code) == 11) { // electron + range_function = particle_data->get_range_function("electron"); + } + else if (abs(pdg_code) == 13) { // muon + range_function = particle_data->get_range_function("muon"); + } + else if (abs(pdg_code) == 211) { // pion + range_function = particle_data->get_range_function("pion"); + } + else if (abs(pdg_code) == 321) { // kaon + range_function = particle_data->get_range_function("kaon"); + } + else if (abs(pdg_code) == 2212) { // proton + range_function = particle_data->get_range_function("proton"); + } + + if (!range_function) { + // Default to muon if particle type not recognized + range_function = particle_data->get_range_function("muon"); + } + + double kine_energy = range_function->scalar_function(L/units::cm) * units::MeV; + return kine_energy; + } + + // success, flag_dir, particle_type, particle_score + std::tuple segment_do_track_pid(SegmentPtr segment, std::vector& L , std::vector& dQ_dx, double compare_range , double offset_length, bool flag_force, const Clus::ParticleDataSet::pointer& particle_data, double MIP_dQdx){ + + if (L.size() != dQ_dx.size() || L.empty() || !segment) { + return std::make_tuple(false, 0, 0, 0.0); + } + + std::vector rL(L.size(), 0); + std::vector rdQ_dx(L.size(), 0); + + // Get reverse vectors + for (size_t i = 0; i != L.size(); i++) { + rL.at(i) = L.back() - L.at(L.size() - 1 - i); + rdQ_dx.at(i) = dQ_dx.at(L.size() - 1 - i); + } + + std::vector result_forward = do_track_comp(L, dQ_dx, compare_range, offset_length, particle_data, 
// NOTE(review): middle of segment_do_track_pid — picks the particle hypothesis with
// the smallest (ks, ratio) distance per direction (muon default 13; proton 2212;
// electron 11 only for tracks under 20 cm), then resolves direction from the
// forward/backward votes.
MIP_dQdx); + std::vector result_backward = do_track_comp(rL, rdQ_dx, compare_range, offset_length, particle_data, MIP_dQdx); + + // Direction determination + bool flag_forward = static_cast(std::round(result_forward.at(0))); + bool flag_backward = static_cast(std::round(result_backward.at(0))); + + // Calculate length from path (total walk length over fits or wcpts) + double length = segment_track_length(segment, 0); + + + // // Calculate straight-line distance between endpoints (length1 equivalent) + // double length1 = 0.0; + // auto& fits = segment->fits(); + // length1 = (fits.front().point - fits.back().point).magnitude(); + + // Forward particle type determination + int forward_particle_type = 13; // default muon + double min_forward_val = result_forward.at(1); + if (result_forward.at(2) < min_forward_val) { + min_forward_val = result_forward.at(2); + forward_particle_type = 2212; // proton + } + if (result_forward.at(3) < min_forward_val && length < 20*units::cm) { + min_forward_val = result_forward.at(3); + forward_particle_type = 11; // electron + } + + // Backward particle type determination + int backward_particle_type = 13; // default muon + double min_backward_val = result_backward.at(1); + if (result_backward.at(2) < min_backward_val) { + min_backward_val = result_backward.at(2); + backward_particle_type = 2212; // proton + } + if (result_backward.at(3) < min_backward_val && length < 20*units::cm) { + min_backward_val = result_backward.at(3); + backward_particle_type = 11; // electron + } + + // Decision logic + int flag_dir = 0; + int particle_type = 0; + double particle_score = 0.0; + + if (flag_forward == 1 && flag_backward == 0) { + flag_dir = 1; + particle_type = forward_particle_type; + particle_score = min_forward_val; + return std::make_tuple(true, flag_dir, particle_type, particle_score); + } + else if (flag_forward == 0 && flag_backward == 1) { + flag_dir = -1; + particle_type = backward_particle_type; + particle_score = min_backward_val; + 
return std::make_tuple(true, flag_dir, particle_type, particle_score); + } + else if (flag_forward == 1 && flag_backward == 1) { + if (min_forward_val < min_backward_val) { + flag_dir = 1; + particle_type = forward_particle_type; + particle_score = min_forward_val; + } + else { + flag_dir = -1; + particle_type = backward_particle_type; + particle_score = min_backward_val; + } + return std::make_tuple(true, flag_dir, particle_type, particle_score); + } + else if (flag_forward == 0 && flag_backward == 0 && flag_force) { + if (min_forward_val < min_backward_val) { + particle_score = min_forward_val; + particle_type = forward_particle_type; + flag_dir = 1; + } + else { + particle_score = min_backward_val; + particle_type = backward_particle_type; + flag_dir = -1; + } + return std::make_tuple(true, flag_dir, particle_type, particle_score); + } + + // Reset before return - failure case + return std::make_tuple(false, 0, 0, 0.0); + } + + // 4-momentum: E, px, py, pz, + WireCell::D4Vector segment_cal_4mom(SegmentPtr segment, int pdg_code, const Clus::ParticleDataSet::pointer& particle_data, const IRecombinationModel::pointer& recomb_model, double MIP_dQdx){ + double length = segment_track_length(segment, 0); + double kine_energy = 0; + + WireCell::D4Vector results(0.0, 0.0, 0.0, 0.0); // 4-momentum: E, px, py, pz + + if (length < 4*units::cm){ + kine_energy = segment_cal_kine_dQdx(segment, recomb_model); // short track + }else if (segment->flags_any(PR::SegmentFlags::kShowerTrajectory)){ + kine_energy = segment_cal_kine_dQdx(segment, recomb_model); + }else{ + kine_energy = cal_kine_range(length, pdg_code, particle_data); + } + // results[4] = kine_energy; + + double particle_mass = particle_data->get_particle_mass(pdg_code); + + results[0]= kine_energy + particle_mass; + double mom = sqrt(pow(results[3],2) - pow(particle_mass,2)); + auto v1 = segment_cal_dir_3vector(segment); + results[1] = mom * v1.x(); + results[2] = mom * v1.y(); + results[3] = mom * v1.z(); + + return 
// NOTE(review): tail token of segment_cal_4mom ("results;"), then
// segment_determine_dir_track: builds the (L, dQ/dx) profile over the fit points
// (dropping the first/last point when the corresponding vertex multiplicity
// start_n/end_n != 1), then runs segment_do_track_pid at 35 cm and, on failure,
// 15 cm compare range. The calls pass `particle_data` as the last argument —
// presumably MIP_dQdx has a default in the declaration; verify against the header.
results; + } + + void segment_determine_dir_track(SegmentPtr segment, int start_n, int end_n, const Clus::ParticleDataSet::pointer& particle_data, const IRecombinationModel::pointer& recomb_model, double MIP_dQdx, bool flag_print) { + if (!segment || !particle_data) { + return; + } + + // Reset direction flag + segment->dirsign(0); + + const auto& fits = segment->fits(); + int npoints = fits.size(); + int start_n1 = 0, end_n1 = npoints - 1; + + // If more than one point, exclude the vertex + if (end_n != 1) { + end_n1 = npoints - 2; + npoints -= 1; + } + if (start_n != 1) { + npoints -= 1; + start_n1 = 1; + } + + if (npoints == 0 || end_n1 < start_n1) return; + + std::vector L(npoints, 0); + std::vector dQ_dx(npoints, 0); + + double dis = 0; + for (int i = start_n1; i <= end_n1; i++) { + L.at(i - start_n1) = dis; + if (fits[i].dx > 0) { + dQ_dx.at(i - start_n1) = fits[i].dQ / (fits[i].dx + 1e-9); + } + if (i + 1 < static_cast(fits.size())) { + dis += (fits[i+1].point - fits[i].point).magnitude(); + } + } + + int pdg_code = 0; + double particle_score = 0.0; + + if (npoints >= 2) { // reasonably long + bool tmp_flag_pid = false; + + if (start_n == 1 && end_n == 1 && npoints >= 15) { + // Can use the dQ/dx to do PID and direction + auto result = segment_do_track_pid(segment, L, dQ_dx, 35*units::cm, 1*units::cm, true, particle_data); + tmp_flag_pid = std::get<0>(result); + if (tmp_flag_pid) { + segment->dirsign(std::get<1>(result)); + pdg_code = std::get<2>(result); + particle_score = std::get<3>(result); + } + + if (!tmp_flag_pid) { + result = segment_do_track_pid(segment, L, dQ_dx, 15*units::cm, 1*units::cm, true, particle_data); + tmp_flag_pid = std::get<0>(result); + if (tmp_flag_pid) { + segment->dirsign(std::get<1>(result)); + pdg_code = std::get<2>(result); + particle_score = std::get<3>(result); + } + } + } else { + // Can use the dQ/dx to do PID and direction + auto result = segment_do_track_pid(segment, L, dQ_dx, 35*units::cm, 0*units::cm, false, 
// NOTE(review): fallbacks when PID failed (pdg_code==0): classify by median dQ/dx
// against MIP thresholds (>1.75 MIP proton, <1.2 MIP muon); electron calls with
// occupied endpoints are demoted to weak/undirected (score<0.15 -> muon); very
// short (<1.5 cm) vertex stubs get direction from vertex multiplicity and may be
// tagged proton/pion.
particle_data); + tmp_flag_pid = std::get<0>(result); + if (tmp_flag_pid) { + segment->dirsign(std::get<1>(result)); + pdg_code = std::get<2>(result); + particle_score = std::get<3>(result); + } + + if (!tmp_flag_pid) { + result = segment_do_track_pid(segment, L, dQ_dx, 15*units::cm, 0*units::cm, false, particle_data); + tmp_flag_pid = std::get<0>(result); + if (tmp_flag_pid) { + segment->dirsign(std::get<1>(result)); + pdg_code = std::get<2>(result); + particle_score = std::get<3>(result); + } + } + } + } + + double length = segment_track_length(segment, 0); + + // Short track what to do??? + if (pdg_code == 0) { + // Calculate median dQ/dx + double medium_dQ_dx = segment_median_dQ_dx(segment); + if (medium_dQ_dx > MIP_dQdx * 1.75) { + pdg_code = 2212; // proton + } else if (medium_dQ_dx < MIP_dQdx * 1.2) { + pdg_code = 13; // muon + } else if (medium_dQ_dx < MIP_dQdx * 1.5 && length < 4*units::cm) { + pdg_code = 13; + } + } + + // Electron and both end contain stuff + if (abs(pdg_code) == 11 && (start_n > 1 && end_n > 1)) { + segment->dir_weak(true); + segment->dirsign(0); + if (particle_score < 0.15) pdg_code = 13; + } else if (abs(pdg_code) == 11 && ((start_n > 1 && segment->dirsign() == -1) || (end_n > 1 && segment->dirsign() == 1))) { + segment->dir_weak(true); + segment->dirsign(0); + if (particle_score < 0.15) pdg_code = 13; + } else if (length < 1.5*units::cm) { + segment->dir_weak(true); + } + + // Vertex activities + if (length < 1.5*units::cm && (start_n == 1 || end_n == 1)) { + if (start_n == 1 && end_n > 2) { + segment->dirsign(-1); + double medium_dQ_dx = segment_median_dQ_dx(segment); + if (medium_dQ_dx > MIP_dQdx * 1.75) { + pdg_code = 2212; + } else if (medium_dQ_dx < MIP_dQdx * 1.2) { + pdg_code = 211; + } + } else if (end_n == 1 && start_n > 2) { + segment->dirsign(1); + double medium_dQ_dx = segment_median_dQ_dx(segment); + if (medium_dQ_dx > MIP_dQdx * 1.75) { + pdg_code = 2212; + } else if (medium_dQ_dx < MIP_dQdx * 1.2) { + pdg_code = 211; + 
// NOTE(review): end of segment_determine_dir_track: long tracks with a very bad
// score (1.0 < score < 100) are reclassified as shower (pdg 11, score forced to
// 200, direction cleared). When pdg_code != 0 a ParticleInfo (pdg, mass, name,
// 4-momentum) is attached to the segment — note particle_info is NOT set when
// pdg_code stays 0. segment_cal_4mom is called without MIP_dQdx here too —
// presumably a default argument; verify. Then the head of
// segment_determine_shower_direction_trajectory.
} + } + } + + // If the particle score is really bad, make it a shower + if (length > 10*units::cm && particle_score > 1.0 && particle_score < 100) { + pdg_code = 11; + particle_score = 200; + segment->dirsign(0); + } + + // Set particle mass and calculate 4-momentum + if (pdg_code != 0) { + // Calculate 4-momentum using the identified particle type + auto four_momentum = segment_cal_4mom(segment, pdg_code, particle_data, recomb_model); + + // Create ParticleInfo with the identified particle + auto pinfo = std::make_shared( + pdg_code, // PDG code + particle_data->get_particle_mass(pdg_code), // mass + particle_data->pdg_to_name(pdg_code), // name + four_momentum // 4-momentum + ); + + // Set additional properties if available + pinfo->set_particle_score(particle_score); // This method would need to be added + + // Store particle info in segment (this would require adding particle_info to Segment class) + segment->particle_info(pinfo); + } + + if (flag_print && pdg_code != 0) { + std::cout << "Segment PID: PDG=" << pdg_code + << ", Score=" << particle_score + << ", Length=" << length / units::cm << " cm" + << ", Direction=" << segment->dirsign() + << (segment->dir_weak() ? " (weak)" : "") + << ", Medium dQ/dx=" << segment_median_dQ_dx(segment) / (MIP_dQdx) + << " MIP" + << std::endl; + } + } + + void segment_determine_shower_direction_trajectory(SegmentPtr segment, int start_n, int end_n, const Clus::ParticleDataSet::pointer& particle_data, const IRecombinationModel::pointer& recomb_model, double MIP_dQdx, bool flag_print){ + segment->dirsign(0); + double length = segment_track_length(segment, 0); + + // hack for now ... 
+ int pdg_code = 11; + + if (start_n==1 && end_n >1){ + segment->dirsign(-1); + }else if (start_n > 1 && end_n == 1){ + segment->dirsign(1); + }else{ + segment_determine_dir_track(segment, start_n, end_n, particle_data, recomb_model, MIP_dQdx, false); + if (segment->particle_info()->pdg() != 11){ + segment->dirsign(0); + } + } + + auto four_momentum = segment_cal_4mom(segment, pdg_code, particle_data, recomb_model); + + // Create ParticleInfo with the identified particle + auto pinfo = std::make_shared( + pdg_code, // PDG code + particle_data->get_particle_mass(pdg_code), // mass + particle_data->pdg_to_name(pdg_code), // name + four_momentum // 4-momentum + ); + + // Store particle info in segment (this would require adding particle_info to Segment class) + segment->particle_info(pinfo); + + if (flag_print ) { + std::cout << "Segment PID: PDG=" << pdg_code + << ", Length=" << length / units::cm << " cm" + << ", Direction=" << segment->dirsign() + << (segment->dir_weak() ? " (weak)" : "") + << ", Medium dQ/dx=" << segment_median_dQ_dx(segment) / (MIP_dQdx) + << " MIP" + << std::endl; + } + + + } + + void clustering_points_segments(std::set segments, const IDetectorVolumes::pointer& dv, const std::string& cloud_name, double search_range, double scaling_2d){ + std::map > map_cluster_segs; + for (auto seg : segments){ + if (seg->cluster()){ + map_cluster_segs[seg->cluster()].insert(seg); + } + } + + for (auto it : map_cluster_segs){ + auto clus = it.first; + auto& segs = it.second; + + // get the default point cloud from cluster + const auto& points = clus->points(); + + // Get the graph directly + const auto& graph = clus->find_graph("basic_pid"); + + std::map> map_segment_points; + std::map> map_pindex_segment; + //std::cout << "Cluster has " << npoints << " points and " << segs.size() << " segments." 
// NOTE(review): clustering_points_segments continues — per cluster, each segment
// claims a few "terminal" cluster points (5-NN around its interior fit points, or
// around the midpoint of its only two fit points). CAUTION: the else branch below
// indexes fits[1] unconditionally, which is out-of-bounds if fits.size() < 2 —
// presumably callers guarantee >= 2 fit points; verify upstream.
<< std::endl; + //std::cout << "Number of vertices in the graph: " << boost::num_vertices(graph) << std::endl; + + // core algorithms + + // define steiner terminal for segments ... + for (auto seg: segs){ + auto& fits = seg->fits(); + if (fits.size() > 2){ + for (size_t i = 1; i+1 < fits.size(); i++){ + geo_point_t gp = {fits[i].point.x(), fits[i].point.y(), fits[i].point.z()}; + // use cluster to get the indices of the closest 5 points + auto closest_results = clus->kd_knn(5, gp); + for (const auto& [point_index, distance] : closest_results) { + if (map_pindex_segment.find(point_index) == map_pindex_segment.end()) { + map_pindex_segment[point_index] = std::make_pair(seg, distance); + break; + } + } + } + }else{ + geo_point_t gp = {(fits[0].point.x()+fits[1].point.x())/2., (fits[0].point.y()+fits[1].point.y())/2., (fits[0].point.z()+fits[1].point.z())/2.}; + auto closest_results = clus->kd_knn(5, gp); + for (const auto& [point_index, distance] : closest_results) { + if (map_pindex_segment.find(point_index) == map_pindex_segment.end()) { + map_pindex_segment[point_index] = std::make_pair(seg, distance); + break; + } + } + } + } + + + // these are terminals ... 
// NOTE(review): a weighted-graph Voronoi partition assigns every cluster point to
// its nearest terminal, hence to a candidate owning segment. The loop
// "for (size_t i=0;i!=num_graph_vertices;i++)" compares size_t with int — benign
// for non-negative counts but a sign-compare warning.
+ if (map_pindex_segment.size()>0){ + + // Convert terminals from int to vertex_type + std::vector terminals; + for (auto it = map_pindex_segment.begin(); it!=map_pindex_segment.end(); it++){ + terminals.push_back(static_cast(it->first)); + } + + auto vor = WireCell::Clus::Graphs::Weighted::voronoi(graph, terminals); + + // Now we can find the nearest terminal for every vertex in the graph + // The Voronoi diagram provides: + // - vor.terminal[v]: the nearest terminal vertex for vertex v + // - vor.distance[v]: the distance to the nearest terminal for vertex v + std::map> vertex_to_nearest_terminal; + // Iterate through all vertices in the graph + const int num_graph_vertices = boost::num_vertices(graph); + for (int vertex_idx = 0; vertex_idx < num_graph_vertices; ++vertex_idx) { + // Get the nearest terminal for this vertex + int nearest_terminal_idx = vor.terminal[vertex_idx]; + double distance_to_terminal = vor.distance[vertex_idx]; + // Store the mapping + vertex_to_nearest_terminal[vertex_idx] = std::make_pair(nearest_terminal_idx, distance_to_terminal); + } + // std::cout << "Debug: Number of graph vertices: " << num_graph_vertices << std::endl; + // now examine to remove ghost points .... + for (size_t i=0;i!=num_graph_vertices;i++){ + if (map_pindex_segment.find(vertex_to_nearest_terminal.at(i).first) == map_pindex_segment.end()) continue; + geo_point_t gp(points[0][i], points[1][i], points[2][i]); + auto main_sg = map_pindex_segment[vertex_to_nearest_terminal.at(i).first].first; + + auto point_wpid = clus->wire_plane_id(i); + auto apa = point_wpid.apa(); + auto face = point_wpid.face(); + + // use the dynamic point cloud of fit, and then derive distances ... 
// NOTE(review): ghost-point rejection — compares the candidate owner's 3D/per-plane
// 2D distances against the minimum over all other segments; the point is kept
// (flag_change=false) when the owner is closest in enough planes, with
// scaling_2d*search_range relaxations. Several disabled conditions remain inside
// "//&& (...)" comments — kept verbatim.
+ // Get 3D closest point using the "fit" point cloud + std::pair closest_dis_point = segment_get_closest_point(main_sg, gp, "fit"); + + // Calculate 2D distances for each wire plane (U, V, W) using APA/face information + std::tuple closest_2d_dis = segment_get_closest_2d_distances(main_sg, gp, apa, face, "fit"); + + std::tuple min_2d_dis = closest_2d_dis; + + // check against main_sg; + bool flag_change = true; + + // Compare against all segments in the cluster to find minimum 2D distances + for (auto seg : segs) { + if (main_sg == seg) continue; + + // Get 2D distances for this segment + std::tuple temp_2d_dis = segment_get_closest_2d_distances(seg, gp, apa, face, "fit"); + // Update minimum distances for each plane + if (std::get<0>(temp_2d_dis) < std::get<0>(min_2d_dis)) std::get<0>(min_2d_dis) = std::get<0>(temp_2d_dis); + if (std::get<1>(temp_2d_dis) < std::get<1>(min_2d_dis)) std::get<1>(min_2d_dis) = std::get<1>(temp_2d_dis); + if (std::get<2>(temp_2d_dis) < std::get<2>(min_2d_dis)) std::get<2>(min_2d_dis) = std::get<2>(temp_2d_dis); + } + + if (std::get<0>(min_2d_dis) == std::get<0>(closest_2d_dis) && std::get<1>(min_2d_dis) == std::get<1>(closest_2d_dis) && std::get<2>(min_2d_dis) == std::get<2>(closest_2d_dis)) // all closest + flag_change = false; + else if (std::get<0>(min_2d_dis) == std::get<0>(closest_2d_dis) && std::get<1>(min_2d_dis) == std::get<1>(closest_2d_dis) ) //&& (std::get<2>(closest_2d_dis) < scaling_2d * search_range || closest_dis_point.first < search_range)) // 2 closest + flag_change = false; + else if (std::get<0>(min_2d_dis) == std::get<0>(closest_2d_dis) && std::get<2>(min_2d_dis) == std::get<2>(closest_2d_dis) ) //&& (std::get<1>(closest_2d_dis) < scaling_2d * search_range || closest_dis_point.first < search_range)) + flag_change = false; + else if (std::get<1>(min_2d_dis) == std::get<1>(closest_2d_dis) && std::get<2>(min_2d_dis) == std::get<2>(closest_2d_dis) ) //&& (std::get<0>(closest_2d_dis) < scaling_2d * search_range || 
// NOTE(review): remaining keep-conditions, then the dead-channel relaxation: a
// plane whose channel is dead near gp may exceed the 2D cut without disqualifying
// the point, provided another plane is within the scaled range.
closest_dis_point.first < search_range)) + flag_change = false; + else if (std::get<0>(min_2d_dis) == std::get<0>(closest_2d_dis) && (closest_dis_point.first < search_range || (std::get<1>(closest_2d_dis) < scaling_2d * search_range && std::get<2>(closest_2d_dis) < scaling_2d * search_range)) ) + flag_change = false; + else if (std::get<1>(min_2d_dis) == std::get<1>(closest_2d_dis) && (closest_dis_point.first < search_range || (std::get<0>(closest_2d_dis) < scaling_2d * search_range && std::get<2>(closest_2d_dis) < scaling_2d * search_range) )) + flag_change = false; + else if (std::get<2>(min_2d_dis) == std::get<2>(closest_2d_dis) && (closest_dis_point.first < search_range || (std::get<1>(closest_2d_dis) < scaling_2d * search_range && std::get<0>(closest_2d_dis) < scaling_2d * search_range) )) + flag_change = false; + + // deal with dead channels ... + if (!flag_change){ + auto grouping = clus->grouping(); + int ch_range = 0; // Default channel range for dead channel checking + + // Check U plane (pind=0) for dead channels + if (grouping->get_closest_dead_chs(gp, ch_range, apa, face, 0) && std::get<0>(closest_2d_dis) > scaling_2d * search_range){ + if (std::get<1>(closest_2d_dis) < scaling_2d * search_range || std::get<2>(closest_2d_dis) < scaling_2d * search_range) + flag_change = true; + // Check V plane (pind=1) for dead channels + }else if (grouping->get_closest_dead_chs(gp, ch_range, apa, face, 1) && std::get<1>(closest_2d_dis) > scaling_2d * search_range){ + if (std::get<0>(closest_2d_dis) < scaling_2d * search_range || std::get<2>(closest_2d_dis) < scaling_2d * search_range) + flag_change = true; + // Check W plane (pind=2) for dead channels + }else if (grouping->get_closest_dead_chs(gp, ch_range, apa, face, 2) && std::get<2>(closest_2d_dis) > scaling_2d * search_range){ + if (std::get<1>(closest_2d_dis) < scaling_2d * search_range || std::get<0>(closest_2d_dis) < scaling_2d * search_range) + flag_change = true; + } + } + + // change the point's clustering 
// NOTE(review): end of clustering_points_segments — surviving points are handed to
// create_segment_point_cloud per owning segment. Then segment_determine_shower_direction
// begins: associates each point of the `cloud_name` cloud with its nearest fit
// point via the "fit" KD-tree. This function runs past the end of the visible
// chunk, so it is only annotated here, not restructured.
... + if (!flag_change){ + map_segment_points[main_sg].push_back(gp); + } + } + } + + + // convert points to geo_point_t format + // add points to segments ... + for (const auto& [seg, geo_points] : map_segment_points) { + create_segment_point_cloud(seg, geo_points, dv, cloud_name); + } + } + } + + bool segment_determine_shower_direction(SegmentPtr segment, const Clus::ParticleDataSet::pointer& particle_data, const IRecombinationModel::pointer& recomb_model, const std::string& cloud_name, double MIP_dQdx, double rms_cut){ + segment->dirsign(0); + const auto& fits = segment->fits(); + + if (fits.empty()) return false; + + // Get the fit point cloud for KD-tree queries + auto dpcloud_fit = segment->dpcloud("fit"); + if (!dpcloud_fit) return false; + + // Get the associated point cloud + auto dpcloud_assoc = segment->dpcloud(cloud_name); + if (!dpcloud_assoc) return false; + + const auto& assoc_points = dpcloud_assoc->get_points(); + if (assoc_points.empty()) return false; + + // Initialize vectors to store analysis results for each fit point + std::vector> local_points_vec(fits.size()); + std::vector> vec_rms_vals(fits.size(), std::make_tuple(0,0,0)); + std::vector vec_dQ_dx(fits.size(), 0); + std::vector vec_dir(fits.size()); + + // Build KD-tree index for fit points and associate points with nearest fit point + auto& kd_tree_fit = dpcloud_fit->kd3d(); + + for (const auto& pt : assoc_points) { + WireCell::Point test_p(pt.x, pt.y, pt.z); + auto results = kd_tree_fit.knn(1, test_p); + if (!results.empty()) { + size_t closest_fit_idx = results.front().first; + local_points_vec.at(closest_fit_idx).push_back(test_p); + } + } + + WireCell::Vector drift_dir_abs(1, 0, 0); // Drift direction + + // Calculate local directions and RMS spreads for each fit point + for (size_t i = 0; i < local_points_vec.size(); i++) { + // Calculate local direction from neighboring fit points + WireCell::Vector v1(0, 0, 0); + for (size_t j = 1; j < 3; j++) { + if (i + j < fits.size()) { + v1 += 
// NOTE(review): per-fit-point local frame (dir_1 along track; drift-parallel case
// falls back to the Cartesian axes) and RMS spread of associated points projected
// into that frame; dQ/dx is normalized to MIP units.
WireCell::Vector( + fits[i+j].point.x() - fits[i].point.x(), + fits[i+j].point.y() - fits[i].point.y(), + fits[i+j].point.z() - fits[i].point.z() + ); + } + if (i >= j) { + v1 += WireCell::Vector( + fits[i].point.x() - fits[i-j].point.x(), + fits[i].point.y() - fits[i-j].point.y(), + fits[i].point.z() - fits[i-j].point.z() + ); + } + } + + WireCell::Vector dir_1 = v1.magnitude() > 0 ? v1.norm() : WireCell::Vector(1, 0, 0); + vec_dir.at(i) = dir_1; + + // Set up orthogonal coordinate system + WireCell::Vector dir_2, dir_3; + double angle_deg = std::acos(dir_1.dot(drift_dir_abs)) * 180.0 / M_PI; + + if (angle_deg < 7.5) { + dir_1 = WireCell::Vector(1, 0, 0); + dir_2 = WireCell::Vector(0, 1, 0); + dir_3 = WireCell::Vector(0, 0, 1); + } else { + dir_2 = drift_dir_abs.cross(dir_1).norm(); + dir_3 = dir_1.cross(dir_2); + } + + // Project associated points onto the local coordinate system + std::vector> vec_projs; + for (const auto& pt : local_points_vec.at(i)) { + double proj_1 = dir_1.dot(pt); + double proj_2 = dir_2.dot(pt); + double proj_3 = dir_3.dot(pt); + vec_projs.push_back(std::make_tuple(proj_1, proj_2, proj_3)); + } + + // Calculate RMS spread in each direction + int ncount = local_points_vec.at(i).size(); + if (ncount > 1) { + WireCell::Point fit_pt(fits[i].point.x(), fits[i].point.y(), fits[i].point.z()); + std::tuple means = std::make_tuple( + dir_1.dot(fit_pt), + dir_2.dot(fit_pt), + dir_3.dot(fit_pt) + ); + + for (const auto& proj : vec_projs) { + std::get<0>(vec_rms_vals.at(i)) += std::pow(std::get<0>(proj) - std::get<0>(means), 2); + std::get<1>(vec_rms_vals.at(i)) += std::pow(std::get<1>(proj) - std::get<1>(means), 2); + std::get<2>(vec_rms_vals.at(i)) += std::pow(std::get<2>(proj) - std::get<2>(means), 2); + } + + std::get<0>(vec_rms_vals.at(i)) = std::sqrt(std::get<0>(vec_rms_vals.at(i)) / ncount); + std::get<1>(vec_rms_vals.at(i)) = std::sqrt(std::get<1>(vec_rms_vals.at(i)) / ncount); + std::get<2>(vec_rms_vals.at(i)) = 
// NOTE(review): spread statistics along the track (max spread, length with spread
// above rms_cut) feed the is_shower_like decision; shower-like tracks then get
// endpoint directions via segment_cal_dir_3vector(point, dis_cut).
std::sqrt(std::get<2>(vec_rms_vals.at(i)) / ncount); + } + + // Calculate dQ/dx + vec_dQ_dx.at(i) = fits[i].dQ / (fits[i].dx + 1e-9) / MIP_dQdx; + } + + // Analyze spread characteristics + double max_spread = 0; + double large_spread_length = 0; + double total_effective_length = 0; + double total_length = 0; + + // bool flag_prev = false; + for (size_t i = 0; i + 1 < local_points_vec.size(); i++) { + double length = std::sqrt( + std::pow(fits[i+1].point.x() - fits[i].point.x(), 2) + + std::pow(fits[i+1].point.y() - fits[i].point.y(), 2) + + std::pow(fits[i+1].point.z() - fits[i].point.z(), 2) + ); + total_length += length; + + if (std::get<2>(vec_rms_vals.at(i)) != 0) { + total_effective_length += length; + if (std::get<2>(vec_rms_vals.at(i)) > rms_cut) { + large_spread_length += length; + // flag_prev = true; + } + if (std::get<2>(vec_rms_vals.at(i)) > max_spread) { + max_spread = std::get<2>(vec_rms_vals.at(i)); + } + } + } + + // Determine direction based on spread analysis + int flag_dir = 0; + + // Check if this looks like a shower based on spread + bool is_shower_like = ( + (max_spread > 0.7*units::cm && large_spread_length > 0.2 * total_effective_length && + total_effective_length > 3*units::cm && total_effective_length < 15*units::cm && + (large_spread_length > 2.7*units::cm || large_spread_length > 0.35 * total_effective_length)) || + (max_spread > 0.8*units::cm && large_spread_length > 0.3 * total_effective_length && + total_effective_length >= 15*units::cm) || + (max_spread > 1.0*units::cm && large_spread_length > 0.4 * total_effective_length) + ); + + if (is_shower_like) { + WireCell::Vector main_dir1, main_dir2; + bool flag_skip_angle1 = false; + bool flag_skip_angle2 = false; + + // Create copies of points since segment_cal_dir_3vector expects non-const reference + WireCell::Point front_pt = fits.front().point; + WireCell::Point back_pt = fits.back().point; + + if (fits.front().point.z() < fits.back().point.z()) { + main_dir1 = 
// NOTE(review): grouping of consecutive high-spread / high-dQ/dx fit points aligned
// with main_dir1 — continues beyond the visible chunk; annotated only.
segment_cal_dir_3vector(segment, front_pt, 15*units::cm); + main_dir2 = segment_cal_dir_3vector(segment, back_pt, 6*units::cm); + double angle1 = std::acos(main_dir1.dot(drift_dir_abs)) * 180.0 / M_PI; + if (std::fabs(angle1 - 90) < 10) flag_skip_angle1 = true; + } else { + main_dir1 = segment_cal_dir_3vector(segment, front_pt, 6*units::cm); + main_dir2 = segment_cal_dir_3vector(segment, back_pt, 15*units::cm); + double angle2 = std::acos(main_dir2.dot(drift_dir_abs)) * 180.0 / M_PI; + if (std::fabs(angle2 - 90) < 10) flag_skip_angle2 = true; + } + + // Group consecutive segments with large spread in forward direction + // Each tuple: (start_index, end_index, max_rms_in_range) + std::vector> threshold_segs; + + for (size_t i = 0; i < vec_dQ_dx.size(); i++) { + double angle = std::acos(main_dir1.dot(vec_dir.at(i))) * 180.0 / M_PI; + if ((angle < 30 || (flag_skip_angle1 && angle < 60)) && + (std::get<2>(vec_rms_vals.at(i))/units::cm > 0.4 || vec_dQ_dx.at(i) > 1.6)) { + + if (threshold_segs.empty()) { + // Start new segment group + threshold_segs.push_back(std::make_tuple(i, i, std::get<2>(vec_rms_vals.at(i)))); + } else { + // Check if continuous with previous group + if (i == std::get<1>(threshold_segs.back()) + 1) { + // Extend existing group + std::get<1>(threshold_segs.back()) = i; + if (std::get<2>(threshold_segs.back()) < std::get<2>(vec_rms_vals.at(i))) { + std::get<2>(threshold_segs.back()) = std::get<2>(vec_rms_vals.at(i)); + } + } else { + // Start new group (gap detected) + threshold_segs.push_back(std::make_tuple(i, i, std::get<2>(vec_rms_vals.at(i)))); + } + } + } + } + + // Calculate total and max continuous length for forward direction + double total_length1 = 0, max_length1 = 0; + for (const auto& seg : threshold_segs) { + int start_n = std::get<0>(seg); + if (start_n > 0) start_n--; // Include one segment before + int end_n = std::get<1>(seg); + + double tmp_length = 0; + for (int i = start_n; i < end_n && i + 1 < (int)fits.size(); i++) { + 
tmp_length += std::sqrt( + std::pow(fits[i+1].point.x() - fits[i].point.x(), 2) + + std::pow(fits[i+1].point.y() - fits[i].point.y(), 2) + + std::pow(fits[i+1].point.z() - fits[i].point.z(), 2) + ); + } + total_length1 += tmp_length; + if (tmp_length > max_length1) max_length1 = tmp_length; + } + + // Group consecutive segments with large spread in backward direction + threshold_segs.clear(); + + for (int i = vec_dQ_dx.size() - 1; i >= 0; i--) { + double angle = 180 - std::acos(main_dir2.dot(vec_dir.at(i))) * 180.0 / M_PI; + if ((angle < 30 || (flag_skip_angle2 && angle < 60)) && + (std::get<2>(vec_rms_vals.at(i))/units::cm > 0.4 || vec_dQ_dx.at(i) > 1.6)) { + + if (threshold_segs.empty()) { + // Start new segment group + threshold_segs.push_back(std::make_tuple(i, i, std::get<2>(vec_rms_vals.at(i)))); + } else { + // Check if continuous with previous group (decrementing) + if (i == std::get<1>(threshold_segs.back()) - 1) { + // Extend existing group + std::get<1>(threshold_segs.back()) = i; + if (std::get<2>(threshold_segs.back()) < std::get<2>(vec_rms_vals.at(i))) { + std::get<2>(threshold_segs.back()) = std::get<2>(vec_rms_vals.at(i)); + } + } else { + // Start new group (gap detected) + threshold_segs.push_back(std::make_tuple(i, i, std::get<2>(vec_rms_vals.at(i)))); + } + } + } + } + + // Calculate total and max continuous length for backward direction + double total_length2 = 0, max_length2 = 0; + for (const auto& seg : threshold_segs) { + int start_n = std::get<0>(seg); + if (start_n < (int)fits.size() - 1) start_n++; // Include one segment after + int end_n = std::get<1>(seg); + + double tmp_length = 0; + for (int i = start_n; i > end_n && i > 0; i--) { + tmp_length += std::sqrt( + std::pow(fits[i-1].point.x() - fits[i].point.x(), 2) + + std::pow(fits[i-1].point.y() - fits[i].point.y(), 2) + + std::pow(fits[i-1].point.z() - fits[i].point.z(), 2) + ); + } + total_length2 += tmp_length; + if (tmp_length > max_length2) max_length2 = tmp_length; + } + + // 
Compare using both total and max continuous lengths + if (total_length1 + max_length1 > 1.1 * (total_length2 + max_length2)) { + flag_dir = 1; + } else if (1.1 * (total_length1 + max_length1) < total_length2 + max_length2) { + flag_dir = -1; + } + } else { + // Not shower-like, use simpler direction determination + if (total_length < 5*units::cm) { + if (!segment_is_shower_trajectory(segment)) segment_determine_dir_track(segment, 0, fits.size(), particle_data, recomb_model); + // For short segments, could call determine_dir_track here if needed + } else { + // Count consistent directions at each end + WireCell::Point front_pt = fits.front().point; + WireCell::Point back_pt = fits.back().point; + WireCell::Vector main_dir_front = segment_cal_dir_3vector(segment, front_pt, 6*units::cm); + int ncount_front = 0; + for (size_t i = 0; i < vec_dQ_dx.size(); i++) { + double angle = std::acos(main_dir_front.dot(vec_dir.at(i))) * 180.0 / M_PI; + if (angle < 30) ncount_front++; + } + + WireCell::Vector main_dir_back = segment_cal_dir_3vector(segment, back_pt, 6*units::cm); + int ncount_back = 0; + for (int i = vec_dQ_dx.size() - 1; i >= 0; i--) { + double angle = 180 - std::acos(main_dir_back.dot(vec_dir.at(i))) * 180.0 / M_PI; + if (angle < 30) ncount_back++; + } + + if (1.2 * ncount_front < ncount_back) { + flag_dir = -1; + } else if (ncount_front > 1.2 * ncount_back) { + flag_dir = 1; + } + } + } + + segment->dirsign(flag_dir); + return (flag_dir != 0); + } + + bool segment_is_shower_topology(SegmentPtr segment, bool tmp_val, double MIP_dQ_dx){ + int flag_dir = 0; + bool flag_shower_topology = tmp_val; + const auto& fits = segment->fits(); + + if (fits.empty()) return false; + + // Get the fit point cloud for KD-tree queries + auto dpcloud_fit = segment->dpcloud("fit"); + if (!dpcloud_fit) return false; + + // Get the associated point cloud + auto dpcloud_assoc = segment->dpcloud("associated"); + if (!dpcloud_assoc) return false; + + const auto& assoc_points = 
dpcloud_assoc->get_points(); + if (assoc_points.empty()) return false; + + // Initialize vectors to store analysis results for each fit point + std::vector> local_points_vec(fits.size()); + std::vector> vec_rms_vals(fits.size(), std::make_tuple(0,0,0)); + std::vector vec_dQ_dx(fits.size(), 0); + + // Build KD-tree index for fit points and associate points with nearest fit point + auto& kd_tree_fit = dpcloud_fit->kd3d(); + + for (const auto& pt : assoc_points) { + WireCell::Point test_p(pt.x, pt.y, pt.z); + auto results = kd_tree_fit.knn(1, test_p); + if (!results.empty()) { + size_t closest_fit_idx = results.front().first; + local_points_vec.at(closest_fit_idx).push_back(test_p); + } + } + + WireCell::Vector drift_dir_abs(1, 0, 0); // Drift direction + + // Calculate local directions and RMS spreads for each fit point + for (size_t i = 0; i < local_points_vec.size(); i++) { + // Calculate local direction from neighboring fit points + WireCell::Vector v1(0, 0, 0); + for (size_t j = 1; j < 3; j++) { + if (i + j < fits.size()) { + v1 += WireCell::Vector( + fits[i+j].point.x() - fits[i].point.x(), + fits[i+j].point.y() - fits[i].point.y(), + fits[i+j].point.z() - fits[i].point.z() + ); + } + if (i >= j) { + v1 += WireCell::Vector( + fits[i].point.x() - fits[i-j].point.x(), + fits[i].point.y() - fits[i-j].point.y(), + fits[i].point.z() - fits[i-j].point.z() + ); + } + } + + WireCell::Vector dir_1 = v1.magnitude() > 0 ? 
v1.norm() : WireCell::Vector(1, 0, 0); + + // Set up orthogonal coordinate system + WireCell::Vector dir_2, dir_3; + double angle_deg = std::acos(dir_1.dot(drift_dir_abs)) * 180.0 / M_PI; + + if (angle_deg < 7.5) { + dir_1 = WireCell::Vector(1, 0, 0); + dir_2 = WireCell::Vector(0, 1, 0); + dir_3 = WireCell::Vector(0, 0, 1); + } else { + dir_2 = drift_dir_abs.cross(dir_1).norm(); + dir_3 = dir_1.cross(dir_2); + } + + // Project associated points onto the local coordinate system + std::vector> vec_projs; + for (const auto& pt : local_points_vec.at(i)) { + double proj_1 = dir_1.dot(pt); + double proj_2 = dir_2.dot(pt); + double proj_3 = dir_3.dot(pt); + vec_projs.push_back(std::make_tuple(proj_1, proj_2, proj_3)); + } + + // Calculate RMS spread in each direction + int ncount = local_points_vec.at(i).size(); + if (ncount > 1) { + WireCell::Point fit_pt(fits[i].point.x(), fits[i].point.y(), fits[i].point.z()); + std::tuple means = std::make_tuple( + dir_1.dot(fit_pt), + dir_2.dot(fit_pt), + dir_3.dot(fit_pt) + ); + + for (const auto& proj : vec_projs) { + std::get<0>(vec_rms_vals.at(i)) += std::pow(std::get<0>(proj) - std::get<0>(means), 2); + std::get<1>(vec_rms_vals.at(i)) += std::pow(std::get<1>(proj) - std::get<1>(means), 2); + std::get<2>(vec_rms_vals.at(i)) += std::pow(std::get<2>(proj) - std::get<2>(means), 2); + } + + std::get<0>(vec_rms_vals.at(i)) = std::sqrt(std::get<0>(vec_rms_vals.at(i)) / ncount); + std::get<1>(vec_rms_vals.at(i)) = std::sqrt(std::get<1>(vec_rms_vals.at(i)) / ncount); + std::get<2>(vec_rms_vals.at(i)) = std::sqrt(std::get<2>(vec_rms_vals.at(i)) / ncount); + } + + // Calculate dQ/dx + vec_dQ_dx.at(i) = fits[i].dQ / (fits[i].dx + 1e-9) / MIP_dQ_dx; + } + + // Analyze spread characteristics + double max_spread = 0; + double large_spread_length = 0; + double total_effective_length = 0; + + double max_cont_length = 0; + double max_cont_weighted_length = 0; + double cont_length = 0; + double cont_weighted_length = 0; + bool flag_prev = false; + 
+ for (size_t i = 0; i + 1 < local_points_vec.size(); i++) { + double length = std::sqrt( + std::pow(fits[i+1].point.x() - fits[i].point.x(), 2) + + std::pow(fits[i+1].point.y() - fits[i].point.y(), 2) + + std::pow(fits[i+1].point.z() - fits[i].point.z(), 2) + ); + + if (std::get<2>(vec_rms_vals.at(i)) != 0) { + total_effective_length += length; + if (std::get<2>(vec_rms_vals.at(i)) > 0.4 * units::cm) { + large_spread_length += length; + cont_length += length; + cont_weighted_length += length * std::get<2>(vec_rms_vals.at(i)); + flag_prev = true; + } else { + if (flag_prev && cont_length > max_cont_length) { + max_cont_length = cont_length; + max_cont_weighted_length = cont_weighted_length; + } + cont_length = 0; + cont_weighted_length = 0; + flag_prev = false; + } + if (std::get<2>(vec_rms_vals.at(i)) > max_spread) { + max_spread = std::get<2>(vec_rms_vals.at(i)); + } + } + } + (void)max_cont_weighted_length; // Currently unused + + // Determine if this is shower topology based on spread patterns + if ((max_spread > 0.7*units::cm && large_spread_length > 0.2 * total_effective_length && + total_effective_length > 3*units::cm && total_effective_length < 15*units::cm && + (large_spread_length > 2.7*units::cm || large_spread_length > 0.35 * total_effective_length)) || + (max_spread > 0.8*units::cm && large_spread_length > 0.3 * total_effective_length && + total_effective_length >= 15*units::cm) || + (max_spread > 0.8*units::cm && large_spread_length > 8*units::cm && + large_spread_length > 0.18 * total_effective_length) || + (max_spread > 1.0*units::cm && large_spread_length > 0.4 * total_effective_length) || + (max_spread > 1.0*units::cm && large_spread_length > 5*units::cm && + large_spread_length > 0.23 * total_effective_length)) { + + flag_shower_topology = true; + } + + // If identified as shower topology, determine direction + if (flag_shower_topology) { + // Group consecutive segments with large spread in forward direction + std::vector> threshold_segs; + + for 
(size_t i = 0; i < vec_dQ_dx.size(); i++) { + if (std::get<2>(vec_rms_vals.at(i))/units::cm > 0.4) { + if (threshold_segs.empty()) { + threshold_segs.push_back(std::make_tuple(i, i, std::get<2>(vec_rms_vals.at(i)))); + } else { + if (i == std::get<1>(threshold_segs.back()) + 1) { + // Extend existing group + std::get<1>(threshold_segs.back()) = i; + if (std::get<2>(threshold_segs.back()) < std::get<2>(vec_rms_vals.at(i))) { + std::get<2>(threshold_segs.back()) = std::get<2>(vec_rms_vals.at(i)); + } + } else { + // Start new group + threshold_segs.push_back(std::make_tuple(i, i, std::get<2>(vec_rms_vals.at(i)))); + } + } + } + } + + // Calculate total and max continuous length for forward direction + double total_length1 = 0, max_length1 = 0; + for (const auto& seg : threshold_segs) { + int start_n = std::get<0>(seg); + if (start_n > 0) start_n--; + int end_n = std::get<1>(seg); + + double tmp_length = 0; + for (int i = start_n; i < end_n && i + 1 < (int)fits.size(); i++) { + tmp_length += std::sqrt( + std::pow(fits[i+1].point.x() - fits[i].point.x(), 2) + + std::pow(fits[i+1].point.y() - fits[i].point.y(), 2) + + std::pow(fits[i+1].point.z() - fits[i].point.z(), 2) + ); + } + total_length1 += tmp_length; + if (tmp_length > max_length1) max_length1 = tmp_length; + } + + // Group consecutive segments with large spread in backward direction + threshold_segs.clear(); + + for (int i = vec_dQ_dx.size() - 1; i >= 0; i--) { + if (std::get<2>(vec_rms_vals.at(i))/units::cm > 0.4) { + if (threshold_segs.empty()) { + threshold_segs.push_back(std::make_tuple(i, i, std::get<2>(vec_rms_vals.at(i)))); + } else { + if (i == std::get<1>(threshold_segs.back()) - 1) { + // Extend existing group + std::get<1>(threshold_segs.back()) = i; + if (std::get<2>(threshold_segs.back()) < std::get<2>(vec_rms_vals.at(i))) { + std::get<2>(threshold_segs.back()) = std::get<2>(vec_rms_vals.at(i)); + } + } else { + // Start new group + threshold_segs.push_back(std::make_tuple(i, i, 
std::get<2>(vec_rms_vals.at(i)))); + } + } + } + } + + // Calculate total and max continuous length for backward direction + double total_length2 = 0, max_length2 = 0; + for (const auto& seg : threshold_segs) { + int start_n = std::get<0>(seg); + if (start_n < (int)fits.size() - 1) start_n++; + int end_n = std::get<1>(seg); + + double tmp_length = 0; + for (int i = start_n; i > end_n && i > 0; i--) { + tmp_length += std::sqrt( + std::pow(fits[i-1].point.x() - fits[i].point.x(), 2) + + std::pow(fits[i-1].point.y() - fits[i].point.y(), 2) + + std::pow(fits[i-1].point.z() - fits[i].point.z(), 2) + ); + } + total_length2 += tmp_length; + if (tmp_length > max_length2) max_length2 = tmp_length; + } + + // Determine direction based on spread comparison + if (total_length1 + max_length1 > 1.1 * (total_length2 + max_length2)) { + flag_dir = 1; + } else if (1.1 * (total_length1 + max_length1) < total_length2 + max_length2) { + flag_dir = -1; + } + + // Override shower topology for very long segments with little spread + double tmp_total_length = segment_track_length(segment, 0); + if (tmp_total_length > 50*units::cm && + total_length1 < 0.25 * tmp_total_length && + total_length2 < 0.25 * tmp_total_length) { + flag_dir = 0; + flag_shower_topology = false; + } + } + + if (flag_shower_topology) segment->set_flags(SegmentFlags::kShowerTopology); + segment->dirsign(flag_dir); + return flag_shower_topology; + } + +} diff --git a/clus/src/PRShower.cxx b/clus/src/PRShower.cxx new file mode 100644 index 000000000..9bf336dd6 --- /dev/null +++ b/clus/src/PRShower.cxx @@ -0,0 +1,55 @@ +#include "WireCellClus/PRShower.h" +#include "WireCellClus/PRGraph.h" +namespace WireCell::Clus::PR { + + Shower::Shower(Graph& graph) + : TrajectoryView(graph) + { + } + + Shower::~Shower() + { + } + + + VertexPtr Shower::start_vertex() + { + return m_start_vertex; + } + + SegmentPtr Shower::start_segment() + { + return m_start_segment; + } + + + // Chainable setters + + /// Chainable setter of start 
vertex + Shower& Shower::start_vertex(VertexPtr vtx) + { + if (! vtx->descriptor_valid()) { + m_start_vertex = nullptr; + return *this; + } + this->add_vertex(vtx); + m_start_vertex = vtx; + return *this; + } + + + /// Chainable setter of start segment + Shower& Shower::start_segment(SegmentPtr seg) + { + if (! seg->descriptor_valid()) { + m_start_segment = nullptr; + return *this; + } + this->add_segment(seg); + m_start_segment = seg; + return *this; + } + + + +} diff --git a/clus/src/PRTrajectoryView.cxx b/clus/src/PRTrajectoryView.cxx new file mode 100644 index 000000000..294c95670 --- /dev/null +++ b/clus/src/PRTrajectoryView.cxx @@ -0,0 +1,71 @@ +#include "WireCellClus/PRTrajectoryView.h" +#include "WireCellClus/PRGraph.h" + +namespace WireCell::Clus::PR { + + bool TrajectoryViewNodePredicate::operator()(const node_descriptor& desc) const + { + return view.has_node(desc); + } + + + bool TrajectoryViewEdgePredicate::operator()(const edge_descriptor& desc) const + { + return view.has_edge(desc); + } + + + + TrajectoryView::TrajectoryView(Graph& graph) + : m_graph(graph, TrajectoryViewNodePredicate(*this), TrajectoryViewEdgePredicate(*this)) + , m_nodes(0) + , m_edges(0, EdgeDescriptorHash(graph), EdgeDescriptorEqual(graph)) + {} + + TrajectoryView::~TrajectoryView() {} + + const TrajectoryView::view_graph_type& TrajectoryView::view_graph() const + { + return m_graph; + } + + bool TrajectoryView::has_node(node_descriptor desc) const + { + return m_nodes.count(desc) > 0; + } + + bool TrajectoryView::has_edge(edge_descriptor desc) const + { + return m_edges.count(desc) > 0; + } + + bool TrajectoryView::add_vertex(VertexPtr vtx) + { + if (! vtx->descriptor_valid()) { + return false; + } + m_nodes.insert(vtx->get_descriptor()); + return true; + } + + + bool TrajectoryView::add_segment(SegmentPtr seg) + { + if (! 
seg->descriptor_valid()) { + return false; + } + m_edges.insert(seg->get_descriptor()); + return true; + } + + + bool TrajectoryView::remove_vertex(VertexPtr vtx) + { + return m_nodes.erase(vtx->get_descriptor()) == 1; + } + + bool TrajectoryView::remove_segment(SegmentPtr seg) + { + return m_edges.erase(seg->get_descriptor()) == 1; + } +} diff --git a/clus/src/ParticleDataSet.cxx b/clus/src/ParticleDataSet.cxx new file mode 100644 index 000000000..eb0ed49af --- /dev/null +++ b/clus/src/ParticleDataSet.cxx @@ -0,0 +1,108 @@ +#include "WireCellClus/ParticleDataSet.h" +#include "WireCellUtil/NamedFactory.h" +#include "WireCellUtil/Units.h" + +WIRECELL_FACTORY(ParticleDataSet, WireCell::Clus::ParticleDataSet, WireCell::IConfigurable) + +using namespace WireCell; + +Clus::ParticleDataSet::ParticleDataSet() {} +Clus::ParticleDataSet::~ParticleDataSet() {} + +void Clus::ParticleDataSet::configure(const WireCell::Configuration& config) { + // Configure dE/dx functions + if (config.isMember("dedx_functions")) { + const auto& dedx_config = config["dedx_functions"]; + for (const auto& particle : dedx_config.getMemberNames()) { + const auto& func_name = dedx_config[particle].asString(); + auto func = Factory::find_tn(func_name); + if (func) { + m_dedx_functions[particle] = func; + } + } + } + + // Configure range functions + if (config.isMember("range_functions")) { + const auto& range_config = config["range_functions"]; + for (const auto& particle : range_config.getMemberNames()) { + const auto& func_name = range_config[particle].asString(); + auto func = Factory::find_tn(func_name); + if (func) { + m_range_functions[particle] = func; + } + } + } +} + +WireCell::Configuration Clus::ParticleDataSet::default_configuration() const { + Configuration cfg; + cfg["dedx_functions"] = Json::Value(Json::objectValue); + cfg["range_functions"] = Json::Value(Json::objectValue); + return cfg; +} + +IScalarFunction::pointer Clus::ParticleDataSet::get_dEdx_function(const std::string& 
particle) const { + auto it = m_dedx_functions.find(particle); + return (it != m_dedx_functions.end()) ? it->second : nullptr; +} + +IScalarFunction::pointer Clus::ParticleDataSet::get_range_function(const std::string& particle) const { + auto it = m_range_functions.find(particle); + return (it != m_range_functions.end()) ? it->second : nullptr; +} + +std::vector Clus::ParticleDataSet::get_particles() const { + std::vector particles; + for (const auto& pair : m_dedx_functions) { + particles.push_back(pair.first); + } + return particles; +} + +double Clus::ParticleDataSet::get_particle_mass(int pdg_code) const { + // Particle Data Group (PDG) codes and their masses in MeV/c^2 + static const std::map pdg_mass_map = { + {11, 0.5109989461}, // electron + {-11, 0.5109989461}, // positron + {13, 105.6583745}, // muon + {-13, 105.6583745}, // anti-muon + {211, 139.57039}, // charged pion + {-211, 139.57039}, // charged pion + {321, 493.677}, // charged kaon + {-321, 493.677}, // charged kaon + {2212, 938.2720813}, // proton + {-2212, 938.2720813} // anti-proton + }; + + auto it = pdg_mass_map.find(pdg_code); + if (it != pdg_mass_map.end()) { + return it->second * units::MeV; // Convert to internal units (MeV/c^2) + } else { + return 0.0; // Unknown particle type + } + +} + +std::string Clus::ParticleDataSet::pdg_to_name(int pdg_code) const { + // Particle Data Group (PDG) codes and their names + static const std::map pdg_name_map = { + {11, "electron"}, + {-11, "positron"}, + {13, "muon"}, + {-13, "anti-muon"}, + {211, "pi_plus"}, + {-211, "pi_minus"}, + {321, "K_plus"}, + {-321, "K_minus"}, + {2212, "proton"}, + {-2212, "anti-proton"} + }; + + auto it = pdg_name_map.find(pdg_code); + if (it != pdg_name_map.end()) { + return it->second; + } else { + return "unknown"; + } +} diff --git a/clus/src/PointTreeBuilding.cxx b/clus/src/PointTreeBuilding.cxx index e655224f3..2c779f41a 100644 --- a/clus/src/PointTreeBuilding.cxx +++ b/clus/src/PointTreeBuilding.cxx @@ -69,6 +69,8 
@@ void PointTreeBuilding::configure(const WireCell::Configuration& cfg) raise("failed to get anode plane"); } + m_dv = Factory::find_tn(cfg["detector_volumes"].asString()); + m_face = get(cfg, "face", 0); log->debug("using face: {}", m_face); if (m_anode->face(m_face) == nullptr) { @@ -76,7 +78,7 @@ void PointTreeBuilding::configure(const WireCell::Configuration& cfg) } // Fixme: this is an utterly broken thing and should be replaced. - m_geomhelper = Factory::find_tn(cfg["geom_helper"].asString()); + // m_geomhelper = Factory::find_tn(cfg["geom_helper"].asString()); auto samplers = cfg["samplers"]; if (samplers.isNull()) { @@ -98,6 +100,25 @@ void PointTreeBuilding::configure(const WireCell::Configuration& cfg) } +double PointTreeBuilding::get_time_offset(const WirePlaneId& wpid) const{ + if (cache_map_time_offset.find(wpid) == cache_map_time_offset.end()) { + cache_map_time_offset[wpid] = m_dv->metadata(wpid)["time_offset"].asDouble(); + } + return cache_map_time_offset[wpid]; +} +double PointTreeBuilding::get_drift_speed(const WirePlaneId& wpid) const{ + if (cache_map_drift_speed.find(wpid) == cache_map_drift_speed.end()) { + cache_map_drift_speed[wpid] = m_dv->metadata(wpid)["drift_speed"].asDouble(); + } + return cache_map_drift_speed[wpid]; +} +double PointTreeBuilding::get_tick(const WirePlaneId& wpid) const{ + if (cache_map_tick.find(wpid) == cache_map_tick.end()) { + cache_map_tick[wpid] = m_dv->metadata(wpid)["tick"].asDouble(); + } + return cache_map_tick[wpid]; +} + WireCell::Configuration PointTreeBuilding::default_configuration() const { @@ -157,34 +178,17 @@ namespace { // - make_corner_dataset } -// static Dataset make2dds (const Dataset& ds3d, const double angle) { -// Dataset ds; -// const auto& x = ds3d.get("x")->elements(); -// const auto& y = ds3d.get("y")->elements(); -// const auto& z = ds3d.get("z")->elements(); -// std::vector x2d(x.size()); -// std::vector y2d(y.size()); -// for (size_t ind=0; ind& angles) const +{ + const auto& gr = 
icluster->graph(); log->debug("load cluster {} at call={}: {}", icluster->ident(), m_count, dumps(gr)); auto clusters = get_geom_clusters(gr); log->debug("got {} clusters", clusters.size()); size_t nblobs = 0; + size_t nskipped = 0; Points::node_ptr root = std::make_unique(); auto& sampler = m_samplers.at("3d"); for (auto& [cluster_id, vdescs] : clusters) { @@ -194,41 +198,28 @@ Points::node_ptr PointTreeBuilding::sample_live(const WireCell::ICluster::pointe if (code != 'b') { continue; } + const IBlob::pointer iblob = std::get(gr[vdesc].ptr); - named_pointclouds_t pcs; - /// TODO: use nblobs or iblob->ident()? A: Index. The sampler takes blob->ident() as well. - auto [pc3d, aux] = sampler->sample_blob(iblob, nblobs); - if (pc3d.get("x")->size_major() == 0) { - log->debug("blob {} has no points", iblob->ident()); + + auto pcs = Aux::sample_live(sampler, iblob, angles, tick, nblobs); + /// DO NOT EXTEND FURTHER! see #426, #430 + + if (pcs.empty()) { continue; } - pcs.emplace("3d", pc3d); - pcs.emplace("2dp0", make2dds(pc3d, angle_u)); - pcs.emplace("2dp1", make2dds(pc3d, angle_v)); - pcs.emplace("2dp2", make2dds(pc3d, angle_w)); - const Point center = calc_blob_center(pcs["3d"]); - auto scalar_ds = make_scalar_dataset(iblob, center, pcs["3d"].get("x")->size_major(), tick); - int_t max_wire_interval = aux.get("max_wire_interval")->elements()[0]; - int_t min_wire_interval = aux.get("min_wire_interval")->elements()[0]; - int_t max_wire_type = aux.get("max_wire_type")->elements()[0]; - int_t min_wire_type = aux.get("min_wire_type")->elements()[0]; - scalar_ds.add("max_wire_interval", Array({(int_t)max_wire_interval})); - scalar_ds.add("min_wire_interval", Array({(int_t)min_wire_interval})); - scalar_ds.add("max_wire_type", Array({(int_t)max_wire_type})); - scalar_ds.add("min_wire_type", Array({(int_t)min_wire_type})); - pcs.emplace("scalar", std::move(scalar_ds)); cnode->insert(Points(std::move(pcs))); - ++nblobs; } } - log->debug("sampled {} live blobs to tree with {} 
children", nblobs, root->nchildren()); + if (nskipped) { + log->debug("skipped {} live blobs. You may want to follow up with a ClusteringPointed in an MABC. See Issue #425", nskipped); + } + log->debug("sampled {} live blobs in {} clusters", nblobs, root->nchildren()); return root; } Points::node_ptr PointTreeBuilding::sample_dead(const WireCell::ICluster::pointer icluster, const double tick) const { - using int_t = Facade::int_t; const auto& gr = icluster->graph(); log->debug("load cluster {} at call={}: {}", icluster->ident(), m_count, dumps(gr)); @@ -236,10 +227,6 @@ Points::node_ptr PointTreeBuilding::sample_dead(const WireCell::ICluster::pointe log->debug("got {} clusters", clusters.size()); size_t nblobs = 0; Points::node_ptr root = std::make_unique(); - // if (m_samplers.find("dead") == m_samplers.end()) { - // raise("m_samplers must have \"dead\" sampler"); - // } - // auto& sampler = m_samplers.at("dead"); for (auto& [cluster_id, vdescs] : clusters) { auto cnode = root->insert(std::make_unique()); for (const auto& vdesc : vdescs) { @@ -247,26 +234,24 @@ Points::node_ptr PointTreeBuilding::sample_dead(const WireCell::ICluster::pointe if (code != 'b') { continue; } + auto iblob = std::get(gr[vdesc].ptr); - named_pointclouds_t pcs; - auto scalar_ds = make_scalar_dataset(iblob, {0,0,0}, 0, tick); - scalar_ds.add("max_wire_interval", Array({(int_t)-1})); - scalar_ds.add("min_wire_interval", Array({(int_t)-1})); - scalar_ds.add("max_wire_type", Array({(int_t)-1})); - scalar_ds.add("min_wire_type", Array({(int_t)-1})); - pcs.emplace("scalar", scalar_ds); - pcs.emplace("corner", make_corner_dataset(iblob)); - // for (const auto& [name, pc] : pcs) { - // log->debug("{} -> keys {} size_major {}", name, pc.keys().size(), pc.size_major()); - // } + + + auto pcs = Aux::sample_dead(iblob, tick); + // std::cout << "Xin: " << "bad sampling points in dead " << " " << pcs.size() << std::endl; + + if (pcs.empty()) { + continue; + } cnode->insert(Points(std::move(pcs))); + // 
DO NOT EXTEND THIS. see #430. + ++nblobs; } - /// DEBUGONLY - // if (nblobs > 1) { - // break; - // } } + + // std::cout << "Xin: " << "sampled " << nblobs << " dead blobs in " << root->nchildren() << " clusters" << std::endl; log->debug("sampled {} dead blobs to tree with {} children", nblobs, root->nchildren()); return root; @@ -281,20 +266,8 @@ void PointTreeBuilding::add_ctpc(Points::node_ptr& root, const WireCell::ICluste // log->debug("add_ctpc load cluster {} at call={}: {}", icluster->ident(), m_count, dumps(cg)); auto grouping = root->value.facade(); - const auto& tp = grouping->get_params(); const auto& proj_centers = grouping->proj_centers(); const auto& pitch_mags = grouping->pitch_mags(); - /// DEBUGONLY: remove these prints after debugging - // for(const auto& [face, mags] : pitch_mags) { - // for(const auto& [pind, mag] : mags) { - // log->debug("face {} pind {} pitch_mag {}", face, pind, mag); - // } - // } - // for (const auto& [face, centers] : proj_centers) { - // for(const auto& [pind, center] : centers) { - // log->debug("face {} pind {} center {}", face, pind, center); - // } - // } Facade::mapfp_t> ds_x, ds_y, ds_charge, ds_charge_err; Facade::mapfp_t> ds_cident, ds_wind, ds_slice_index; @@ -305,7 +278,6 @@ void PointTreeBuilding::add_ctpc(Points::node_ptr& root, const WireCell::ICluste if (cgnode.code() == 's') { auto& slice = std::get(cgnode.ptr); ++nslices; - const auto& slice_index = slice->start()/tp.tick; const auto& activity = slice->activity(); for (const auto& [ichan, charge] : activity) { if(charge.uncertainty() > m_dead_threshold) { @@ -313,24 +285,25 @@ void PointTreeBuilding::add_ctpc(Points::node_ptr& root, const WireCell::ICluste // std::cout << "Test: m_dead_threshold " << m_dead_threshold << " charge.uncertainty() " << charge.uncertainty() << " " << charge.value() << " " << ichan << " " << slice_index << std::endl; continue; } + // std::cout << "Test: live " << " m_dead_threshold " << m_dead_threshold + // << " 
charge.uncertainty() " << charge.uncertainty() + // << " " << charge.value() << " " << ichan->ident() + // << " " << slice->start() << std::endl; const auto& cident = ichan->ident(); const auto& wires = ichan->wires(); for (const auto& wire : wires) { const auto& wind = wire->index(); - const auto& plane = wire->planeid().index(); - // log->debug("slice {} chan {} charge {} wind {} plane {} face {}", slice_index, cident, charge, wind, plane, wire->planeid().face()); - // const auto& face = wire->planeid().face(); - const auto& face = m_face; - /// FIXME: is this the way to get face? - -// std::cout << "Test: " << slice->start() << " " << slice_index << " " << tp.time_offset << " " << tp.drift_speed << std::endl; - - const auto& x = Facade::time2drift(m_anode->face(face), tp.time_offset, tp.drift_speed, slice->start()); - const double y = pitch_mags.at(face).at(plane)* (wind +0.5) + proj_centers.at(face).at(plane); // the additon of 0.5 is to match with the convetion of WCP (X. Q.) - - // if (abs(wind-815) < 2 or abs(wind-1235) < 2 or abs(wind-1378) < 2) { - // log->debug("slice {} chan {} charge {} wind {} plane {} face {} x {} y {}", slice_index, cident, charge, - // wind, plane, face, x, y); + const auto& wpid_wire = wire->planeid(); + const auto& plane = wpid_wire.index(); + const auto& wpid_all = WirePlaneId(kAllLayers, wpid_wire.face(), wpid_wire.apa()); + const auto& face = wpid_wire.face(); + const auto& x = Facade::time2drift(m_anode->faces()[face], get_time_offset(wpid_all), get_drift_speed(wpid_all), slice->start()); + const double y = pitch_mags.at(m_anode->ident()).at(face).at(plane)* (wind +0.5) + proj_centers.at(m_anode->ident()).at(face).at(plane); // the addition of 0.5 is to match with the convention of WCP (X. Q.) 
+ // if (nslices < 2) { + // log->debug("dv: time_offset {} drift_speed {} tick {}", + // get_time_offset(wpid_all), + // get_drift_speed(wpid_all), + // get_tick(wpid_all)); // } ds_x[face][plane].push_back(x); ds_y[face][plane].push_back(y); @@ -338,6 +311,7 @@ void PointTreeBuilding::add_ctpc(Points::node_ptr& root, const WireCell::ICluste ds_charge_err[face][plane].push_back(charge.uncertainty()); ds_cident[face][plane].push_back(cident); ds_wind[face][plane].push_back(wind); + const auto& slice_index = slice->start()/get_tick(wpid_all); ds_slice_index[face][plane].push_back(slice_index); } } @@ -346,6 +320,8 @@ void PointTreeBuilding::add_ctpc(Points::node_ptr& root, const WireCell::ICluste } // log->debug("got {} slices", nslices); + int anode_ident = m_anode->ident(); + std::vector plane_names = {"U", "V", "W"}; for (const auto& [face, planes] : ds_x) { for (const auto& [plane, x] : planes) { // log->debug("ds_x {} ds_y {} ds_charge {} ds_charge_err {} ds_cident {} ds_wind {} ds_slice_index {}", @@ -359,7 +335,7 @@ void PointTreeBuilding::add_ctpc(Points::node_ptr& root, const WireCell::ICluste ds.add("cident", Array(ds_cident[face][plane])); ds.add("wind", Array(ds_wind[face][plane])); ds.add("slice_index", Array(ds_slice_index[face][plane])); - const std::string ds_name = String::format("ctpc_f%dp%d", face, plane); + const std::string ds_name = String::format("ctpc_a%df%dp%d",anode_ident, face, plane_names[plane]); // root->insert(Points(named_pointclouds_t{{ds_name, std::move(ds)}})); root->value.local_pcs().emplace(ds_name, ds); // log->debug("added point cloud {} with {} points", ds_name, x.size()); @@ -368,6 +344,7 @@ void PointTreeBuilding::add_ctpc(Points::node_ptr& root, const WireCell::ICluste // for (const auto& [name, pc] : root->value.local_pcs()) { // log->debug("contains point cloud {} with {} points", name, pc.get("x")->size_major()); // } + (void)nslices; // unused, but useful for debugging } void 
PointTreeBuilding::add_dead_winds(Points::node_ptr& root, const WireCell::ICluster::pointer icluster) const { @@ -376,7 +353,6 @@ void PointTreeBuilding::add_dead_winds(Points::node_ptr& root, const WireCell::I using int_t = Facade::int_t; const auto& cg = icluster->graph(); auto grouping = root->value.facade(); - const auto& tp = grouping->get_params(); std::set faces; std::set planes; for (const auto& vdesc : GraphTools::mir(boost::vertices(cg))) { @@ -386,19 +362,19 @@ void PointTreeBuilding::add_dead_winds(Points::node_ptr& root, const WireCell::I // const auto& slice_index = slice->start()/m_tick; const auto& activity = slice->activity(); for (const auto& [ichan, charge] : activity) { + // std::cout << "Test: m_dead_threshold " << m_dead_threshold << " charge.uncertainty() " << charge.uncertainty() << " " << charge.value() << " " << ichan->ident() << " " << slice->start() << std::endl; if(charge.uncertainty() < m_dead_threshold) continue; // log->debug("m_dead_threshold {} charge.uncertainty() {}", m_dead_threshold, charge.uncertainty()); // const auto& cident = ichan->ident(); const auto& wires = ichan->wires(); for (const auto& wire : wires) { const auto& wind = wire->index(); - const auto& plane = wire->planeid().index(); - // const auto& face = wire->planeid().face(); - // log->debug("dead chan {} charge {} wind {} plane {} face {}", ichan->ident(), charge, wind, plane, wire->planeid().face()); - const auto& face = m_face; - /// FIXME: is this the way to get face? 
- const auto& xbeg = Facade::time2drift(m_anode->face(face), tp.time_offset, tp.drift_speed, slice->start()); - const auto& xend = Facade::time2drift(m_anode->face(face), tp.time_offset, tp.drift_speed, slice->start() + slice->span()); + const auto& wpid_wire = wire->planeid(); + const auto& plane = wpid_wire.index(); + const auto& wpid_all = WirePlaneId(kAllLayers, wpid_wire.face(), wpid_wire.apa()); + const auto& face = wpid_wire.face(); + const auto& xbeg = Facade::time2drift(m_anode->faces()[face], get_time_offset(wpid_all), get_drift_speed(wpid_all), slice->start()); + const auto& xend = Facade::time2drift(m_anode->faces()[face], get_time_offset(wpid_all), get_drift_speed(wpid_all), slice->start() + slice->span()); // if (true) { // log->debug("dead chan {} slice_index_min {} slice_index_max {} charge {} xbeg {} xend {}", ichan->ident(), // slice_index, (slice->start() + slice->span()) / m_tick, charge, xbeg, xend); @@ -406,14 +382,7 @@ void PointTreeBuilding::add_dead_winds(Points::node_ptr& root, const WireCell::I faces.insert(face); planes.insert(plane); - auto & dead_winds = grouping->get_dead_winds(face, plane); - // fix a bug how do we know the smaller or bigger value of xbeg and xend? 
- // if (dead_winds.find(wind) == dead_winds.end()) { - // dead_winds[wind] = {xbeg, xend}; - // } else { - // const auto& [xbeg_now, xend_now] = dead_winds[wind]; - // dead_winds[wind] = {std::min(xbeg, xbeg_now), std::max(xend, xend_now)}; - // } + auto & dead_winds = grouping->get_dead_winds(m_anode->ident(), face, plane); if (dead_winds.find(wind) == dead_winds.end()) { dead_winds[wind] = {std::min(xbeg,xend)-0.1*units::cm, std::max(xbeg,xend) + 0.1*units::cm}; } else { @@ -436,27 +405,30 @@ void PointTreeBuilding::add_dead_winds(Points::node_ptr& root, const WireCell::I // log->debug("dead wind {} xbeg {} xend {}", wind, xbeg_xend.first, xbeg_xend.second); // } // } - log->debug("got dead winds {} {} {} ", grouping->get_dead_winds(0, 0).size(), grouping->get_dead_winds(0, 1).size(), - grouping->get_dead_winds(0, 2).size()); + log->debug("got dead winds {} {} {} ", grouping->get_dead_winds(m_anode->ident(), 0, 0).size(), grouping->get_dead_winds(m_anode->ident(), 0, 1).size(), + grouping->get_dead_winds(m_anode->ident(), 0, 2).size()); Facade::mapfp_t> xbegs, xends; Facade::mapfp_t> winds; for (const auto& face : faces) { for (const auto& plane : planes) { - for (const auto& [wind, xbeg_xend] : grouping->get_dead_winds(face, plane)) { + for (const auto& [wind, xbeg_xend] : grouping->get_dead_winds(m_anode->ident(), face, plane)) { xbegs[face][plane].push_back(xbeg_xend.first); xends[face][plane].push_back(xbeg_xend.second); winds[face][plane].push_back(wind); } } } + int anode_ident = m_anode->ident(); + std::vector plane_names = {"U", "V", "W"}; for (const auto& face : faces) { for (const auto& plane : planes) { Dataset ds; ds.add("xbeg", Array(xbegs[face][plane])); ds.add("xend", Array(xends[face][plane])); ds.add("wind", Array(winds[face][plane])); - const std::string ds_name = String::format("dead_winds_f%dp%d", face, plane); + const std::string ds_name = String::format("dead_winds_a%df%dp%d",anode_ident, face, plane_names[plane]); + // const std::string 
ds_name = String::format("dead_winds_f%dp%d", face, plane); // root->insert(Points(named_pointclouds_t{{ds_name, std::move(ds)}})); root->value.local_pcs().emplace(ds_name, ds); // log->debug("added point cloud {} with {} points", ds_name, xbeg.size()); @@ -503,14 +475,38 @@ bool PointTreeBuilding::operator()(const input_vector& invec, output_pointer& te datapath = String::format(datapath, ident); } - const auto& tp_json = m_geomhelper->get_params(m_anode->ident(), m_face); - Points::node_ptr root_live = sample_live(iclus_live, tp_json["tick"].asDouble(), tp_json["angle_u"].asDouble(), - tp_json["angle_v"].asDouble(), tp_json["angle_w"].asDouble()); + // const auto& tp_json = m_geomhelper->get_params(m_anode->ident(), m_face); + + // fixme: this replicates functionality in pimpos. + std::vector angles(3); + for (size_t ind=0; ind<3; ++ind) { + const auto layer = iplane2layer[ind]; // in WirePlaneId.h + WirePlaneId wpid(layer, m_face, m_anode->ident()); + Vector wire_dir = m_dv->wire_direction(wpid); + angles[ind] = std::atan2(wire_dir.z(), wire_dir.y()); + } + + WirePlaneId wpid_all(kAllLayers, m_face, m_anode->ident()); + double tick = get_tick(wpid_all); + + Points::node_ptr root_live = sample_live(iclus_live, tick, angles); auto grouping = root_live->value.facade(); - grouping->set_anode(m_anode); - grouping->set_params(tp_json); + grouping->set_anodes({m_anode}); + // grouping->set_params(tp_json); add_ctpc(root_live, iclus_live); add_dead_winds(root_live, iclus_live); + + /// DEBUGONLY + { + std::vector layers = {kUlayer, kVlayer, kWlayer}; + for (const auto& layer : layers) { + WirePlaneId wpid(layer, m_face, m_anode->ident()); + int face_dirx = m_dv->face_dirx(wpid); + Vector wire_direction = m_dv->wire_direction(wpid); + Vector pitch_vector = m_dv->pitch_vector(wpid); + log->debug("wpid.name {} face_dirx {} wire_direction {} pitch_vector {}", wpid.name(), face_dirx, wire_direction, pitch_vector); + } + } /// TODO: remove after debugging // { // for (const 
auto& [name, pc] : root_live->value.local_pcs()) { @@ -585,7 +581,7 @@ bool PointTreeBuilding::operator()(const input_vector& invec, output_pointer& te if(ident != iclus_dead->ident()) { raise("ident mismatch between live and dead clusters"); } - Points::node_ptr root_dead = sample_dead(iclus_dead, tp_json["tick"].asDouble()); + Points::node_ptr root_dead = sample_dead(iclus_dead, tick); /// DEBUGONLY: // { // Facade::Grouping& dead_grouping = *root_dead->value.facade(); diff --git a/clus/src/PointTreeMerging.cxx b/clus/src/PointTreeMerging.cxx new file mode 100644 index 000000000..1d6a9a3aa --- /dev/null +++ b/clus/src/PointTreeMerging.cxx @@ -0,0 +1,153 @@ +#include "WireCellClus/PointTreeMerging.h" +#include "WireCellUtil/PointTree.h" +#include "WireCellUtil/NamedFactory.h" +#include "WireCellUtil/ExecMon.h" + +#include "WireCellAux/TensorDMpointtree.h" +#include "WireCellAux/TensorDMcommon.h" + +WIRECELL_FACTORY(PointTreeMerging, WireCell::Clus::PointTreeMerging, + WireCell::INamed, + WireCell::ITensorSetFanin, + WireCell::IConfigurable) + +using namespace WireCell; +using namespace WireCell::Clus; +using namespace WireCell::PointCloud::Tree; +using namespace WireCell::Aux; +using namespace WireCell::Aux::TensorDM; + + +Clus::PointTreeMerging::PointTreeMerging() + : Aux::Logger("PointTreeMerging", "clus") +{ +} + +std::vector Clus::PointTreeMerging::input_types() +{ + log->debug("m_multiplicity {}", m_multiplicity); + const std::string tname = std::string(typeid(input_type).name()); + std::vector ret(m_multiplicity, tname); + log->debug("input_types: ret.size() {}", ret.size()); + return ret; +} + +void Clus::PointTreeMerging::configure(const WireCell::Configuration& cfg) +{ + m_inpath = get(cfg, "inpath", m_inpath); + m_outpath = get(cfg, "outpath", m_outpath); + m_multiplicity = get(cfg, "multiplicity", m_multiplicity); + log->debug("{}", cfg); + log->debug("m_multiplicity {}", m_multiplicity); +} + +WireCell::Configuration 
Clus::PointTreeMerging::default_configuration() const +{ + Configuration cfg; + return cfg; +} + +void Clus::PointTreeMerging::finalize() +{ +} + +static void merge_pct(Points::node_t* tgt, Points::node_t* src) +{ + if (!src) { + return; + } + + // merge local pcs for root node + auto tgt_pc = tgt->value.local_pcs(); + for (const auto& src_pc : src->value.local_pcs()) { + auto name = src_pc.first; + if (tgt_pc.find(name) == tgt_pc.end()) { + tgt_pc.emplace(name, src_pc.second); + } else { + auto& tgt_pcds = tgt_pc[name]; + tgt_pcds.append(src_pc.second); + } + } + + // merge children + bool notify_value = true; + tgt->take_children(*src, notify_value); +} + + +bool Clus::PointTreeMerging::operator()(const input_vector& invec, output_pointer& outts) +{ + outts = nullptr; + if (invec.empty()) { + raise("no input tensors"); + return true; + } + // check input size + if (invec.size() != m_multiplicity) { + raise("unexpected multiplicity got %d want %d", invec.size(), m_multiplicity); + return true; + } + // boilerplate for EOS handling + size_t neos = 0; + for (const auto& in : invec) { + if (!in) { ++neos; } + } + if (neos == invec.size()) { + // all inputs are EOS, good. 
+ log->debug("EOS at call {}", m_count++); + return true; + } + if (neos) { raise("missing %d input tensors ", neos); } + + const int ident = invec[0]->ident(); + + // input preparation + std::string inpath = m_inpath; + if (inpath.find("%") != std::string::npos) { + inpath = String::format(inpath, ident); + } + auto root_live = as_pctree(*invec[0]->tensors(), inpath + "/live"); + if (!root_live) { + log->error("Failed to get point cloud tree from \"{}\"", inpath + "/live"); + return false; + } + auto root_dead = as_pctree(*invec[0]->tensors(), inpath + "/dead"); + if (!root_dead) { + log->error("Failed to get point cloud tree from \"{}\"", inpath + "/dead"); + return false; + } + + // merge + for (size_t i = 1; i < invec.size(); ++i) { + if (!invec[i]) { + raise("missing input tensor %d", i); + } + merge_pct(root_live.get(), as_pctree(*invec[i]->tensors(), inpath + "/live").get()); + // log->debug("live root node {} with {} children", i, root_live->nchildren()); + merge_pct(root_dead.get(), as_pctree(*invec[i]->tensors(), inpath + "/dead").get()); + } + + + log->debug("merged live PC tree with {} children", root_live->nchildren()); + log->debug("merged dead PC tree with {} children", root_dead->nchildren()); + + // output + std::string outpath = m_outpath; + if (outpath.find("%") != std::string::npos) { + outpath = String::format(outpath, ident); + } + auto outtens = as_tensors(*root_live.get(), outpath + "/live"); + auto outtens_dead = as_tensors(*root_dead.get(), outpath + "/dead"); + outtens.insert(outtens.end(), outtens_dead.begin(), outtens_dead.end()); + for(const auto& ten : outtens) { + log->debug("outtens {} {}", ten->metadata()["datapath"].asString(), ten->size()); + break; + } + outts = as_tensorset(outtens, ident); + + root_live = nullptr; + root_dead = nullptr; + + m_count++; + return true; +} diff --git a/clus/src/SteinerFunctions.cxx b/clus/src/SteinerFunctions.cxx new file mode 100644 index 000000000..ff40d1ab4 --- /dev/null +++ 
b/clus/src/SteinerFunctions.cxx @@ -0,0 +1,23 @@ +#include "WireCellUtil/Exceptions.h" +#include "SteinerFunctions.h" + +using namespace WireCell; +using namespace WireCell::Clus; + +void Steiner::improve_grapher_2(Steiner::Grapher& grapher/*, ...*/) +{ + raise("not implemented"); +} +void Steiner::improve_grapher_1(Steiner::Grapher& grapher/*,...*/) +{ + raise("not implemented"); +} +void Steiner::improve_grapher(Steiner::Grapher& grapher/*,...*/) +{ + raise("not implemented"); +} +void Steiner::improve_grapher(Steiner::Grapher& grapher, Steiner::Grapher& other_grapher/*,...*/) +{ + raise("not implemented"); +} + diff --git a/clus/src/SteinerFunctions.h b/clus/src/SteinerFunctions.h new file mode 100644 index 000000000..c3e4f7186 --- /dev/null +++ b/clus/src/SteinerFunctions.h @@ -0,0 +1,28 @@ +/** + These hold steiner-related free functions. + + */ + +#ifndef WIRECELLCLUS_STEINERFUNCTIONS +#define WIRECELLCLUS_STEINERFUNCTIONS + +#include "SteinerGrapher.h" + +namespace WireCell::Clus::Steiner { + + // Xin, these certainly require the "..."'s to be expanded as we figure out + // WCT data types that are equivalent to what WCP uses. + + // Xin, in keeping with "grapher" == "pr3dcluster" I name these accordingly. + // If they do NOT need the extra part of "grapher" it would be better to + // pass the underlying Cluster and then rename the functions accordingly. 
+ + void improve_grapher_2(Grapher& grapher/*, ...*/); + void improve_grapher_1(Grapher& grapher/*,...*/); + void improve_grapher(Grapher& grapher/*,...*/); + void improve_grapher(Grapher& grapher, Grapher& other_grapher/*,...*/); + +} + + +#endif diff --git a/clus/src/SteinerGrapher.cxx b/clus/src/SteinerGrapher.cxx new file mode 100644 index 000000000..c0ceff4e6 --- /dev/null +++ b/clus/src/SteinerGrapher.cxx @@ -0,0 +1,1062 @@ +#include "WireCellUtil/Exceptions.h" +#include "SteinerGrapher.h" + +#include "WireCellUtil/Units.h" +#include "WireCellUtil/Point.h" +#include "WireCellClus/Graphs.h" +#include "WireCellClus/DynamicPointCloud.h" +#include +#include +#include +#include + +using namespace WireCell; +using namespace WireCell::Clus; +using namespace WireCell::Clus::Facade; + + +void Steiner::Grapher::create_steiner_tree( + const Facade::Cluster* reference_cluster, // may not be the same as m_cluster + const std::vector& path_point_indices, // of m_cluster + const std::string& graph_name, + const std::string& steiner_graph_name, + bool disable_dead_mix_cell, + const std::string& steiner_pc_name) +{ + log->debug("create_steiner_tree: starting with reference_cluster={}, path_size={}", + (reference_cluster ? 
"provided" : "null"), path_point_indices.size()); + + // Phase 1: Find initial steiner terminals + vertex_set steiner_terminals = find_steiner_terminals(graph_name, disable_dead_mix_cell); + log->debug("create_steiner_tree: found {} initial steiner terminals", steiner_terminals.size()); + + std::cout << "Test1: " << steiner_terminals.size() << std::endl; + + if (steiner_terminals.empty()) { + log->warn("create_steiner_tree: no steiner terminals found, returning empty graph"); + return; + } + + // Phase 2: Apply reference cluster spatial filtering + if (reference_cluster) { + vertex_set original_size = steiner_terminals; + steiner_terminals = filter_by_reference_cluster(steiner_terminals, reference_cluster); + log->debug("create_steiner_tree: reference cluster filtering: {} -> {} terminals", + original_size.size(), steiner_terminals.size()); + } + + std::cout << "Test2: " << steiner_terminals.size() << std::endl; + + + // Phase 3: Apply path-based filtering if path is provided + if (!path_point_indices.empty()) { + vertex_set pre_path_size = steiner_terminals; + steiner_terminals = filter_by_path_constraints(steiner_terminals, path_point_indices); + log->debug("create_steiner_tree: path filtering: {} -> {} terminals", + pre_path_size.size(), steiner_terminals.size()); + } + + std::cout << "Test3: " << steiner_terminals.size() << std::endl; + + + // Phase 4: Add extreme points + vertex_set extreme_points = get_extreme_points_for_reference(reference_cluster); + steiner_terminals.insert(extreme_points.begin(), extreme_points.end()); + log->debug("create_steiner_tree: added {} extreme points, total terminals: {}", + extreme_points.size(), steiner_terminals.size()); + + std::cout << "Test4: " << steiner_terminals.size() << std::endl; + + + if (steiner_terminals.empty()) { + log->warn("create_steiner_tree: no terminals remain after filtering, returning empty graph"); + return; + } + + const auto& base_graph = get_graph(graph_name); + + const auto& original_pc = 
get_point_cloud("default"); // default pc ... + + // Configure charge weighting to match prototype values + Graphs::Weighted::ChargeWeightingConfig charge_config; + charge_config.Q0 = 10000.0; // From prototype + charge_config.factor1 = 0.8; // From prototype + charge_config.factor2 = 0.4; // From prototype + charge_config.enable_weighting = true; // Enable charge weighting + + // Use the enhanced approach with cluster reference for charge calculation + auto steiner_result = Graphs::Weighted::create_enhanced_steiner_graph( + base_graph, steiner_terminals, original_pc, m_cluster, charge_config); + + // std::cout << "Test5: " << " Graph vertices: " << boost::num_vertices(steiner_result.graph) << ", edges: " << boost::num_edges(steiner_result.graph) << std::endl; + + // just run this once the steiner graph is created + Graphs::Weighted::establish_same_blob_steiner_edges_steiner_graph(steiner_result, m_cluster); + + // std::cout << "Test5: " << " Graph vertices: " << boost::num_vertices(steiner_result.graph) << ", edges: " << boost::num_edges(steiner_result.graph) << std::endl; + + // Phase 6: Store results for later access + m_flag_steiner_terminal = steiner_result.flag_steiner_terminal; + m_old_to_new_index = steiner_result.old_to_new_index; + m_new_to_old_index = steiner_result.new_to_old_index; + + // Store the subset point cloud + if (!steiner_pc_name.empty()) { + put_point_cloud(std::move(steiner_result.point_cloud), steiner_pc_name); + log->debug("create_steiner_tree: created steiner subset point cloud '{}'", steiner_pc_name); + } + + // can I do this to store the graph ??? 
+ m_cluster.give_graph(steiner_graph_name, std::move(steiner_result.graph)); + + + log->debug("create_steiner_tree: created reduced steiner graph with {} vertices (was {}), {} edges", + boost::num_vertices(steiner_result.graph), boost::num_vertices(base_graph), + boost::num_edges(steiner_result.graph)); + + + // return steiner_result.graph; + +} + +// ======================================== +// Helper Method Implementations +// ======================================== + +Steiner::Grapher::vertex_set Steiner::Grapher::filter_by_reference_cluster( + const vertex_set& terminals, + const Facade::Cluster* reference_cluster) const +{ + if (!reference_cluster) { + return terminals; + } + + vertex_set filtered_terminals; + + // Get reference cluster's time-blob mapping + const auto& ref_time_blob_map = reference_cluster->time_blob_map(); // this one has the time blob map ... + + if (ref_time_blob_map.empty()) { + log->debug("filter_by_reference_cluster: reference cluster has empty time_blob_map"); + return terminals; + } + + // Filter terminals based on spatial relationship with reference cluster + for (auto terminal_idx : terminals) { + //std::cout << "Test: " << terminal_idx << " " << ref_time_blob_map.size() << std::endl; + if (is_point_spatially_related_to_reference(terminal_idx, ref_time_blob_map)) { + filtered_terminals.insert(terminal_idx); + } + } + + return filtered_terminals; +} + +Steiner::Grapher::vertex_set Steiner::Grapher::filter_by_path_constraints( + const vertex_set& terminals, + const std::vector& path_point_indices) const +{ + if (path_point_indices.empty()) { + return terminals; + } + + std::vector wpids_vec = m_cluster.wpids_blob(); + std::set wpids_set(wpids_vec.begin(), wpids_vec.end()); + std::vector wpids(wpids_set.begin(), wpids_set.end()); + + std::map> wpid_params; + + // Access the detector volumes from the config + IDetectorVolumes::pointer dv = m_config.dv; + + + for (const auto& wpid : wpids) { + int apa = wpid.apa(); + int face = 
wpid.face(); + + // Create wpids for all three planes with this APA and face + WirePlaneId wpid_u(kUlayer, face, apa); + WirePlaneId wpid_v(kVlayer, face, apa); + WirePlaneId wpid_w(kWlayer, face, apa); + + // Get drift direction based on face orientation + int face_dirx = dv->face_dirx(wpid_u); + geo_point_t drift_dir(face_dirx, 0, 0); + + // Get wire directions for all planes + Vector wire_dir_u = dv->wire_direction(wpid_u); + Vector wire_dir_v = dv->wire_direction(wpid_v); + Vector wire_dir_w = dv->wire_direction(wpid_w); + + // Calculate angles + double angle_u = std::atan2(wire_dir_u.z(), wire_dir_u.y()); + double angle_v = std::atan2(wire_dir_v.z(), wire_dir_v.y()); + double angle_w = std::atan2(wire_dir_w.z(), wire_dir_w.y()); + + wpid_params[wpid] = std::make_tuple(drift_dir, angle_u, angle_v, angle_w); + } + + auto path_point_cloud = std::make_shared(wpid_params); + const double step_dis = 0.6 * units::cm; + + path_point_cloud->add_points(make_points_cluster_skeleton(&m_cluster, dv, wpid_params, path_point_indices, false, step_dis)); // check path_indices & m_cluster + + // std::cout << path_point_cloud->get_points().size() << " points in path point cloud" << std::endl; + + + + vertex_set filtered_terminals; + + // Distance thresholds from prototype + const double distance_3d_threshold = 6.0 * units::cm; + const double distance_2d_threshold = 1.8 * units::cm; + + for (auto terminal_idx : terminals) { + Point point = m_cluster.point3d(terminal_idx); + auto wpid = m_cluster.wpid(terminal_idx); + + // Calculate distances similar to prototype's logic + + auto result_3d = path_point_cloud->kd3d().knn(1, point); + double dis_3d = sqrt(result_3d[0].second); + + auto dis_2d_u = path_point_cloud->get_closest_2d_point_info(point, 0, wpid.face(), wpid.apa()); + auto dis_2d_v = path_point_cloud->get_closest_2d_point_info(point, 1, wpid.face(), wpid.apa()); + auto dis_2d_w = path_point_cloud->get_closest_2d_point_info(point, 2, wpid.face(), wpid.apa()); + + double 
dis_2d[3]={std::get<0>(dis_2d_u), std::get<0>(dis_2d_v), std::get<0>(dis_2d_w)}; + + + // Apply prototype's filtering logic: + // Remove if close in 2D projections but far in 3D + bool close_in_2d = (dis_2d[0] < distance_2d_threshold && dis_2d[1] < distance_2d_threshold) || + (dis_2d[0] < distance_2d_threshold && dis_2d[2] < distance_2d_threshold) || + (dis_2d[1] < distance_2d_threshold && dis_2d[2] < distance_2d_threshold); + bool should_remove = close_in_2d && (dis_3d > distance_3d_threshold); + + // std::cout << "Test1: " << point << " " << dis_3d << " " << dis_2d[0] << " " << dis_2d[1] << " " << dis_2d[2] << std::endl; + + if (!should_remove) { + filtered_terminals.insert(terminal_idx); + } + } + + return filtered_terminals; +} + +Steiner::Grapher::vertex_set Steiner::Grapher::get_extreme_points_for_reference( + const Facade::Cluster* reference_cluster) const +{ + vertex_set extreme_points; + + // Use cluster's existing extreme point calculation + // If reference cluster is provided, we should get extreme points that are + // spatially related to it. For now, use all extreme points and filter later. 
+ + try { + // Get extreme points using cluster's method + // This maps to prototype's get_extreme_wcps() functionality + auto extreme_point_groups = m_cluster.get_extreme_wcps(reference_cluster); + + // Convert extreme points to vertex indices + for (const auto& point_group : extreme_point_groups) { + for (const auto& point : point_group) { + // Find the vertex index for this point + // This requires mapping 3D point back to vertex index + auto closest_idx = find_closest_vertex_to_point(point); + if (closest_idx != SIZE_MAX) { + extreme_points.insert(closest_idx); + } + } + } + } catch (const std::exception& e) { + log->warn("get_extreme_points_for_reference: failed to get extreme points: {}", e.what()); + } + + return extreme_points; +} + + + + + +bool Steiner::Grapher::is_point_spatially_related_to_reference( + size_t point_idx, + const Facade::Cluster::time_blob_map_t& ref_time_blob_map) const +{ + // Delegate to the cluster's existing method which implements the proper logic + // for checking spatial relationships with the complex time_blob_map structure + return m_cluster.is_point_spatially_related_to_time_blobs(point_idx, ref_time_blob_map, true); +} + + +// These methods need cluster-specific implementation: +size_t Steiner::Grapher::find_closest_vertex_to_point(const Point& point) const +{ + // Find the vertex index closest to the given 3D point + // check scope ??? 
+ auto closest_idx = m_cluster.get_closest_point_index(point); + + return closest_idx; +} + + + +// Overloaded version for specific blobs (equivalent to prototype's mcells parameter) +Steiner::Grapher::vertex_set Steiner::Grapher::find_peak_point_indices( + const std::vector& target_blobs, const std::string& graph_name, + bool disable_dead_mix_cell, int nlevel) +{ + vertex_set peak_points; + + if (target_blobs.empty()) { + return peak_points; + } + + // Get the blob-to-points mapping + auto cell_points_map = form_cell_points_map(); + + // Collect indices only from target blobs + vertex_set all_indices; + for (const auto* blob : target_blobs) { + auto it = cell_points_map.find(blob); + if (it != cell_points_map.end()) { + all_indices.insert(it->second.begin(), it->second.end()); + } + } + + if (all_indices.empty()) { + return peak_points; + } + + // Rest of the implementation is similar to the above version + // but only operates on the filtered point indices + + // Calculate charges and find candidates + std::map map_index_charge; + std::set, std::greater>> candidates_set; + + const double charge_threshold = 4000.0; + + for (size_t point_idx : all_indices) { + auto [charge_quality, charge] = m_cluster.calc_charge_wcp(point_idx, charge_threshold, disable_dead_mix_cell); + + map_index_charge[point_idx] = charge; + + if (charge > charge_threshold && charge_quality) { + candidates_set.insert(std::make_pair(charge, point_idx)); + } + } + + // std::cout << "Xin2: candidates_set size: " << candidates_set.size() << std::endl; + + if (candidates_set.empty()) { + return peak_points; + } + + // Get access to the underlying boost graph + // NOTE: This assumes there's a way to get the graph - you'll need to implement + // get_graph() method or similar in your Cluster interface + const auto& graph = m_cluster.get_graph(graph_name); // You need to implement this + + std::set peak_indices; + std::set non_peak_indices; + + // Core algorithm from prototype: process candidates in 
order of decreasing charge + for (const auto& [current_charge, current_index] : candidates_set) { + + // Find all vertices within nlevel hops using graph traversal (prototype logic) + std::set total_vertices_found; + total_vertices_found.insert(current_index); + + // Breadth-first exploration for nlevel steps + std::set vertices_to_be_examined; + vertices_to_be_examined.insert(current_index); + + for (int level = 0; level < nlevel; ++level) { + std::set vertices_saved_for_next; + + for (size_t temp_current_index : vertices_to_be_examined) { + // Get adjacent vertices using boost graph interface + auto [neighbors_begin, neighbors_end] = boost::adjacent_vertices(temp_current_index, graph); + + for (auto neighbor_it = neighbors_begin; neighbor_it != neighbors_end; ++neighbor_it) { + size_t neighbor_index = *neighbor_it; + + if (total_vertices_found.find(neighbor_index) == total_vertices_found.end()) { + total_vertices_found.insert(neighbor_index); + vertices_saved_for_next.insert(neighbor_index); + } + } + } + vertices_to_be_examined = vertices_saved_for_next; + } + total_vertices_found.erase(current_index); + + // Peak selection logic (following prototype) + if (peak_indices.empty()) { + // First candidate becomes a peak + peak_indices.insert(current_index); + + // Mark neighbors with lower charge as non-peaks + for (size_t neighbor_idx : total_vertices_found) { + if (map_index_charge.find(neighbor_idx) == map_index_charge.end()) continue; + + if (current_charge > map_index_charge[neighbor_idx]) { + non_peak_indices.insert(neighbor_idx); + } + } + } else { + // Skip if already classified + if (peak_indices.find(current_index) != peak_indices.end() || + non_peak_indices.find(current_index) != non_peak_indices.end()) { + continue; + } + + bool flag_insert = true; + + // Check against neighbors + for (size_t neighbor_idx : total_vertices_found) { + if (map_index_charge.find(neighbor_idx) == map_index_charge.end()) continue; + + if (current_charge > 
map_index_charge[neighbor_idx]) { + non_peak_indices.insert(neighbor_idx); + } else if (current_charge < map_index_charge[neighbor_idx]) { + flag_insert = false; + break; + } + } + + if (flag_insert) { + peak_indices.insert(current_index); + } + } + } + + // Connected components analysis to merge nearby peaks (prototype logic) + if (peak_indices.size() > 1) { + std::vector vec_peak_indices(peak_indices.begin(), peak_indices.end()); + peak_indices.clear(); + + const size_t N = vec_peak_indices.size(); + + // Create temporary graph for peak connectivity + boost::adjacency_list> + temp_graph(N); + + // Check connectivity in original graph and replicate in temp graph + for (size_t j = 0; j < N; ++j) { + for (size_t k = 0; k < N; ++k) { + size_t index1 = j; + size_t index2 = k; + + // Check if corresponding vertices are connected in original graph + if (boost::edge(vec_peak_indices[index1], vec_peak_indices[index2], graph).second) { + boost::add_edge(index1, index2, temp_graph); + } + } + } + + // Find connected components + std::vector component(boost::num_vertices(temp_graph)); + const int num = boost::connected_components(temp_graph, &component[0]); + + // For each component, find the point closest to center of mass + std::vector min_dis(num, 1e9); + std::vector points(num, WireCell::Point(0, 0, 0)); + std::vector min_index(num, 0); + std::vector ncounts(num, 0); + + // Calculate center of mass for each component + for (size_t i = 0; i < component.size(); ++i) { + ncounts[component[i]]++; + auto point = m_cluster.point3d(vec_peak_indices[i]); + points[component[i]] = points[component[i]] + WireCell::Vector(point.x(), point.y(), point.z()); + } + + // Average the positions + for (int i = 0; i < num; ++i) { + if (ncounts[i] > 0) { + points[i] = points[i] * (1.0 / ncounts[i]); + } + } + + // Find closest point to center for each component + for (size_t i = 0; i < component.size(); ++i) { + auto point = m_cluster.point3d(vec_peak_indices[i]); + + double dis = 
pow(points[component[i]].x() - point.x(), 2) + + pow(points[component[i]].y() - point.y(), 2) + + pow(points[component[i]].z() - point.z(), 2); + + if (dis < min_dis[component[i]]) { + min_dis[component[i]] = dis; + min_index[component[i]] = vec_peak_indices[i]; + } + } + + // Add the representative points to the result + for (int i = 0; i < num; ++i) { + peak_indices.insert(min_index[i]); + } + } + + return peak_indices; +} + + +Steiner::Grapher::vertex_set Steiner::Grapher::find_steiner_terminals(const std::string& graph_name, bool disable_dead_mix_cell) +{ + vertex_set steiner_terminals; + + // Get the blob-to-points mapping + auto cell_points_map = form_cell_points_map(); + + if (cell_points_map.empty()) { + return steiner_terminals; + } + + // Process each blob individually (following prototype pattern) + for (const auto& [blob, point_indices] : cell_points_map) { + // Create a single-blob vector for processing + std::vector single_blob = {blob}; + + // Find peak points for this specific blob + auto blob_peaks = find_peak_point_indices(single_blob, graph_name, disable_dead_mix_cell); + + // Add to overall steiner terminals set + steiner_terminals.insert(blob_peaks.begin(), blob_peaks.end()); + } + + return steiner_terminals; +} + + + +Steiner::Grapher::blob_vertex_map Steiner::Grapher::form_cell_points_map() +{ + blob_vertex_map cell_points; + + // Get the 3D scoped view with x,y,z coordinates + const auto& sv = m_cluster.sv3d(); // This is default scoped view with 3D coordinates ... based on index, can connect to raw then 2D ... 
+ const auto& nodes = sv.nodes(); // These are the blob nodes + const auto& skd = sv.kd(); // K-d tree with point data + + // Check if we have valid data + if (nodes.empty() || skd.npoints() == 0) { + return cell_points; // Return empty map if no data + } + + // The major indices tell you which blob each point belongs to + const auto& majs = skd.major_indices(); // blob index for each point + + // Iterate through all points and assign them to their respective blobs + for (size_t point_idx = 0; point_idx < skd.npoints(); ++point_idx) { + size_t blob_idx = majs[point_idx]; + + // Bounds check + if (blob_idx >= nodes.size()) { + continue; // Skip invalid blob indices + } + + // Get the blob facade from the node + const auto* blob = nodes[blob_idx]->value.facade(); + if (!blob) { + continue; // Skip if facade creation failed + } + + // Initialize the set for this blob if it doesn't exist + if (cell_points.find(blob) == cell_points.end()) { + cell_points[blob] = vertex_set(); + } + + // Add this point index to the blob's set + cell_points[blob].insert(point_idx); + } + + return cell_points; +} + +void Steiner::Grapher::establish_same_blob_steiner_edges(const std::string& graph_name, + bool disable_dead_mix_cell) +{ + if (!m_cluster.has_graph(graph_name)) { + log->error("Graph '{}' does not exist in cluster", graph_name); + return; + } + + auto& graph = m_cluster.get_graph(graph_name); + edge_set added_edges; + + // Step 1: Find Steiner terminals using the existing implementation + vertex_set steiner_terminals = find_steiner_terminals(graph_name, disable_dead_mix_cell); + + log->debug("Found {} Steiner terminals for same-blob edge establishment", steiner_terminals.size()); + + // Step 2: Get the blob-to-points mapping (equivalent to map_mcell_all_indices in prototype) + auto cell_points_map = form_cell_points_map(); + + if (cell_points_map.empty()) { + log->warn("No blob-to-points mapping available for Steiner edge establishment"); + return; + } + + 
log->debug("Processing {} blobs for same-blob edges", cell_points_map.size()); + + // std::cout << "Xin3: " << " Graph vertices: " << boost::num_vertices(graph) << ", edges: " << boost::num_edges(graph) << std::endl; + + + // Step 3: For each blob, add edges between all pairs of points (following prototype logic) + for (const auto& [blob, point_indices] : cell_points_map) { + if (point_indices.size() < 2) { + continue; // Need at least 2 points to make edges + } + + log->debug("Processing blob with {} points", point_indices.size()); + + // Convert set to vector for easier iteration + std::vector points_vec(point_indices.begin(), point_indices.end()); + + // Add edges between all pairs of points in the same blob (following prototype) + for (size_t i = 0; i < points_vec.size(); ++i) { + vertex_type index1 = points_vec[i]; + bool flag_index1 = (steiner_terminals.find(index1) != steiner_terminals.end()); + + for (size_t j = i + 1; j < points_vec.size(); ++j) { + vertex_type index2 = points_vec[j]; + bool flag_index2 = (steiner_terminals.find(index2) != steiner_terminals.end()); + + // Calculate base distance between points + double distance = calculate_distance(index1, index2); + + // Determine edge weight based on terminal status (following prototype logic) + double edge_weight = 0.0; + bool add_edge = false; + + if (flag_index1 && flag_index2) { + // Both are steiner terminals: weight = distance * 0.8 + edge_weight = distance * 0.8; + add_edge = true; + } else if (flag_index1 || flag_index2) { + // One is steiner terminal: weight = distance * 0.9 + edge_weight = distance * 0.9; + add_edge = true; + } + // If neither is a steiner terminal, don't add edge (add_edge stays false) + + if (add_edge) { + // Add edge with calculated weight + if (!boost::edge(index1, index2, graph).second) { + auto [edge, success] = boost::add_edge(index1, index2, edge_weight, graph); + if (success) { + added_edges.insert(edge); + log->debug("Added same-blob edge: {} -- {} (distance: {:.3f} 
cm, weight: {:.3f}, terminals: {}/{})", + index1, index2, distance / units::cm, edge_weight / units::cm, + flag_index1 ? "T" : "N", flag_index2 ? "T" : "N"); + } + } + } + } + } + } + + // std::cout << "Xin3: " << " Graph vertices: " << boost::num_vertices(graph) << ", edges: " << boost::num_edges(graph) << std::endl; + + + // Store the added edges for later removal + store_added_edges(graph_name, added_edges); + + // Invalidate any cached GraphAlgorithms that use this graph + invalidate_graph_algorithms_cache(graph_name); + + // log->info("Added {} same-blob edges to graph '{}' from {} total points ({} steiner terminals)", + // added_edges.size(), graph_name, + // std::accumulate(cell_points_map.begin(), cell_points_map.end(), 0, + // [](int sum, const auto& pair) { return sum + pair.second.size(); }), + // steiner_terminals.size()); +} + + +void Steiner::Grapher::remove_same_blob_steiner_edges(const std::string& graph_name) +{ + if (!m_cluster.has_graph(graph_name)) { + log->warn("Graph '{}' does not exist, cannot remove edges", graph_name); + return; + } + + auto it = m_added_edges_by_graph.find(graph_name); + if (it == m_added_edges_by_graph.end() || it->second.empty()) { + log->debug("No edges to remove for graph '{}'", graph_name); + return; + } + + auto& graph = m_cluster.get_graph(graph_name); + const auto& edges_to_remove = it->second; + size_t removed_count = 0; + + // Remove the edges + for (const auto& edge : edges_to_remove) { + boost::remove_edge(edge, graph); + ++removed_count; + } + + // Clear the tracking for this graph + it->second.clear(); + + // Invalidate any cached GraphAlgorithms that use this graph + invalidate_graph_algorithms_cache(graph_name); + + (void)removed_count; + // log->info("Removed {} same-blob Steiner edges from graph '{}'", removed_count, graph_name); +} + +void Steiner::Grapher::invalidate_graph_algorithms_cache(const std::string& graph_name) +{ + // Use the new public method we'll add to Cluster + 
m_cluster.clear_graph_algorithms_cache(graph_name); +} + +void Steiner::Grapher::store_added_edges(const std::string& graph_name, const edge_set& edges) +{ + // Add to existing set if graph already has tracked edges + auto& tracked_edges = m_added_edges_by_graph[graph_name]; + tracked_edges.insert(edges.begin(), edges.end()); +} + +bool Steiner::Grapher::same_blob(vertex_type v1, vertex_type v2) const +{ + const auto* blob1 = get_blob_for_vertex(v1); + const auto* blob2 = get_blob_for_vertex(v2); + return (blob1 && blob2 && blob1 == blob2); +} + +double Steiner::Grapher::calculate_distance(vertex_type v1, vertex_type v2) const +{ + // Use cluster's point3d method to get 3D coordinates + // This is the standard way to access point coordinates in the toolkit + auto point1 = m_cluster.point3d(v1); + auto point2 = m_cluster.point3d(v2); + + // Calculate Euclidean distance + double dx = point2.x() - point1.x(); + double dy = point2.y() - point1.y(); + double dz = point2.z() - point1.z(); + + return std::sqrt(dx*dx + dy*dy + dz*dz); +} + +const Facade::Blob* Steiner::Grapher::get_blob_for_vertex(vertex_type vertex) const +{ + const auto& sv = m_cluster.sv3d(); + const auto& nodes = sv.nodes(); + const auto& skd = sv.kd(); + + if (vertex >= skd.npoints()) { + return nullptr; + } + + const auto& majs = skd.major_indices(); + size_t blob_idx = majs[vertex]; + + if (blob_idx >= nodes.size()) { + return nullptr; + } + + return nodes[blob_idx]->value.facade(); +} + + + +namespace WireCell::Clus::Graphs::Weighted{ + + +double calculate_charge_weighted_distance( + double geometric_distance, + double charge_source, + double charge_target, + const ChargeWeightingConfig& config) +{ + if (!config.enable_weighting) { + return geometric_distance; + } + + // Apply prototype charge weighting formula + double weight_factor = config.factor1 + config.factor2 * + (0.5 * config.Q0 / (charge_source + config.Q0) + + 0.5 * config.Q0 / (charge_target + config.Q0)); + + return geometric_distance 
* weight_factor; +} + +std::map calculate_vertex_charges( + const vertex_set& vertices, + const PointCloud::Dataset& pc, + const WireCell::Clus::Facade::Cluster& cluster, + double charge_cut = 4000.0, + bool disable_dead_mix_cell = true) +{ + std::map charges; + + if (pc.size_major() == 0) { + return charges; + } + + // Calculate charge for each vertex using the existing cluster method + for (auto vtx : vertices) { + if (vtx < pc.size_major()) { + // Use the existing calc_charge_wcp method from Facade::Cluster + auto charge_result = cluster.calc_charge_wcp(vtx, charge_cut, disable_dead_mix_cell); + charges[vtx] = charge_result.second; + } + } + + return charges; +} + +void establish_same_blob_steiner_edges_steiner_graph(EnhancedSteinerResult& result, const WireCell::Clus::Facade::Cluster& cluster) { + using blob_vertex_map = std::map>; + blob_vertex_map cell_points_map; + + // Get the 3D scoped view to access blob information + const auto& sv = cluster.sv3d(); + const auto& nodes = sv.nodes(); + const auto& skd = sv.kd(); + const auto& majs = skd.major_indices(); + + // Loop over all (old_index, new_index) pairs in the mapping + for (const auto& [old_index, new_index] : result.old_to_new_index) { + // Get the blob for this vertex using the same logic as get_blob_for_vertex + size_t blob_idx = majs[old_index]; + const auto* blob = nodes[blob_idx]->value.facade(); + cell_points_map[blob].insert(new_index); + } + + const auto& coords = cluster.get_default_scope().coords; + + // std::cout << coords.at(0) << " " << coords.at(1) << " " << coords.at(2) << std::endl; + + // Get 3D coordinates from the subset point cloud + const auto& x_arr = result.point_cloud.get(coords.at(0))->elements(); + const auto& y_arr = result.point_cloud.get(coords.at(1))->elements(); + const auto& z_arr = result.point_cloud.get(coords.at(2))->elements(); + + for (const auto& [blob, point_indices] : cell_points_map) { + // Convert set to vector for easier iteration + std::vector 
points_vec(point_indices.begin(), point_indices.end()); + + // Add edges between all pairs of points in the same blob + for (size_t i = 0; i < points_vec.size(); ++i) { + vertex_type index1 = points_vec[i]; + Point point1(x_arr[index1], y_arr[index1], z_arr[index1]); + for (size_t j = i + 1; j < points_vec.size(); ++j) { + vertex_type index2 = points_vec[j]; + Point point2(x_arr[index2], y_arr[index2], z_arr[index2]); + + if (result.flag_steiner_terminal[index1] || result.flag_steiner_terminal[index2]){ + // Calculate base distance between points + double distance = (point1-point2).magnitude(); + + // Add edge with calculated weight + if (!boost::edge(index1, index2, result.graph).second) { + boost::add_edge(index1, index2, distance, result.graph); + } + } + } + } + } +} + + +// Updated enhanced steiner graph function to use the new charge calculation +EnhancedSteinerResult create_enhanced_steiner_graph( + const graph_type& base_graph, + const vertex_set& terminal_vertices, + const PointCloud::Dataset& original_pc, + const WireCell::Clus::Facade::Cluster& cluster, // Added cluster parameter + const ChargeWeightingConfig& charge_config, + bool disable_dead_mix_cell + ) +{ + using namespace WireCell::Clus::Graphs::Weighted; + + EnhancedSteinerResult result; + + // Step 1: Create Voronoi tessellation + std::vector terminal_vector(terminal_vertices.begin(), terminal_vertices.end()); + auto vor = voronoi(base_graph, terminal_vector); + + // Step 2: Build complete terminal distance map (matches prototype map_saved_edge) + auto edge_weight = get(boost::edge_weight, base_graph); + std::map> map_saved_edge; + std::vector all_terminal_connecting_edges; + + // Find best edges between all terminal pairs (matches prototype logic exactly) + auto [edge_iter, edge_end] = boost::edges(base_graph); + for (auto fine_edge : boost::make_iterator_range(edge_iter, edge_end)) { + const vertex_type fine_tail = boost::source(fine_edge, base_graph); + const vertex_type fine_head = 
boost::target(fine_edge, base_graph); + const double fine_distance = edge_weight[fine_edge]; + + const vertex_type term_tail = vor.terminal[fine_tail]; + const vertex_type term_head = vor.terminal[fine_head]; + + // Skip edges within same terminal region + if (term_tail == term_head) { + continue; + } + + // Calculate total distance: path_to_terminal + edge + path_to_terminal + const double total_distance = vor.distance[fine_tail] + fine_distance + vor.distance[fine_head]; + const vertex_pair term_vp = make_vertex_pair(term_tail, term_head); + + // Check if this is the best edge for this terminal pair (matches prototype logic) + auto it = map_saved_edge.find(term_vp); + if (it == map_saved_edge.end()) { + // Try reverse pair + vertex_pair reverse_vp = make_vertex_pair(term_head, term_tail); + auto reverse_it = map_saved_edge.find(reverse_vp); + if (reverse_it == map_saved_edge.end()) { + // First edge for this terminal pair + map_saved_edge[term_vp] = std::make_pair(total_distance, fine_edge); + } else if (total_distance < reverse_it->second.first) { + // Better than existing reverse pair + map_saved_edge.erase(reverse_it); + map_saved_edge[term_vp] = std::make_pair(total_distance, fine_edge); + } + } else if (total_distance < it->second.first) { + // Better than existing edge for this pair + it->second = std::make_pair(total_distance, fine_edge); + } + } + + // Step 3: Extract all selected terminal connecting edges (matches prototype terminal_edge) + for (const auto& [term_pair, edge_info] : map_saved_edge) { + all_terminal_connecting_edges.push_back(edge_info.second); + } + + // std::cout << "Terminal connecting edges: " << total.size() << std::endl; + + // Step 4: Build complete edge set by including all paths (matches prototype unique_edges logic) + vertex_set selected_vertices; + std::vector tree_edges; + + // For each terminal connecting edge, include it and all edges on paths back to terminals + for (auto edge : all_terminal_connecting_edges) { + // Add the 
direct connecting edge + tree_edges.push_back(edge); + + // Add all edges on paths from edge endpoints back to their terminals + // This matches the prototype's vpred walking logic exactly + for (auto endpoint : {boost::source(edge, base_graph), boost::target(edge, base_graph)}) { + vertex_type current_vtx = endpoint; + + // Walk back to terminal, adding all edges on the path + while (vor.terminal[current_vtx] != current_vtx) { + auto path_edge = vor.last_edge[current_vtx]; + tree_edges.push_back(path_edge); + current_vtx = boost::source(path_edge, base_graph); + } + } + } + + // Step 5: Remove duplicates and collect all vertices (matches prototype boost::unique logic) + std::sort(tree_edges.begin(), tree_edges.end()); + tree_edges.erase(std::unique(tree_edges.begin(), tree_edges.end()), tree_edges.end()); + + // std::cout << "Total unique edges in tree: " << tree_edges.size() << std::endl; + + // Collect all vertices from the unique edges + for (auto edge : tree_edges) { + selected_vertices.insert(boost::source(edge, base_graph)); + selected_vertices.insert(boost::target(edge, base_graph)); + } + + // Step 6: Create index mappings (same as before) + std::vector selected_vector(selected_vertices.begin(), selected_vertices.end()); + for (size_t i = 0; i < selected_vector.size(); ++i) { + result.old_to_new_index[selected_vector[i]] = i; + result.new_to_old_index[i] = selected_vector[i]; + } + + // Step 7: Create flag_steiner_terminal (same as before) + result.flag_steiner_terminal.resize(selected_vector.size()); + for (size_t i = 0; i < selected_vector.size(); ++i) { + vertex_type old_idx = selected_vector[i]; + result.flag_steiner_terminal[i] = (terminal_vertices.find(old_idx) != terminal_vertices.end()); + } + + // Step 8: Calculate charges for vertices (same as before) + if (original_pc.size_major() > 0 && charge_config.enable_weighting) { + result.vertex_charges = calculate_vertex_charges( + selected_vertices, + original_pc, + cluster, + 4000.0, // charge_cut 
from prototype + disable_dead_mix_cell + ); + } + + // Step 9: Create subset point cloud (same as before) + if (original_pc.size_major() > 0) { + std::vector subset_indices(selected_vertices.begin(), selected_vertices.end()); + result.point_cloud = original_pc.subset(subset_indices); + + // Add the flag_steiner_terminal boolean array to point cloud + // Convert std::vector to std::vector to avoid std::vector specialization issues + std::vector steiner_flags_uint8(result.flag_steiner_terminal.begin(), result.flag_steiner_terminal.end()); + PointCloud::Array steiner_flag_array(steiner_flags_uint8); + result.point_cloud.add("flag_steiner_terminal", std::move(steiner_flag_array)); + } + + // Step 10: Create reduced graph with ALL unique edges (this is the key fix) + result.graph = graph_type(selected_vector.size()); + + // Add ALL edges from tree_edges, not just the terminal connecting ones + for (auto edge : tree_edges) { + vertex_type old_source = boost::source(edge, base_graph); + vertex_type old_target = boost::target(edge, base_graph); + + // These should all be in selected set by construction, but check anyway + if (result.old_to_new_index.find(old_source) == result.old_to_new_index.end() || + result.old_to_new_index.find(old_target) == result.old_to_new_index.end()) { + continue; + } + + vertex_type new_source = result.old_to_new_index[old_source]; + vertex_type new_target = result.old_to_new_index[old_target]; + + double geometric_distance = edge_weight[edge]; + + // Apply charge weighting if enabled (exact prototype formula) + double final_distance = geometric_distance; + if (charge_config.enable_weighting && !result.vertex_charges.empty()) { + double charge_source = result.vertex_charges.count(old_source) ? + result.vertex_charges[old_source] : 0.0; + double charge_target = result.vertex_charges.count(old_target) ? 
+ result.vertex_charges[old_target] : 0.0; + + // Prototype formula: dis * (factor1 + factor2 * (0.5*Q0/(Qs+Q0) + 0.5*Q0/(Qt+Q0))) + double Q0 = charge_config.Q0; + double factor1 = charge_config.factor1; + double factor2 = charge_config.factor2; + + double weight_factor = factor1 + factor2 * + (0.5 * Q0 / (charge_source + Q0) + 0.5 * Q0 / (charge_target + Q0)); + + final_distance = geometric_distance * weight_factor; + } + if (!boost::edge(new_source, new_target, result.graph).second) { + // Add the edge with the final weighted distance + boost::add_edge(new_source, new_target, final_distance, result.graph); + } + } + + // std::cout << "Final graph - vertices: " << boost::num_vertices(result.graph) + // << ", edges: " << boost::num_edges(result.graph) << std::endl; + + result.steiner_terminal_indices = terminal_vertices; + return result; +} + +} diff --git a/clus/src/SteinerGrapher.h b/clus/src/SteinerGrapher.h new file mode 100644 index 000000000..ed5d2e3f5 --- /dev/null +++ b/clus/src/SteinerGrapher.h @@ -0,0 +1,335 @@ +/** This provides a "workspace" class for creating the steiner graph. + + It is meant to be equivalent to the Steiner-related SUBSET of WCP's + PR3DCluster methods and data. + + See also SteinerFunctions.h for any free functions. +*/ + +#ifndef WIRECELLCLUS_STEINER +#define WIRECELLCLUS_STEINER + +#include "WireCellClus/Graphs.h" +#include "WireCellClus/Facade_Cluster.h" +#include "WireCellClus/Facade_Blob.h" +#include "WireCellClus/IPCTransform.h" + +#include "WireCellIface/IBlobSampler.h" +#include "WireCellIface/IDetectorVolumes.h" +#include "WireCellIface/IPCTreeMutate.h" + +#include "WireCellUtil/Logging.h" + +#include +#include + +namespace WireCell::Clus::Steiner { + + + class Grapher { + public: + + // This holds various "global" and const info sources. See + // CreateSteinerGraph for an example of how it is provided. 
+ struct Config { + IDetectorVolumes::pointer dv; + WireCell::Clus::IPCTransformSet::pointer pcts; + IPCTreeMutate::pointer retile; + /// do we even need samplers? + // std::map> samplers; + }; + Log::logptr_t log; + + /// Construct with an existing cluster facade. Caller must assure the + /// underlying cluster node is kept live. + Grapher(Facade::Cluster& cluster, const Config& cfg, Log::logptr_t log); + Grapher() = delete; + + /// Construct a Grapher with some cluster and take the rest of what we + /// need from the other grapher. + Grapher(Facade::Cluster& cluster, const Grapher& other); + + /// + /// Types + /// + + /// Forward some types from Graphs.h + using graph_type = WireCell::Clus::Graphs::Weighted::graph_type; + using vertex_type = WireCell::Clus::Graphs::Weighted::vertex_type; + using edge_type = WireCell::Clus::Graphs::Weighted::edge_type; + using vertex_set = WireCell::Clus::Graphs::Weighted::vertex_set; + using edge_set = WireCell::Clus::Graphs::Weighted::edge_set; + using edge_weight_type = WireCell::Clus::Graphs::Weighted::edge_weight_type; + + /// A type that maps blobs to graph vertices + using blob_vertex_map = std::map; + + + /// + /// Basic data accessors. + /// + + Facade::Cluster& cluster() { return m_cluster; } + const Facade::Cluster& cluster() const { return m_cluster; } + Config config() const { return m_config; } + + + /// + /// Helper methods - these are general purpose, primitive. + /// + + /// + /// Some special graph access. See also Facade::Mixins::Graphs in + /// Facade_Mixins.h for more graph acessors. + /// + + /// Get a graph, possibly making it on the fly if flavor is one of the + /// 3 reserved names. + graph_type& get_graph(const std::string& flavor = "basic"); + const graph_type& get_graph(const std::string& flavor = "basic") const ; + + /// Remove the flavor of graph from the other Grapher and move it to + /// this one. Give a non-empty value for "our_flavor" to store the + /// transferred graph under a different name. 
+ void transfer_graph(Grapher& other, + const std::string& flavor = "basic", + std::string our_flavor = ""); + + + /// + /// Some special PC access. + /// + + /// Return a PC with the given name from our cluster node's local PCs. + /// If it does not exist, one is derived from the default scoped view, + /// saved to the given name, and a reference is returned. + PointCloud::Dataset& get_point_cloud(const std::string& name = "default"); + + /// Store a point cloud by std::move() in our cluster's local PCs. + void put_point_cloud(PointCloud::Dataset&& pc, const std::string& name = "default"); + + /// Store a point cloud by copy in our cluster's local PCs. + void put_point_cloud(const PointCloud::Dataset& pc, const std::string& name = "default"); + + /// Remove the named point cloud from the other Grapher and move it to + /// this one. Give a non-empty value for "our_name" to store the + /// transferred PC under a different name. + void transfer_pc(Grapher& other, + const std::string& name = "default", + const std::string& our_name = ""); + + + /// + /// The real main entry method + /// + + + + /// + /// Intermediate algorithm methods + /// + + + vertex_set find_peak_point_indices(const std::vector& target_blobs, const std::string& graph_name, + bool disable_dead_mix_cell = true, int nlevel = 1); + + blob_vertex_map form_cell_points_map(); + vertex_set find_steiner_terminals(const std::string& graph_name, bool disable_dead_mix_cell=true); + + /// Establish edges between points in the same blob (mcell) with weighted connectivity + /// This modifies the given graph and tracks added edges for later removal + /// Uses find_steiner_terminals() to determine edge weights: + /// - Both terminals: distance * 0.8 + /// - One terminal: distance * 0.9 + /// - Neither terminal: no edge added + void establish_same_blob_steiner_edges(const std::string& graph_name, + bool disable_dead_mix_cell=true); + + /// Remove previously added same-blob Steiner edges + void 
remove_same_blob_steiner_edges(const std::string& graph_name); + + /// Create Steiner tree with optional reference cluster and path constraints + /// This is the main entry point equivalent to prototype's Create_steiner_tree + void create_steiner_tree( + const Facade::Cluster* reference_cluster = nullptr, + const std::vector& path_point_indices = {}, + const std::string& graph_name = "basic_pid", + const std::string& steiner_graph_name = "steiner_graph", + bool disable_dead_mix_cell = true, + const std::string& steiner_pc_name = "steiner_pc" + ); + + + /// Get the flag indicating which vertices in the steiner graph are terminals + const std::vector& get_flag_steiner_terminal() const { + return m_flag_steiner_terminal; + } + + /// Get mapping from original to new vertex indices + const std::map& get_old_to_new_mapping() const { + return m_old_to_new_index; + } + + /// Get mapping from new to original vertex indices + const std::map& get_new_to_old_mapping() const { + return m_new_to_old_index; + } + + /// Check if a vertex in the steiner graph is a terminal + bool is_steiner_terminal(vertex_type steiner_vertex) const { + if (steiner_vertex >= m_flag_steiner_terminal.size()) { + return false; + } + return m_flag_steiner_terminal[steiner_vertex]; + } + + /// Get original vertex index from steiner graph vertex index + vertex_type get_original_vertex(vertex_type steiner_vertex) const { + auto it = m_new_to_old_index.find(steiner_vertex); + if (it != m_new_to_old_index.end()) { + return it->second; + } + return SIZE_MAX; // Invalid index + } + + /// Get steiner graph vertex index from original vertex index + vertex_type get_steiner_vertex(vertex_type original_vertex) const { + auto it = m_old_to_new_index.find(original_vertex); + if (it != m_old_to_new_index.end()) { + return it->second; + } + return SIZE_MAX; // Invalid index + } + + + private: + // The Grapher "wraps" a Cluster. 
As the Cluster is a *facade* of an + // underlying PC tree node, we do not own the Cluster and we rely on + // whoever owns us to keep the underlying cluster node alive. as long as + // we are alive. + Facade::Cluster& m_cluster; + + // This holds various "global" info sources + const Config& m_config; + + + // XIN: add any more data and methods you need here. + /// Track edges added by each graph modification operation + /// Maps graph name to set of edges added to that graph + std::map m_added_edges_by_graph; + + /// Helper to invalidate GraphAlgorithms cache for a specific graph + void invalidate_graph_algorithms_cache(const std::string& graph_name); + + /// Helper to store added edges for later removal + void store_added_edges(const std::string& graph_name, const edge_set& edges); + + /// Helper to check if two vertices (points) belong to the same blob + bool same_blob(vertex_type v1, vertex_type v2) const; + + /// Helper to calculate distance between two vertices + double calculate_distance(vertex_type v1, vertex_type v2) const; + + /// Helper to get blob for a given vertex (point index) + const Facade::Blob* get_blob_for_vertex(vertex_type vertex) const; + + + // additional helper functions + + /// Filter steiner terminals based on spatial relationship with reference cluster + vertex_set filter_by_reference_cluster( + const vertex_set& terminals, + const Facade::Cluster* reference_cluster + ) const; + + /// Filter steiner terminals based on path constraints + vertex_set filter_by_path_constraints( + const vertex_set& terminals, + const std::vector& path_point_indices + ) const; + + /// Get extreme points considering reference cluster constraints + vertex_set get_extreme_points_for_reference( + const Facade::Cluster* reference_cluster + ) const; + + + /// Check if a point is spatially related to reference cluster's time-blob mapping + bool is_point_spatially_related_to_reference( + size_t point_idx, + const Facade::Cluster::time_blob_map_t& ref_time_blob_map + 
) const; + + + + // temporary ... + size_t find_closest_vertex_to_point(const Point& point) const; + + /// Create steiner subset point cloud with proper wire indices + /// (matches prototype point_cloud_steiner creation) + PointCloud::Dataset create_steiner_subset_pc_with_indices( + const vertex_set& steiner_indices) const; + + /// Flag indicating which vertices in the reduced steiner graph are actual terminals + /// vs. intermediate Steiner points (matches prototype flag_steiner_terminal) + std::vector m_flag_steiner_terminal; + + /// Mapping from original graph vertex indices to reduced steiner graph indices + /// (matches prototype map_old_new_indices) + std::map m_old_to_new_index; + + /// Mapping from reduced steiner graph indices to original graph indices + /// (matches prototype map_new_old_indices) + std::map m_new_to_old_index; + + /// Set of vertices that are steiner graph terminals (for edge creation logic) + vertex_set m_steiner_graph_terminal_indices; + }; + + +} + +namespace WireCell::Clus::Graphs::Weighted { + /// Calculate charge-weighted distance between two vertices + /// (matches prototype edge weighting logic) + double calculate_charge_weighted_distance( + double geometric_distance, + double charge_source, + double charge_target, + const ChargeWeightingConfig& config = ChargeWeightingConfig{}); + + /// Calculate vertex charges using cluster facade method + std::map calculate_vertex_charges( + const vertex_set& vertices, + const PointCloud::Dataset& pc, + const WireCell::Clus::Facade::Cluster& cluster, + double charge_cut, + bool disable_dead_mix_cell); + + /// Enhanced steiner graph creation with full prototype functionality + struct EnhancedSteinerResult { + graph_type graph; // reduced vertex graph + PointCloud::Dataset point_cloud; // subset point cloud + std::vector flag_steiner_terminal; // terminal flags + std::map old_to_new_index; // index mappings + std::map new_to_old_index; // reverse mappings + vertex_set steiner_terminal_indices; // 
original terminal set + std::map vertex_charges; // calculated charges + }; + + /// Create steiner graph with full prototype matching functionality + EnhancedSteinerResult create_enhanced_steiner_graph( + const graph_type& base_graph, + const vertex_set& terminal_vertices, + const PointCloud::Dataset& original_pc, + const WireCell::Clus::Facade::Cluster& cluster, + const ChargeWeightingConfig& charge_config = ChargeWeightingConfig{}, + bool disable_dead_mix_cell = true + ); + + void establish_same_blob_steiner_edges_steiner_graph(EnhancedSteinerResult& result, + const WireCell::Clus::Facade::Cluster& cluster); +} + + +#endif diff --git a/clus/src/SteinerGrapher_helpers.cxx b/clus/src/SteinerGrapher_helpers.cxx new file mode 100644 index 000000000..03919176a --- /dev/null +++ b/clus/src/SteinerGrapher_helpers.cxx @@ -0,0 +1,83 @@ +#include "WireCellUtil/Exceptions.h" +#include "SteinerGrapher.h" + +using namespace WireCell; +using namespace WireCell::Clus; +using namespace WireCell::Clus::Facade; + +Steiner::Grapher::Grapher(Cluster& cluster, const Steiner::Grapher::Config& cfg, Log::logptr_t log) + : log(log), m_cluster(cluster), m_config(cfg) +{ + +} + +Steiner::Grapher::Grapher(Cluster& cluster, const Steiner::Grapher& other) + : log(other.log), m_cluster(cluster), m_config(other.m_config) +{ + +} + + +void Steiner::Grapher::put_point_cloud(PointCloud::Dataset&& pc, const std::string& name) +{ + m_cluster.local_pcs().emplace(name, pc); +} +void Steiner::Grapher::put_point_cloud(const PointCloud::Dataset& pc, const std::string& name) +{ + m_cluster.local_pcs().emplace(name, pc); +} + + +PointCloud::Dataset& Steiner::Grapher::get_point_cloud(const std::string& name) +{ + if (m_cluster.has_pc(name)) { + return m_cluster.get_pc(name); + } + + // Fixme? configure the scope? for now, the default. 
+ const auto& sv = m_cluster.sv(); + const auto& scope = m_cluster.get_default_scope(); + + // put_point_cloud(sv.flat_coords(), name); + put_point_cloud(sv.flat_pc(scope.pcname, {scope.coords.at(0),scope.coords.at(1),scope.coords.at(2),"wpid"}),name); + + // Note, if more than the x,y,z coordinates are needed we would replace + // flat_coords() with something like: + // sv.flat_pc("3d", {"x","y","z","wpid","uwire_index", "vwire_index", "wwire_index"); + + // Return the in-place reference + return m_cluster.get_pc(name); +} + +Steiner::Grapher::graph_type& Steiner::Grapher::get_graph(const std::string& flavor) +{ + // If graph of given flavor does not exist, the Cluster knows how to make + // three "reserved" flavors, "basic", "ctpc" and "relaxed". + return m_cluster.find_graph(flavor, m_config.dv, m_config.pcts); // throws if no flavor +} +void Steiner::Grapher::transfer_graph(Steiner::Grapher& other, const std::string& flavor, + std::string our_flavor) +{ + if (our_flavor.empty()) { + our_flavor = flavor; + } + + // This does a move. + m_cluster.give_graph(our_flavor, other.cluster().take_graph(flavor)); +} + +void Steiner::Grapher::transfer_pc(Steiner::Grapher& other, const std::string& name, + const std::string& our_name) +{ + // We do this for the possible side-effect of creating the local PC from the + // scoped PC. 
+ other.get_point_cloud(name); + if (our_name.empty()) { + m_cluster.local_pcs().insert(other.cluster().local_pcs().extract(name)); + return; + } + + auto map_node = other.cluster().local_pcs().extract(name); + map_node.key() = our_name; // C++17 + m_cluster.local_pcs().insert(std::move(map_node)); +} diff --git a/clus/src/TaggerCheckSTM.cxx b/clus/src/TaggerCheckSTM.cxx new file mode 100644 index 000000000..402a27d8c --- /dev/null +++ b/clus/src/TaggerCheckSTM.cxx @@ -0,0 +1,2758 @@ +#include "WireCellClus/IEnsembleVisitor.h" +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellClus/ClusteringFuncsMixins.h" +#include "WireCellClus/ParticleDataSet.h" +#include "WireCellClus/FiducialUtils.h" +#include "WireCellIface/IConfigurable.h" +#include "WireCellUtil/NamedFactory.h" +#include "WireCellUtil/Logging.h" +#include "WireCellClus/PRGraph.h" +#include "WireCellClus/TrackFitting.h" +#include "WireCellClus/TrackFittingPresets.h" +#include "WireCellClus/PRSegmentFunctions.h" + +#include "WireCellIface/IScalarFunction.h" +#include "WireCellUtil/KSTest.h" + + + + +class TaggerCheckSTM; +WIRECELL_FACTORY(TaggerCheckSTM, TaggerCheckSTM, + WireCell::IConfigurable, WireCell::Clus::IEnsembleVisitor) + +using namespace WireCell; +using namespace WireCell::Clus; +using namespace WireCell::Clus::Facade; + +struct edge_base_t { + typedef boost::edge_property_tag kind; +}; + +/** + * Clustering function that checks the main cluster from clustering_recovering_bundle + * for Short Track Muon (STM) characteristics and sets the STM flag when conditions are met. + * This function works on clusters that have already been processed by clustering_recovering_bundle. 
+ */ +class TaggerCheckSTM : public IConfigurable, public Clus::IEnsembleVisitor, private Clus::NeedDV, private Clus::NeedPCTS, private Clus::NeedRecombModel, private Clus::NeedParticleData { +public: + TaggerCheckSTM() { + // Initialize with default preset + m_track_fitter = TrackFittingPresets::create_with_current_values(); + } + virtual ~TaggerCheckSTM() {} + + virtual void configure(const WireCell::Configuration& config) { + NeedDV::configure(config); + NeedPCTS::configure(config); + NeedRecombModel::configure(config); + NeedParticleData::configure(config); + + m_grouping_name = get(config, "grouping", "live"); + + m_trackfitting_config_file = get(config, "trackfitting_config_file", ""); + + if (!m_trackfitting_config_file.empty()) { + std::cout << "TaggerCheckSTM: Loading TrackFitting config from: " << m_trackfitting_config_file << std::endl; + load_trackfitting_config(m_trackfitting_config_file); + } else { + std::cout << "TaggerCheckSTM: No TrackFitting config file specified, using defaults" << std::endl; + } + + } + + virtual Configuration default_configuration() const { + Configuration cfg; + cfg["grouping"] = m_grouping_name; + cfg["detector_volumes"] = "DetectorVolumes"; + cfg["pc_transforms"] = "PCTransformSet"; + cfg["recombination_model"] = "BoxRecombination"; + cfg["particle_dataset"] = "ParticleDataSet"; + + cfg["trackfitting_config_file"] = ""; + + return cfg; + } + + virtual void visit(Ensemble& ensemble) const { + + // Configure the track fitter with detector volume + m_track_fitter.set_detector_volume(m_dv); + m_track_fitter.set_pc_transforms(m_pcts); + + // Get the specified grouping (default: "live") + auto groupings = ensemble.with_name(m_grouping_name); + if (groupings.empty()) { + return; + } + + auto& grouping = *groupings.at(0); + + // Find clusters that have the main_cluster flag (set by clustering_recovering_bundle) + Cluster* main_cluster = nullptr; + + for (auto* cluster : grouping.children()) { + if 
(cluster->get_flag(Flags::main_cluster)) { + main_cluster = cluster; + } + } + + std::cout << "TaggerCheckSTM: Found " << (main_cluster ? 1 : 0) + << " main clusters to check for STM conditions." << std::endl; + + // For each main cluster, find its associated clusters + std::map> main_to_associated; + if (main_cluster) { + std::vector associated_clusters; + + // Find all clusters with the associated_cluster flag + for (auto* cluster : grouping.children()) { + if (cluster->get_flag(Flags::associated_cluster)) { + associated_clusters.push_back(cluster); + } + } + + main_to_associated[main_cluster] = associated_clusters; + + // std::cout << "TaggerCheckSTM: Main cluster " << main_cluster->ident() + // << " has " << associated_clusters.size() << " associated clusters: "; + // for (auto* assoc : associated_clusters) { + // std::cout << assoc->ident() << " "; + // } + // std::cout << std::endl; + } + + // Process each main cluster + size_t stm_count = 0; + + // // validation check ... temporary ... 
+ // { + // auto boundary_indices = main_cluster->get_two_boundary_steiner_graph_idx("steiner_graph", "steiner_pc", true); + + // const auto& steiner_pc = main_cluster->get_pc("steiner_pc"); + // const auto& coords = main_cluster->get_default_scope().coords; + // const auto& x_coords = steiner_pc.get(coords.at(0))->elements(); + // const auto& y_coords = steiner_pc.get(coords.at(1))->elements(); + // const auto& z_coords = steiner_pc.get(coords.at(2))->elements(); + + // // Add the two boundary points as additional extreme point groups + // geo_point_t boundary_point_first(x_coords[boundary_indices.first], + // y_coords[boundary_indices.first], + // z_coords[boundary_indices.first]); + // geo_point_t boundary_point_second(x_coords[boundary_indices.second], + // y_coords[boundary_indices.second], + // z_coords[boundary_indices.second]); + // geo_point_t first_wcp = boundary_point_first; + // geo_point_t last_wcp = boundary_point_second; + + // std::cout << "End Points: " << first_wcp << " " << last_wcp << std::endl; + // last_wcp = geo_point_t(215.532, -95.1674, 211.193); + + // auto path_points = do_rough_path(*main_cluster, first_wcp, last_wcp); + + // // Create segment for tracking + // auto segment = create_segment_for_cluster(*main_cluster, path_points); + + // // geo_point_t test_p(10,10,10); + // // const auto& fit_seg_dpc = segment->dpcloud("main"); + // // auto closest_result = fit_seg_dpc->kd3d().knn(1, test_p); + // // double closest_3d_distance = sqrt(closest_result[0].second); + // // auto closest_2d_u = fit_seg_dpc->get_closest_2d_point_info(test_p, 0, 0, 0); + // // auto closest_2d_v = fit_seg_dpc->get_closest_2d_point_info(test_p, 1, 0, 0); + // // auto closest_2d_w = fit_seg_dpc->get_closest_2d_point_info(test_p, 2, 0, 0); + // // std::cout << closest_3d_distance << " " << std::get<0>(closest_2d_u) << " " << std::get<0>(closest_2d_v) << " " << std::get<0>(closest_2d_w) << std::endl; + // // std::cout << std::get<2>(closest_2d_u) << " " << 
std::get<2>(closest_2d_v) << " " << std::get<2>(closest_2d_w) << std::endl; + + // m_track_fitter.add_segment(segment); + // m_track_fitter.do_single_tracking(segment, true, true, false, true); + // // Extract fit results from the segment + // const auto& fits = segment->fits(); + + // // Print position, dQ, and dx for each fit point + // std::cout << "Fit results for " << fits.size() << " points:" << std::endl; + // for (size_t i = 0; i < fits.size(); ++i) { + // const auto& fit = fits[i]; + // std::cout << " Point " << i << ": position=(" + // << fit.point.x()/units::cm << ", " << fit.point.y()/units::cm << ", " << fit.point.z()/units::cm + // << "), dQ=" << fit.dQ << ", dx=" << fit.dx/units::cm << std::endl; + // } + // std::cout << std::endl; + + // std::cout << "After search other tracks" << std::endl; + // std::vector> fitted_segments; + // fitted_segments.push_back(segment); + // search_other_tracks(*main_cluster, fitted_segments); + + // std::cout << fitted_segments.size() << std::endl; + // // { + // // // Extract fit results from the segment + // // const auto& fits = fitted_segments.back()->fits(); + + // // // Print position, dQ, and dx for each fit point + // // std::cout << "Fit results for " << fits.size() << " points:" << std::endl; + // // for (size_t i = 0; i < fits.size(); ++i) { + // // const auto& fit = fits[i]; + // // std::cout << " Point " << i << ": position=(" + // // << fit.point.x()/units::cm << ", " << fit.point.y()/units::cm << ", " << fit.point.z()/units::cm + // // << "), dQ=" << fit.dQ << ", dx=" << fit.dx/units::cm << std::endl; + // // } + // // std::cout << std::endl; + // // } + // bool flag_other_tracks = check_other_tracks(*main_cluster, fitted_segments); + // std::cout << "Check other Tracks: " << flag_other_tracks << std::endl; + + // bool flag_other_clusters = check_other_clusters(*main_cluster, main_to_associated[main_cluster]); + // std::cout << "Check other Clusters: " << flag_other_clusters << std::endl; + + // 
geo_point_t mid_point(0,0,0); + // auto adjusted_path_points = adjust_rough_path(*main_cluster, mid_point); + // std::cout << "Adjust path " << mid_point << std::endl; + + // int kink_num = find_first_kink(segment); + // std::cout << "Kink " << kink_num << std::endl; + + // bool flag_proton = detect_proton(segment, kink_num, fitted_segments); + // std::cout << "Proton " << flag_proton << std::endl; + + // bool flag_eval_stm = eval_stm(segment, kink_num, 5*units::cm, 0., 35*units::cm, true); + // std::cout << "eval_stm " << flag_eval_stm << std::endl; + + // } + + bool flag_stm = check_stm_conditions(*main_cluster, main_to_associated[main_cluster] ); + std::cout << "STM tagger: " << " " << flag_stm << std::endl; + if (flag_stm) { + main_cluster->set_flag(Flags::STM); + stm_count++; + } + + (void)stm_count; + + // hack ... + { + auto segs = m_track_fitter.get_segments(); + clustering_points_segments(segs,m_dv); + } + + } + +private: + std::string m_grouping_name{"live"}; + std::string m_trackfitting_config_file; // Path to TrackFitting config file + mutable TrackFitting m_track_fitter; + + void load_trackfitting_config(const std::string& config_file) { + try { + // Load JSON file + std::ifstream file(config_file); + if (!file.is_open()) { + std::cerr << "TaggerCheckSTM: Cannot open config file: " << config_file << std::endl; + return; + } + + Json::Value root; + Json::CharReaderBuilder builder; + std::string errs; + + if (!Json::parseFromStream(builder, file, &root, &errs)) { + std::cerr << "TaggerCheckSTM: Failed to parse JSON: " << errs << std::endl; + return; + } + + // Apply each parameter from the JSON file + for (const auto& param_name : root.getMemberNames()) { + if (param_name.substr(0, 1) == "_") continue; // Skip comments + + try { + double value = root[param_name].asDouble(); + m_track_fitter.set_parameter(param_name, value); + std::cout << "TaggerCheckSTM: Set " << param_name << " = " << value << std::endl; + } catch (const std::exception& e) { + 
std::cerr << "TaggerCheckSTM: Failed to set parameter " << param_name + << ": " << e.what() << std::endl; + } + } + + std::cout << "TaggerCheckSTM: Successfully loaded TrackFitting configuration" << std::endl; + + } catch (const std::exception& e) { + std::cerr << "TaggerCheckSTM: Exception loading config: " << e.what() << std::endl; + std::cerr << "TaggerCheckSTM: Using default TrackFitting parameters" << std::endl; + } + } + + std::vector do_rough_path(const Cluster& cluster,geo_point_t& first_point, geo_point_t& last_point) const{ + // 1. Get Steiner point cloud and graph + // const auto& steiner_pc = cluster.get_pc("steiner_pc"); + // const auto& steiner_graph = cluster.get_graph("steiner_graph"); + + // 2. Find the closest point indices in the Steiner point cloud + + // Find closest indices in the steiner point cloud + auto first_knn_results = cluster.kd_steiner_knn(1, first_point, "steiner_pc"); + auto last_knn_results = cluster.kd_steiner_knn(1, last_point, "steiner_pc"); + + auto first_index = first_knn_results[0].first; // Get the index from the first result + auto last_index = last_knn_results[0].first; // Get the index from the first result + + // 4. Use Steiner graph to find the shortest path + const std::vector& path_indices = + cluster.graph_algorithms("steiner_graph").shortest_path(first_index, last_index); + + std::vector path_points; + const auto& steiner_pc = cluster.get_pc("steiner_pc"); + const auto& coords = cluster.get_default_scope().coords; + const auto& x_coords = steiner_pc.get(coords.at(0))->elements(); + const auto& y_coords = steiner_pc.get(coords.at(1))->elements(); + const auto& z_coords = steiner_pc.get(coords.at(2))->elements(); + + for (size_t idx : path_indices) { + path_points.emplace_back(x_coords[idx], y_coords[idx], z_coords[idx]); + } + return path_points; + } + + // return a vector of point, also the mid_p is also a return point ... 
+ std::vector adjust_rough_path(const Cluster& cluster, geo_point_t& mid_p) const{ + + const geo_point_t drift_dir_abs(1,0,0); + // use the m_track_fitter ... + auto fine_tracking_path = m_track_fitter.get_fine_tracking_path(); + auto dQ = m_track_fitter.get_dQ(); + auto dx = m_track_fitter.get_dx(); + + mid_p.at(0) = fine_tracking_path.at(0).first.x(); + mid_p.at(1) = fine_tracking_path.at(0).first.y(); + mid_p.at(2) = fine_tracking_path.at(0).first.z(); + + // Initialize variables + int save_i = 0; + bool flag_crawl = false; + + // Initialize angle vectors + std::vector refl_angles(fine_tracking_path.size(), 0); + std::vector para_angles(fine_tracking_path.size(), 0); + + // First part: Calculate reflection and parallel angles for each point + for (size_t i = 0; i != fine_tracking_path.size(); i++) { + double angle1 = 0; // reflection angle + double angle2 = 0; // parallel angle + + // Calculate angles using vectors to neighboring points at different distances + for (int j = 0; j != 6; j++) { + WireCell::Vector v10(0, 0, 0); // Vector from previous point + WireCell::Vector v20(0, 0, 0); // Vector to next point + + // Backward vector (from point i-j-1 to point i) + if (i > j) { + v10 = WireCell::Vector(fine_tracking_path.at(i).first.x() - fine_tracking_path.at(i-j-1).first.x(), + fine_tracking_path.at(i).first.y() - fine_tracking_path.at(i-j-1).first.y(), + fine_tracking_path.at(i).first.z() - fine_tracking_path.at(i-j-1).first.z()); + } + + // Forward vector (from point i to point i+j+1) + if (i + j + 1 < fine_tracking_path.size()) { + v20 = WireCell::Vector(fine_tracking_path.at(i+j+1).first.x() - fine_tracking_path.at(i).first.x(), + fine_tracking_path.at(i+j+1).first.y() - fine_tracking_path.at(i).first.y(), + fine_tracking_path.at(i+j+1).first.z() - fine_tracking_path.at(i).first.z()); + } + + if (j == 0) { + // For the first iteration, set initial values + if (v10.magnitude() > 0 && v20.magnitude() > 0) { + angle1 = std::acos(v10.dot(v20) / (v10.magnitude() 
* v20.magnitude())) / 3.1415926 * 180.0; + } + // Calculate angles with drift direction + if (v10.magnitude() > 0) { + double angle_v10 = std::acos(v10.dot(drift_dir_abs) / v10.magnitude()) / 3.1415926 * 180.0; + angle2 = std::abs(angle_v10 - 90.0); + } + if (v20.magnitude() > 0) { + double angle_v20 = std::acos(v20.dot(drift_dir_abs) / v20.magnitude()) / 3.1415926 * 180.0; + angle2 = std::max(angle2, std::abs(angle_v20 - 90.0)); + } + } else { + // For subsequent iterations, take minimum values + if (v10.magnitude() != 0 && v20.magnitude() != 0) { + double temp_angle1 = std::acos(v10.dot(v20) / (v10.magnitude() * v20.magnitude())) / 3.1415926 * 180.0; + angle1 = std::min(temp_angle1, angle1); + + double angle_v10 = std::acos(v10.dot(drift_dir_abs) / v10.magnitude()) / 3.1415926 * 180.0; + double angle_v20 = std::acos(v20.dot(drift_dir_abs) / v20.magnitude()) / 3.1415926 * 180.0; + double temp_angle2 = std::max(std::abs(angle_v10 - 90.0), std::abs(angle_v20 - 90.0)); + angle2 = std::min(temp_angle2, angle2); + } + } + } + + refl_angles.at(i) = angle1; + para_angles.at(i) = angle2; + + // std::cout << i << " " << angle1 << " " << angle2 << std::endl; + } + + // Second part: Analyze charge and find breakpoints + for (int i = 0; i != fine_tracking_path.size(); i++) { + double min_dQ_dx = dQ.at(i) / dx.at(i); + + // Find minimum dQ/dx in the next 5 points + for (size_t j = 1; j != 6; j++) { + if (i + j < fine_tracking_path.size()) { + if (dQ.at(i + j) / dx.at(i + j) < min_dQ_dx) { + min_dQ_dx = dQ.at(i + j) / dx.at(i + j); + } + } + } + + // Calculate sum of reflection angles in a local window + double sum_angles = 0; + double nsum = 0; + + for (int j = -2; j != 3; j++) { + if (i + j >= 0 && i + j < fine_tracking_path.size()) { + if (para_angles.at(i + j) > 10) { + sum_angles += pow(refl_angles.at(i + j), 2); + nsum++; + } + } + } + + if (nsum != 0) { + sum_angles = sqrt(sum_angles / nsum); + } + + // std::cout << i << " " << min_dQ_dx << " " << para_angles.at(i) << " 
" << refl_angles.at(i) <<" " << sum_angles << std::endl; + + // First breakpoint condition: Low charge with significant angles + if (min_dQ_dx < 1000 && para_angles.at(i) > 10 && refl_angles.at(i) > 25) { + std::cout << "Mid_Point_Break: " << i << " " << refl_angles.at(i) << " " + << para_angles.at(i) << " " << min_dQ_dx << " " + << fine_tracking_path.at(i).first.x() << " " + << fine_tracking_path.at(i).first.y() << " " + << fine_tracking_path.at(i).first.z() << std::endl; + flag_crawl = true; + save_i = i; + break; + } + // Second breakpoint condition: Higher angle thresholds with geometric constraints + else if (para_angles.at(i) > 15 && refl_angles.at(i) > 27 && sum_angles > 12.5) { + // Calculate angle between vectors from start to point and point to end + WireCell::Vector v10(fine_tracking_path.at(i).first.x() - fine_tracking_path.front().first.x(), + fine_tracking_path.at(i).first.y() - fine_tracking_path.front().first.y(), + fine_tracking_path.at(i).first.z() - fine_tracking_path.front().first.z()); + + WireCell::Vector v20(fine_tracking_path.back().first.x() - fine_tracking_path.at(i).first.x(), + fine_tracking_path.back().first.y() - fine_tracking_path.at(i).first.y(), + fine_tracking_path.back().first.z() - fine_tracking_path.at(i).first.z()); + + double angle3 = 0; + if (v10.magnitude() > 0 && v20.magnitude() > 0) { + angle3 = std::acos(v10.dot(v20) / (v10.magnitude() * v20.magnitude())) / 3.1415926 * 180.0; + } + + // Skip if the angle is too small (nearly straight line) + if (angle3 < 20) continue; + + std::cout << "Mid_Point_Break: " << i << " " << refl_angles.at(i) << " " + << para_angles.at(i) << " " << angle3 << " " << min_dQ_dx << " " + << fine_tracking_path.at(i).first.x() << " " + << fine_tracking_path.at(i).first.y() << " " + << fine_tracking_path.at(i).first.z() << std::endl; + flag_crawl = true; + save_i = i; + break; + } + } + + std::vector out_path_points; + + // std::cout <<"flag crawl " << flag_crawl << std::endl; + + if (flag_crawl){ + 
// Start to Crawl + const double step_dis = 1.0 * units::cm; + + // Get point clouds and coordinate arrays from cluster + const auto& steiner_pc = cluster.get_pc("steiner_pc"); + const auto& coords = cluster.get_default_scope().coords; + const auto& steiner_x = steiner_pc.get(coords.at(0))->elements(); + const auto& steiner_y = steiner_pc.get(coords.at(1))->elements(); + const auto& steiner_z = steiner_pc.get(coords.at(2))->elements(); + + + // Initialization - get starting point from fine tracking path + geo_point_t p(fine_tracking_path.at(save_i).first.x(), + fine_tracking_path.at(save_i).first.y(), + fine_tracking_path.at(save_i).first.z()); + + // Find closest point in steiner point cloud + auto curr_knn_results = cluster.kd_steiner_knn(1, p, "steiner_pc"); + size_t curr_index = curr_knn_results[0].first; + geo_point_t curr_wcp(steiner_x[curr_index], steiner_y[curr_index], steiner_z[curr_index]); + + // Calculate previous point direction + geo_point_t prev_p(0, 0, 0); + int num_p = 0; + for (size_t i = 1; i != 6; i++) { + if (save_i >= i) { + prev_p.at(0) += fine_tracking_path.at(save_i - i).first.x(); + prev_p.at(1) += fine_tracking_path.at(save_i - i).first.y(); + prev_p.at(2) += fine_tracking_path.at(save_i - i).first.z(); + num_p++; + } + } + prev_p.at(0) /= num_p; + prev_p.at(1) /= num_p; + prev_p.at(2) /= num_p; + + // Calculate initial direction + WireCell::Vector dir(p.at(0) - prev_p.at(0), p.at(1) - prev_p.at(1), p.at(2) - prev_p.at(2)); + dir = dir.norm(); + + bool flag_continue = true; + while (flag_continue) { + flag_continue = false; + + for (int i = 0; i != 3; i++) { + // Calculate test point + geo_point_t test_p(curr_wcp.at(0) + dir.x() * step_dis * (i + 1), + curr_wcp.at(1) + dir.y() * step_dis * (i + 1), + curr_wcp.at(2) + dir.z() * step_dis * (i + 1)); + // Try normal point cloud first + auto search_result = cluster.get_closest_wcpoint(test_p); + geo_point_t next_wcp = search_result.second; + WireCell::Vector dir1(next_wcp.at(0) - 
curr_wcp.at(0), + next_wcp.at(1) - curr_wcp.at(1), + next_wcp.at(2) - curr_wcp.at(2)); + + // Check angle constraint (30 degrees) + if (dir1.magnitude() != 0 && (std::acos(dir1.dot(dir) / dir1.magnitude()) / 3.1415926 * 180.0 < 30)) { + flag_continue = true; + curr_wcp = next_wcp; + dir = dir1 + dir * 5.0 * units::cm; // momentum trick + dir = dir.norm(); + break; + } + + // Try steiner point cloud + auto next_knn_steiner = cluster.kd_steiner_knn(1, test_p, "steiner_pc"); + size_t next_index = next_knn_steiner[0].first; + next_wcp.at(0) = steiner_x[next_index]; + next_wcp.at(1) = steiner_y[next_index]; + next_wcp.at(2) = steiner_z[next_index]; + WireCell::Vector dir2(next_wcp.at(0) - curr_wcp.at(0), + next_wcp.at(1) - curr_wcp.at(1), + next_wcp.at(2) - curr_wcp.at(2)); + + // Check angle constraint (30 degrees) + if (dir2.magnitude() != 0 && (std::acos(dir2.dot(dir) / dir2.magnitude()) / 3.1415926 * 180.0 < 30)) { + flag_continue = true; + curr_wcp = next_wcp; + dir = dir2 + dir * 5.0 * units::cm; // momentum trick + dir = dir.norm(); + break; + } + } + } + + // Find first and last points in steiner point cloud + geo_point_t first_p(fine_tracking_path.front().first.x(), + fine_tracking_path.front().first.y(), + fine_tracking_path.front().first.z()); + auto first_knn_results = cluster.kd_steiner_knn(1, first_p, "steiner_pc"); + size_t first_index = first_knn_results[0].first; + + geo_point_t last_p(fine_tracking_path.back().first.x(), + fine_tracking_path.back().first.y(), + fine_tracking_path.back().first.z()); + auto last_knn_results = cluster.kd_steiner_knn(1, last_p, "steiner_pc"); + size_t last_index = last_knn_results[0].first; + + // Update current point to closest steiner point + auto curr_steiner_knn = cluster.kd_steiner_knn(1, curr_wcp, "steiner_pc"); + curr_index = curr_steiner_knn[0].first; + + mid_p = curr_wcp; + + + + // Calculate distance from current to last point + double dis = std::sqrt(std::pow(steiner_x[curr_index] - steiner_x[last_index], 2) + + 
std::pow(steiner_y[curr_index] - steiner_y[last_index], 2) + + std::pow(steiner_z[curr_index] - steiner_z[last_index], 2)); + + std::cout << "First, Center: " << steiner_x[first_index] << " " << steiner_y[first_index] + << " " << steiner_z[first_index] << " " << steiner_x[curr_index] << " " + << steiner_y[curr_index] << " " << steiner_z[curr_index] << " " << dis/units::cm << std::endl; + + if (dis > 1.0 * units::cm) { + // Find path from first to current point + const std::vector& path1_indices = + cluster.graph_algorithms("steiner_graph").shortest_path(first_index, curr_index); + + // Find path from current to last point + const std::vector& path2_indices = + cluster.graph_algorithms("steiner_graph").shortest_path(curr_index, last_index); + + std::list path2_indices_list(path2_indices.begin(), path2_indices.end()); + // Combine paths, removing duplicate middle point + // Copy first path to temporary storage + std::vector temp_path_indices = path1_indices; + + // Find overlapping portion between end of path1 and beginning of path2 + int count = 0; + auto it1 = temp_path_indices.rbegin(); // reverse iterator for temp path + for (auto it = path2_indices.begin(); it != path2_indices.end() && it1 != temp_path_indices.rend(); ++it, ++it1) { + if (*it == *it1) { + count++; + } else { + break; + } + } + + // std::cout << temp_path_indices.size() << " " << count << " " << path2_indices.size() << std::endl; + + // Remove from end of temp_path_indices + for (int i = 0; i != count; i++) { + if (i!=count-1){ + temp_path_indices.pop_back(); + } + path2_indices_list.pop_front(); + } + + // Add first path (without overlapping end) + for (size_t idx : temp_path_indices) { + out_path_points.emplace_back(steiner_x[idx], steiner_y[idx], steiner_z[idx]); + } + + // Add second path (without overlapping beginning) + for (size_t idx : path2_indices_list) { + out_path_points.emplace_back(steiner_x[idx], steiner_y[idx], steiner_z[idx]); + } + + } + } else { + + } + + return 
out_path_points; + } + + int find_first_kink(std::shared_ptr segment) const{ + // Implement your logic to find the first kink in the cluster + + auto& cluster = *segment->cluster(); + + // Get FiducialUtils from the grouping + auto fiducial_utils = cluster.grouping()->get_fiducialutils(); + if (!fiducial_utils) { + std::cout << "TaggerCheckSTM: No FiducialUtils available in find_first_kink" << std::endl; + return -1; + } + const auto transform = m_pcts->pc_transform(cluster.get_scope_transform(cluster.get_default_scope())); + double cluster_t0 = cluster.get_cluster_t0(); + + // Extract fit results from the segment + const auto& fits = segment->fits(); + + // Convert fit data to vectors matching the TrackFitting interface + std::vector>> fine_tracking_path; + std::vector dQ, dx, pu, pv, pw, pt; + std::vector> paf; + + for (const auto& fit : fits) { + fine_tracking_path.emplace_back(fit.point, segment); + dQ.push_back(fit.dQ); + dx.push_back(fit.dx); + pu.push_back(fit.pu); + pv.push_back(fit.pv); + pw.push_back(fit.pw); + pt.push_back(fit.pt); + paf.push_back(fit.paf); + } + + if (fine_tracking_path.empty()) { + return -1; + } + + // Define drift direction (X direction in detector coordinates) + WireCell::Vector drift_dir_abs(1.0, 0.0, 0.0); + + // Initialize angle vectors + std::vector refl_angles(fine_tracking_path.size(), 0); + std::vector para_angles(fine_tracking_path.size(), 0); + std::vector ave_angles(fine_tracking_path.size(), 0); + std::vector max_numbers(fine_tracking_path.size(), -1); + + // Calculate reflection and parallel angles for each point + for (size_t i = 0; i != fine_tracking_path.size(); i++) { + double angle1 = 0; + double angle2 = 0; + + for (int j = 0; j != 6; j++) { + WireCell::Vector v10(0, 0, 0); + WireCell::Vector v20(0, 0, 0); + + // Backward vector (from point i-j-1 to point i) + if (i > j) { + v10 = WireCell::Vector(fine_tracking_path.at(i).first.x() - fine_tracking_path.at(i-j-1).first.x(), + fine_tracking_path.at(i).first.y() - 
fine_tracking_path.at(i-j-1).first.y(), + fine_tracking_path.at(i).first.z() - fine_tracking_path.at(i-j-1).first.z()); + } + + // Forward vector (from point i to point i+j+1) + if (i + j + 1 < fine_tracking_path.size()) { + v20 = WireCell::Vector(fine_tracking_path.at(i+j+1).first.x() - fine_tracking_path.at(i).first.x(), + fine_tracking_path.at(i+j+1).first.y() - fine_tracking_path.at(i).first.y(), + fine_tracking_path.at(i+j+1).first.z() - fine_tracking_path.at(i).first.z()); + } + + if (j == 0) { + // For the first iteration, set initial values + if (v10.magnitude() > 0 && v20.magnitude() > 0) { + angle1 = std::acos(v10.dot(v20) / (v10.magnitude() * v20.magnitude())) / 3.1415926 * 180.0; + } + // Calculate angles with drift direction + if (v10.magnitude() > 0) { + double angle_v10 = std::acos(v10.dot(drift_dir_abs) / v10.magnitude()) / 3.1415926 * 180.0; + angle2 = std::abs(angle_v10 - 90.0); + } + if (v20.magnitude() > 0) { + double angle_v20 = std::acos(v20.dot(drift_dir_abs) / v20.magnitude()) / 3.1415926 * 180.0; + angle2 = std::max(angle2, std::abs(angle_v20 - 90.0)); + } + } else { + // For subsequent iterations, take minimum values + if (v10.magnitude() != 0 && v20.magnitude() != 0) { + double temp_angle1 = std::acos(v10.dot(v20) / (v10.magnitude() * v20.magnitude())) / 3.1415926 * 180.0; + angle1 = std::min(temp_angle1, angle1); + + double angle_v10 = std::acos(v10.dot(drift_dir_abs) / v10.magnitude()) / 3.1415926 * 180.0; + double angle_v20 = std::acos(v20.dot(drift_dir_abs) / v20.magnitude()) / 3.1415926 * 180.0; + double temp_angle2 = std::max(std::abs(angle_v10 - 90.0), std::abs(angle_v20 - 90.0)); + angle2 = std::min(temp_angle2, angle2); + } + } + } + + refl_angles.at(i) = angle1; + para_angles.at(i) = angle2; + + // std::cout << i << " " << angle1 << " " << angle2 << std::endl; + } + + // Calculate average angles in a 5-point window + for (int i = 0; i != fine_tracking_path.size(); i++) { + double sum_angles = 0; + double nsum = 0; + double 
max_angle = 0; + int max_num = -1; + + for (int j = -2; j != 3; j++) { + if (i + j >= 0 && i + j < fine_tracking_path.size()) { + if (para_angles.at(i + j) > 12) { + sum_angles += pow(refl_angles.at(i + j), 2); + nsum++; + if (refl_angles.at(i + j) > max_angle) { + max_angle = refl_angles.at(i + j); + max_num = i + j; + } + } + } + } + + if (nsum != 0) sum_angles = sqrt(sum_angles / nsum); + ave_angles.at(i) = sum_angles; + max_numbers.at(i) = max_num; + + // std::cout << i << " " << sum_angles << " " << max_num << std::endl; + } + + // Look for kink candidates + for (int i = 0; i != fine_tracking_path.size(); i++) { + geo_point_t current_point(fine_tracking_path.at(i).first.x(), + fine_tracking_path.at(i).first.y(), + fine_tracking_path.at(i).first.z()); + + // Check basic angle conditions and fiducial volume + if ((refl_angles.at(i) > 20 && ave_angles.at(i) > 10) && + fiducial_utils->inside_fiducial_volume(current_point)) { + + // Calculate angle between start-to-kink and kink-to-end vectors + WireCell::Vector v10(fine_tracking_path.at(i).first.x() - fine_tracking_path.front().first.x(), + fine_tracking_path.at(i).first.y() - fine_tracking_path.front().first.y(), + fine_tracking_path.at(i).first.z() - fine_tracking_path.front().first.z()); + WireCell::Vector v20(fine_tracking_path.back().first.x() - fine_tracking_path.at(i).first.x(), + fine_tracking_path.back().first.y() - fine_tracking_path.at(i).first.y(), + fine_tracking_path.back().first.z() - fine_tracking_path.at(i).first.z()); + + double angle3 = 0; + if (v10.magnitude() > 0 && v20.magnitude() > 0) { + angle3 = std::acos(v10.dot(v20) / (v10.magnitude() * v20.magnitude())) / 3.1415926 * 180.0; + } + + double angle3p = angle3; + if (i + 1 != fine_tracking_path.size()) { + WireCell::Vector v11(fine_tracking_path.at(i+1).first.x() - fine_tracking_path.front().first.x(), + fine_tracking_path.at(i+1).first.y() - fine_tracking_path.front().first.y(), + fine_tracking_path.at(i+1).first.z() - 
fine_tracking_path.front().first.z()); + WireCell::Vector v21(fine_tracking_path.back().first.x() - fine_tracking_path.at(i+1).first.x(), + fine_tracking_path.back().first.y() - fine_tracking_path.at(i+1).first.y(), + fine_tracking_path.back().first.z() - fine_tracking_path.at(i+1).first.z()); + if (v11.magnitude() > 0 && v21.magnitude() > 0) { + angle3p = std::acos(v11.dot(v21) / (v11.magnitude() * v21.magnitude())) / 3.1415926 * 180.0; + } + } + + // std::cout << i << " " << angle3 << " " << angle3p << " " << v10.magnitude()/units::cm << " " << v20.magnitude()/units::cm << std::endl; + + + // need to calculate current_point_raw ... + WireCell::Point current_point_raw= transform->backward(current_point, cluster_t0, paf.at(i).second, paf.at(i).first); + + // Apply selection criteria + if ((angle3 < 20 && ave_angles.at(i) < 20) || + (angle3 < 12.5 && fiducial_utils->inside_dead_region(current_point_raw, paf.at(i).first, paf.at(i).second, 2)) || + angle3 < 7.5 || i <= 4) continue; + + if ((angle3 > 30 && (refl_angles.at(i) > 25.5 && ave_angles.at(i) > 12.5)) || + (angle3 > 40 && angle3 > angle3p && v10.magnitude() > 5*units::cm && v20.magnitude() > 5*units::cm)) { + + // // Special handling for shortened Y region + // if (pw.at(i) > 7135-5 && pw.at(i) < 7264+5) { + // bool flag_bad = false; + // // Check for dead channels around this position + // // This would need access to ch_mcell_set_map equivalent in toolkit + // // For now, apply stricter angle cuts in this region + // if (pw.at(i) > 7135 && pw.at(i) < 7264) { + // if (refl_angles.at(i) < 27 || ave_angles.at(i) < 15) continue; + // } + // } + + // Calculate charge density before and after kink + double sum_fQ = 0; + double sum_fx = 0; + double sum_bQ = 0; + double sum_bx = 0; + + for (int k = 0; k != 10; k++) { + if (i >= k + 1) { + sum_fQ += dQ.at(i - k - 1); + sum_fx += dx.at(i - k - 1); + } + if (i + k + 1 < dQ.size()) { + sum_bQ += dQ.at(i + k + 1); + sum_bx += dx.at(i + k + 1); + } + } + + sum_fQ /= 
(sum_fx / units::cm + 1e-9) * 50e3; + sum_bQ /= (sum_bx / units::cm + 1e-9) * 50e3; + + // Final selection criteria + if ((sum_fQ > 0.6 && sum_bQ > 0.6) || + (sum_fQ + sum_bQ > 1.4 && (sum_fQ > 0.8 || sum_bQ > 0.8) && + v10.magnitude() > 10*units::cm && v20.magnitude() > 10*units::cm)) { + + if (i + 2 < dQ.size()) { + std::cout << "Kink: " << i << " " << refl_angles.at(i) << " " << para_angles.at(i) + << " " << ave_angles.at(i) << " " << max_numbers.at(i) << " " << angle3 + << " " << dQ.at(i)/dx.at(i)*units::cm/50e3 << " " << pu.at(i) + << " " << pv.at(i) << " " << pw.at(i) << std::endl; + return max_numbers.at(i); + } + } + } + } + } + + for (int i=0;i!=fine_tracking_path.size();i++){ + // std::cout << i << " " << refl_angles.at(i) << " " << ave_angles.at(i) << " " << inside_fiducial_volume(fine_tracking_path.at(i)) << std::endl; + + geo_point_t current_point(fine_tracking_path.at(i).first.x(), + fine_tracking_path.at(i).first.y(), + fine_tracking_path.at(i).first.z()); + + if ((refl_angles.at(i) > 20 && ave_angles.at(i) > 15) && + fiducial_utils->inside_fiducial_volume(current_point)) { + + WireCell::Vector v10(fine_tracking_path.at(i).first.x() - fine_tracking_path.front().first.x(), + fine_tracking_path.at(i).first.y() - fine_tracking_path.front().first.y(), + fine_tracking_path.at(i).first.z() - fine_tracking_path.front().first.z()); + WireCell::Vector v20(fine_tracking_path.back().first.x() - fine_tracking_path.at(i).first.x(), + fine_tracking_path.back().first.y() - fine_tracking_path.at(i).first.y(), + fine_tracking_path.back().first.z() - fine_tracking_path.at(i).first.z()); + + double angle3 = 0; + if (v10.magnitude() > 0 && v20.magnitude() > 0) { + angle3 = std::acos(v10.dot(v20) / (v10.magnitude() * v20.magnitude())) / 3.1415926 * 180.0; + } + + // Convert to raw coordinates for dead region check + WireCell::Point current_point_raw = transform->backward(current_point, cluster_t0, paf.at(i).second, paf.at(i).first); + + if ((angle3 < 20 && 
ave_angles.at(i) < 20) || + (angle3 < 12.5 && fiducial_utils->inside_dead_region(current_point_raw, paf.at(i).first, paf.at(i).second, 2)) || + angle3 < 7.5 || i <= 4) continue; + + if (angle3 > 30){ + // // shorted Y ... + // if (pw.at(i) > 7135 && pw.at(i) < 7264){ + // bool flag_bad = false; + // for (int k=-1;k!=2;k++){ + // if (grouping->is_wire_dead(paf.at(i).first, paf.at(i).second, 1, std::round(pv.at(i)+k), std::round(pt.at(i)))){ + // flag_bad = true; + // break; + // } + // } + // if (flag_bad) continue; + // } + bool flag_bad_u = false; + { + for (int k=-1;k!=2;k++){ + if (cluster.grouping()->is_wire_dead(paf.at(i).first, paf.at(i).second, 0, std::round(pu.at(i)+k), std::round(pt.at(i)))){ + flag_bad_u = true; + break; + } + } + } + bool flag_bad_v = false; + { + for (int k=-1;k!=2;k++){ + if (cluster.grouping()->is_wire_dead(paf.at(i).first, paf.at(i).second, 1, std::round(pv.at(i)+k), std::round(pt.at(i)))){ + flag_bad_v = true; + break; + } + } + } + bool flag_bad_w = false; + { + for (int k=-1;k!=2;k++){ + if (cluster.grouping()->is_wire_dead(paf.at(i).first, paf.at(i).second, 2, std::round(pw.at(i)+k), std::round(pt.at(i)))){ + flag_bad_w = true; + break; + } + } + } + + double sum_fQ = 0; + double sum_fx = 0; + double sum_bQ = 0; + double sum_bx = 0; + for (int k=0;k!=10;k++){ + if (i>=k+1){ + sum_fQ += dQ.at(i-k-1); + sum_fx += dx.at(i-k-1); + } + if (i+k+1 < dQ.size()){ + sum_bQ += dQ.at(i+k+1); + sum_bx += dx.at(i+k+1); + } + } + sum_fQ /= (sum_fx/units::cm+1e-9)*50e3; + sum_bQ /= (sum_bx/units::cm+1e-9)*50e3; + //std::cout << sum_fQ << " " << sum_bQ << std::endl; + if (std::abs(sum_fQ-sum_bQ) < 0.07*(sum_fQ+sum_bQ) && (flag_bad_u||flag_bad_v||flag_bad_w)) continue; + + if (sum_fQ > 0.6 && sum_bQ > 0.6 ){ + if (i+2 segment, int kink_num, std::vector>& fitted_segments) const{ + auto& cluster = *segment->cluster(); + // Get FiducialUtils from the grouping + auto fiducial_utils = cluster.grouping()->get_fiducialutils(); + if (!fiducial_utils) { + 
std::cout << "TaggerCheckSTM: No FiducialUtils available in find_first_kink" << std::endl; + return -1; + } + const auto transform = m_pcts->pc_transform(cluster.get_scope_transform(cluster.get_default_scope())); + + // Extract fit results from the segment + const auto& fits = segment->fits(); + + // Convert fit data to vectors matching the TrackFitting interface + std::vector>> fine_tracking_path; + std::vector dQ, dx; + std::vector> paf; + + for (const auto& fit : fits) { + fine_tracking_path.emplace_back(fit.point, segment); + dQ.push_back(fit.dQ); + dx.push_back(fit.dx); + paf.push_back(fit.paf); + } + + // std::cout << fitted_segments.size() << " " << fine_tracking_path.size() << std::endl; + + if (fine_tracking_path.empty()) { + return false; + } + + // Extract points vector for compatibility with prototype algorithm + std::vector pts; + for (const auto& path_point : fine_tracking_path) { + pts.push_back(path_point.first); + } + + // Determine end point + WireCell::Point end_p; + if (kink_num == pts.size()) { + end_p = pts.back(); + } else { + end_p = pts.at(kink_num); + } + + // Calculate main track direction vector + geo_point_t p1(pts.front().x() - pts.back().x(), + pts.front().y() - pts.back().y(), + pts.front().z() - pts.back().z()); + + // Check fitted segments for Michel electrons and delta rays + for (size_t i = 1; i < fitted_segments.size(); i++) { + const auto& seg_fits = fitted_segments[i]->fits(); + if (seg_fits.empty()) continue; + + double dis = sqrt(pow(end_p.x() - seg_fits.front().point.x(), 2) + + pow(end_p.y() - seg_fits.front().point.y(), 2) + + pow(end_p.z() - seg_fits.front().point.z(), 2)); + + // Protection against Michel electron + double seg_length = segment_track_length(fitted_segments[i]); + double seg_dQ_dx = segment_median_dQ_dx(fitted_segments[i]) * units::cm / 50000; + + + // std::cout << i << " " << dis/units::cm << " " << seg_length/units::cm << " " << seg_dQ_dx << std::endl; + + if (dis < 1*units::cm && seg_length > 
4*units::cm && seg_dQ_dx > 0.5) { + return false; + } + + // Check for delta rays + if (dis > 10*units::cm && seg_length > 4*units::cm ) { + geo_point_t p2(seg_fits.back().point.x() - seg_fits.front().point.x(), + seg_fits.back().point.y() - seg_fits.front().point.y(), + seg_fits.back().point.z() - seg_fits.front().point.z()); + + // Get closest point on the segment to point p + const auto& fit_seg_dpc = segment->dpcloud("main"); + auto closest_result_back = fit_seg_dpc->kd3d().knn(1, seg_fits.back().point); + size_t closest_index_back = closest_result_back[0].first; + + auto closest_result_front = fit_seg_dpc->kd3d().knn(1, seg_fits.front().point); + size_t closest_index_front = closest_result_front[0].first; + + // Access the actual 3D points at the found indices + const auto& dpc_points = fit_seg_dpc->get_points(); + const auto& closest_point_back = dpc_points[closest_index_back]; + const auto& closest_point_front = dpc_points[closest_index_front]; + + // Access 3D coordinates + geo_point_t back_point(closest_point_back.x, closest_point_back.y, closest_point_back.z); + geo_point_t front_point(closest_point_front.x, closest_point_front.y, closest_point_front.z); + + geo_point_t p3(0,0,0); + if (pow(end_p.x() - back_point.x(), 2) + pow(end_p.y() - back_point.y(), 2) + pow(end_p.z() - back_point.z(), 2) < + pow(end_p.x() - front_point.x(), 2) + pow(end_p.y() - front_point.y(), 2) + pow(end_p.z() - front_point.z(), 2)) { + p3.set(front_point.x() - back_point.x(), front_point.y() - back_point.y(), front_point.z() - back_point.z()); + } else { + p3.set(back_point.x() - front_point.x(), back_point.y() - front_point.y(), back_point.z() - front_point.z()); + } + + // std::cout << p2 << " " << p3 << std::endl; + + // Judge direction for delta ray detection + if ((p2.angle(p1)/3.1415926*180. < 20 || + (p3.angle(p1)/3.1415926*180. < 15 && p3.magnitude() > 4*units::cm && p2.angle(p1)/3.1415926*180. 
< 35)) && + seg_dQ_dx > 0.8) { + std::cout << "Delta Ray Dir: " << p2.angle(p1)/3.1415926*180. << " " + << p3.angle(p1)/3.1415926*180. << " " << p3.magnitude()/units::cm << " " + << dis/units::cm << " " << seg_length/units::cm << std::endl; + return true; + } + } + } + + // Calculate cumulative distances and dQ/dx along track + std::vector L(pts.size(), 0); + std::vector dQ_dx(pts.size(), 0); + double dis = 0; + L[0] = dis; + dQ_dx[0] = dQ[0] / (dx[0] / units::cm + 1e-9); + + for (size_t i = 1; i != pts.size(); i++) { + dis += sqrt(pow(pts[i].x() - pts[i-1].x(), 2) + pow(pts[i].y() - pts[i-1].y(), 2) + pow(pts[i].z() - pts[i-1].z(), 2)); + L[i] = dis; + dQ_dx[i] = dQ[i] / (dx[i] / units::cm + 1e-9); + } + + double end_L; + double max_num; + if (kink_num == pts.size()) { + end_L = L.back(); + max_num = L.size(); + } else { + end_L = L[kink_num] - 0.5*units::cm; + max_num = kink_num; + } + + // Find the maximum bin + double max_bin = -1; + double max_sum = 0; + for (size_t i = 0; i != L.size(); i++) { + double sum = 0; + double nsum = 0; + double temp_max_bin = i; + double temp_max_val = dQ_dx[i]; + + if (L[i] < end_L + 0.5*units::cm && L[i] > end_L - 40*units::cm && i < max_num) { + sum += dQ_dx[i]; nsum++; + if (i >= 2) { + sum += dQ_dx[i-2]; nsum++; + if (dQ_dx[i-2] > temp_max_val && i-2 < max_num) { + temp_max_val = dQ_dx[i-2]; + temp_max_bin = i-2; + } + } + if (i >= 1) { + sum += dQ_dx[i-1]; nsum++; + if (dQ_dx[i-1] > temp_max_val && i-1 < max_num) { + temp_max_val = dQ_dx[i-1]; + temp_max_bin = i-1; + } + } + if (i+1 < L.size()) { + sum += dQ_dx[i+1]; nsum++; + if (dQ_dx[i+1] > temp_max_val && i+1 < max_num) { + temp_max_val = dQ_dx[i+1]; + temp_max_bin = i+1; + } + } + if (i+2 < L.size()) { + sum += dQ_dx[i+2]; nsum++; + if (dQ_dx[i+2] > temp_max_val && i+2 < max_num) { + temp_max_val = dQ_dx[i+2]; + temp_max_bin = i+2; + } + } + sum /= nsum; + if (sum > max_sum) { + max_sum = sum; + max_bin = temp_max_bin; + } + } + } + + end_L = L[max_bin] + 0.2*units::cm; 
+ int ncount = 0, ncount_p = 0; + std::vector vec_x, vec_xp; + std::vector vec_y, vec_yp; + + for (size_t i = 0; i != L.size(); i++) { + if (end_L - L[i] < 35*units::cm && end_L - L[i] > 3*units::cm) { + vec_x.push_back(end_L - L[i]); + vec_y.push_back(dQ_dx[i]); + + // std::cout << ncount << " " << vec_x.back() << " " << vec_y.back() << std::endl; + + ncount++; + } + + + + if (end_L - L[i] < 20*units::cm) { + vec_xp.push_back(end_L - L[i]); + vec_yp.push_back(dQ_dx[i]); + ncount_p++; + } + } + + if (ncount >= 5) { + // Create reference vectors for comparison + std::vector muon_ref(ncount); + std::vector const_ref(ncount, 50e3); + std::vector muon_ref_p(ncount_p); + + for (size_t i = 0; i != ncount; i++) { + muon_ref[i] = particle_data()->get_dEdx_function("muon")->scalar_function((vec_x[i])/units::cm); + } + for (size_t i = 0; i != ncount_p; i++) { + muon_ref_p[i] = particle_data()->get_dEdx_function("muon")->scalar_function((vec_xp[i])/units::cm); + } + + // Perform KS-like tests using kslike_compare + double ks1 = WireCell::kslike_compare(vec_y, muon_ref); + double ratio1 = std::accumulate(muon_ref.begin(), muon_ref.end(), 0.0) / + (std::accumulate(vec_y.begin(), vec_y.end(), 0.0) + 1e-9); + double ks2 = WireCell::kslike_compare(vec_y, const_ref); + double ratio2 = std::accumulate(const_ref.begin(), const_ref.end(), 0.0) / + (std::accumulate(vec_y.begin(), vec_y.end(), 0.0) + 1e-9); + double ks3 = WireCell::kslike_compare(vec_yp, muon_ref_p); + double ratio3 = std::accumulate(vec_yp.begin(), vec_yp.end(), 0.0) / + (std::accumulate(muon_ref_p.begin(), muon_ref_p.end(), 0.0) + 1e-9); + + std::cout << "End proton detection: " << ks1 << " " << ks2 << " " << ratio1 << " " << ratio2 + << " " << ks3 << " " << ratio3 << " " << ks1-ks2 + (fabs(ratio1-1)-fabs(ratio2-1))/1.5*0.3 + << " " << dQ_dx[max_bin]/50e3 << " " << dQ_dx.size() - max_bin << " " << std::endl; + + if (ks1-ks2 + (fabs(ratio1-1)-fabs(ratio2-1))/1.5*0.3 > 0.02 && dQ_dx[max_bin]/50e3 > 2.3 && + 
(dQ_dx.size() - max_bin <= 3 || (ks2 < 0.05 && dQ_dx.size() - max_bin <= 12))) { + if (dQ_dx.size()-max_bin <= 1 && dQ_dx[max_bin]/50e3 > 2.5 && ks2 < 0.035 && fabs(ratio2-1) < 0.1) + return true; + if (dQ_dx.size()-max_bin <= 1 && ((dQ_dx[max_bin]/50e3 < 3.0 && ((ks1 < 0.06 && ks2 > 0.03) || (ks1 < 0.065 && ks2 > 0.04))) || (ks1 < 0.035 && dQ_dx[max_bin]/50e3 < 4.0))) + return false; + if (ks1-ks2 + (fabs(ratio1-1)-fabs(ratio2-1))/1.5*0.3 > 0.027) + return true; + } + + // Check for proton with very high dQ_dx + double track_medium_dQ_dx = segment_median_dQ_dx(fitted_segments[0]) * units::cm / 50000.; + std::cout << "End proton detection1: " << track_medium_dQ_dx << " " << dQ_dx[max_bin]/50e3 + << " " << ks3 << " " << ratio3 << std::endl; + + if (track_medium_dQ_dx < 1.0 && dQ_dx.at(max_bin)/50e3 > 3.5){ + if ((ks3 > 0.06 && ratio3 > 1.1 && ks1 > 0.045) || (ks3 > 0.1 && ks2 < 0.19) || (ratio3 > 1.3)) return true; + if ((ks2 < 0.045 && ks3 > 0.03) || (dQ_dx.at(max_bin)/50e3 > 4.3 && ks3 > 0.03)) return true; + }else if (track_medium_dQ_dx < 1 && dQ_dx.at(max_bin)/50e3 > 3.0){ + if (ks3 > 0.12 && ks1 > 0.03) return true; + } + } + + return false; + } + + bool eval_stm(std::shared_ptr segment, int kink_num, double peak_range = 40*units::cm, double offset_length = 0*units::cm, double com_range = 35*units::cm, bool flag_strong_check = false) const{ + auto& cluster = *segment->cluster(); + // Get FiducialUtils from the grouping + auto fiducial_utils = cluster.grouping()->get_fiducialutils(); + if (!fiducial_utils) { + std::cout << "TaggerCheckSTM: No FiducialUtils available in find_first_kink" << std::endl; + return -1; + } + const auto transform = m_pcts->pc_transform(cluster.get_scope_transform(cluster.get_default_scope())); + + // Extract fit results from the segment + const auto& fits = segment->fits(); + + // Convert fit data to vectors matching the TrackFitting interface + std::vector>> fine_tracking_path; + std::vector dQ, dx; + std::vector> paf; + + for (const 
auto& fit : fits) { + fine_tracking_path.emplace_back(fit.point, segment); + dQ.push_back(fit.dQ); + dx.push_back(fit.dx); + paf.push_back(fit.paf); + } + + if (fine_tracking_path.empty()) { + return false; + } + + // Extract points vector for compatibility with prototype algorithm + std::vector pts; + for (const auto& path_point : fine_tracking_path) { + pts.push_back(path_point.first); + } + + std::vector L(pts.size(), 0); + std::vector dQ_dx(pts.size(), 0); + double dis = 0; + L[0] = dis; + dQ_dx[0] = dQ[0] / (dx[0] / units::cm + 1e-9); + + for (size_t i = 1; i != pts.size(); i++) { + dis += sqrt(pow(pts[i].x() - pts[i-1].x(), 2) + + pow(pts[i].y() - pts[i-1].y(), 2) + + pow(pts[i].z() - pts[i-1].z(), 2)); + L[i] = dis; + dQ_dx[i] = dQ[i] / (dx[i] / units::cm + 1e-9); + } + + double end_L; + double max_num; + if (kink_num == pts.size()) { + end_L = L.back(); + max_num = L.size(); + } else { + end_L = L[kink_num] - 0.5 * units::cm; + max_num = kink_num; + } + + double max_bin = -1; + double max_sum = 0; + for (size_t i = 0; i != L.size(); i++) { + double sum = 0; + double nsum = 0; + double temp_max_bin = i; + double temp_max_val = dQ_dx[i]; + + if (L[i] < end_L + 0.5 * units::cm && L[i] > end_L - peak_range && i < max_num) { + sum += dQ_dx[i]; nsum++; + if (i >= 2) { + sum += dQ_dx[i-2]; nsum++; + if (dQ_dx[i-2] > temp_max_val && i-2 < max_num) { + temp_max_val = dQ_dx[i-2]; + temp_max_bin = i-2; + } + } + if (i >= 1) { + sum += dQ_dx[i-1]; nsum++; + if (dQ_dx[i-1] > temp_max_val && i-1 < max_num) { + temp_max_val = dQ_dx[i-1]; + temp_max_bin = i-1; + } + } + if (i+1 < L.size()) { + sum += dQ_dx[i+1]; nsum++; + if (dQ_dx[i+1] > temp_max_val && i+1 < max_num) { + temp_max_val = dQ_dx[i+1]; + temp_max_bin = i+1; + } + } + if (i+2 < L.size()) { + sum += dQ_dx[i+2]; nsum++; + if (dQ_dx[i+2] > temp_max_val && i+2 < max_num) { + temp_max_val = dQ_dx[i+2]; + temp_max_bin = i+2; + } + } + sum /= nsum; + if (sum > max_sum) { + max_sum = sum; + max_bin = temp_max_bin; + } 
+ } + } + + if (max_bin == -1) + max_bin = max_num; + + end_L = L[max_bin] + 0.2 * units::cm; + int ncount = 0; + std::vector vec_x; + std::vector vec_y; + std::vector vec_res_x; + std::vector vec_res_y; + + for (size_t i = 0; i != L.size(); i++) { + if (end_L - L[i] < com_range && end_L - L[i] > 0) { + vec_x.push_back(end_L - L[i]); + vec_y.push_back(dQ_dx[i]); + ncount++; + } else if (L[i] > end_L) { + vec_res_x.push_back(L[i] - end_L); + vec_res_y.push_back(dQ_dx[i]); + } + } + + double ave_res_dQ_dx = 0; + double res_length = 0; + for (size_t i = 0; i != vec_res_y.size(); i++) { + ave_res_dQ_dx += vec_res_y[i]; + } + + if (vec_res_y.size() > 0) { + res_length = vec_res_x.back(); + ave_res_dQ_dx /= 1. * vec_res_y.size(); + } + + double res_length1 = 0, res_dis1 = 0; + if (max_bin + 3 < L.size()) { + res_length1 = L.back() - L[max_bin + 3]; + res_dis1 = sqrt(pow(pts.back().x() - pts[max_bin + 3].x(), 2) + + pow(pts.back().y() - pts[max_bin + 3].y(), 2) + + pow(pts.back().z() - pts[max_bin + 3].z(), 2)); + } + + // std::cout << "Test: " << res_length/units::cm << " " << ave_res_dQ_dx << " " << res_length1/units::cm << " " << res_dis1/units::cm << std::endl; + + // Create vectors for KS test instead of histograms + std::vector test_data(ncount); + std::vector ref_muon(ncount); + std::vector ref_flat(ncount); + + for (size_t i = 0; i != ncount; i++) { + test_data[i] = vec_y[i]; + ref_muon[i] = particle_data()->get_dEdx_function("muon")->scalar_function((vec_x[i] + offset_length) / units::cm); + ref_flat[i] = 50e3; + } + + double ks1 = WireCell::kslike_compare(test_data, ref_muon); + double ratio1 = std::accumulate(ref_muon.begin(), ref_muon.end(), 0.0) / + (std::accumulate(test_data.begin(), test_data.end(), 0.0) + 1e-9); + double ks2 = WireCell::kslike_compare(test_data, ref_flat); + double ratio2 = std::accumulate(ref_flat.begin(), ref_flat.end(), 0.0) / + (std::accumulate(test_data.begin(), test_data.end(), 0.0) + 1e-9); + + std::cout << "KS value: " << 
flag_strong_check << " " << ks1 << " " << ks2 << " " << ratio1 << " " << ratio2 << " " << ks1-ks2 + (fabs(ratio1-1)-fabs(ratio2-1))/1.5*0.3 << " " << res_dis1/(res_length1+1e-9) << " " << res_length /units::cm << " " << ave_res_dQ_dx/50000. << std::endl; + + if (ks1 - ks2 >= 0.0) return false; + if (sqrt(pow(ks2/0.06, 2) + pow((ratio2-1)/0.06, 2)) < 1.4 && + ks1 - ks2 + (fabs(ratio1-1) - fabs(ratio2-1))/1.5*0.3 > -0.02) return false; + + if (((res_length > 8*units::cm && ave_res_dQ_dx/50000. > 0.9 && res_length1 > 5*units::cm) || + (res_length1 > 1.5*units::cm && ave_res_dQ_dx/50000. > 2.3)) && res_dis1/(res_length1+1e-9) > 0.99) + return false; + + // If residual does not look like a michel electron + if ((res_length > 20 * units::cm && ave_res_dQ_dx/50000. > 1.2 && + ks1 - ks2 + (fabs(ratio1-1) - fabs(ratio2-1))/1.5*0.3 > -0.02) || + (res_length > 16 * units::cm && ave_res_dQ_dx > 72500) || + (res_length > 10 * units::cm && ave_res_dQ_dx > 72500 && + ks1 - ks2 + (fabs(ratio1-1) - fabs(ratio2-1))/1.5*0.3 > -0.05) || + (res_length > 10 * units::cm && ave_res_dQ_dx > 85000) || + (res_length > 6 * units::cm && ave_res_dQ_dx > 92500) || + (res_length > 6 * units::cm && ave_res_dQ_dx > 72500 && + ks1 - ks2 + (fabs(ratio1-1) - fabs(ratio2-1))/1.5*0.3 > -0.05) || + (res_length > 4 * units::cm && ave_res_dQ_dx/50000. > 1.4 && + ks1 - ks2 + (fabs(ratio1-1) - fabs(ratio2-1))/1.5*0.3 > 0.02) || + (res_length > 2*units::cm && ave_res_dQ_dx/50000. 
> 4.5)) + return false; + + if (!flag_strong_check) { + if (ks1 - ks2 < -0.02 && ((ks2 > 0.09 && fabs(ratio2-1) > 0.1) || ratio2 > 1.5 || ks2 > 0.2)) + return true; + if (ks1 - ks2 + (fabs(ratio1-1) - fabs(ratio2-1))/1.5*0.3 < 0) + return true; + } else { + if (ks1 - ks2 < -0.02 && (ks2 > 0.09 || ratio2 > 1.5) && ks1 < 0.05 && fabs(ratio1-1) < 0.1) + return true; + if (ks1 - ks2 + (fabs(ratio1-1) - fabs(ratio2-1))/1.5*0.3 < 0 && ks1 < 0.05 && fabs(ratio1-1) < 0.1) + return true; + } + + + return false; + } + + std::shared_ptr create_segment_for_cluster(WireCell::Clus::Facade::Cluster& cluster, + const std::vector& path_points) const{ + + // Step 3: Prepare segment data + std::vector wcpoints; + // const auto transform = m_pcts->pc_transform(cluster.get_scope_transform(cluster.get_default_scope())); + // Step 4: Create segment connecting the vertices + auto segment = PR::make_segment(); + + // create and associate Dynamic Point Cloud + for (const auto& point : path_points) { + PR::WCPoint wcp; + wcp.point = point; + wcpoints.push_back(wcp); + } + + // Step 5: Configure the segment + segment->wcpts(wcpoints).cluster(&cluster).dirsign(1); // direction: +1, 0, or -1 + + // auto& wcpts = segment->wcpts(); + // for (size_t i=0;i!=path_points.size(); i++){ + // std::cout << "A: " << i << " " << path_points.at(i) << " " << wcpts.at(i).point << std::endl; + // } + create_segment_point_cloud(segment, path_points, m_dv, "main"); + + return segment; + } + + void search_other_tracks(Cluster& cluster, std::vector>& fitted_segments, double search_range = 1.5*units::cm, double scaling_2d = 0.8) const{ + std::vector > other_segments; + + // Early return if no existing segment + if (fitted_segments.empty()) return; + + const auto& steiner_pc = cluster.get_pc("steiner_pc"); + const auto& coords = cluster.get_default_scope().coords; + const auto& x_coords = steiner_pc.get(coords.at(0))->elements(); + const auto& y_coords = steiner_pc.get(coords.at(1))->elements(); + const auto& 
z_coords = steiner_pc.get(coords.at(2))->elements(); + const auto& wpid_array = steiner_pc.get("wpid")->elements(); + + const size_t N = x_coords.size(); + if (N == 0) return; + + std::vector flag_tagged(N, false); + int num_tagged = 0; + + const auto transform = m_pcts->pc_transform(cluster.get_scope_transform(cluster.get_default_scope())); + double cluster_t0 = cluster.get_cluster_t0(); + + // Step 1: Tag points within search_range of existing tracks + for (size_t i = 0; i < N; i++) { + geo_point_t p(x_coords[i], y_coords[i], z_coords[i]); + double min_dis_u = 1e9, min_dis_v = 1e9, min_dis_w = 1e9; + + // Get closest 2D distances for each plane using DynamicPointCloud + // Get wire plane parameters for 2D projections + WirePlaneId wpid = wpid_array[i]; + int apa = wpid.apa(); + int face = wpid.face(); + + for (const auto& fit_seg : fitted_segments) { + // Get closest point on the segment to point p + const auto& fit_seg_dpc = fit_seg->dpcloud("main"); + + auto closest_result = fit_seg_dpc->kd3d().knn(1, p); + if (closest_result.empty()) continue; + + // size_t closest_index = closest_result[0].first; + double closest_3d_distance = sqrt(closest_result[0].second); + + if (closest_3d_distance < search_range) { + flag_tagged[i] = true; + num_tagged++; + break; + } + + // Check distances in each plane (U, V, W) + for (int plane = 0; plane < 3; plane++) { + auto closest_2d = fit_seg_dpc->get_closest_2d_point_info(p, plane, face, apa); + double dist_2d = std::get<0>(closest_2d); + + if (plane == 0 && dist_2d < min_dis_u) min_dis_u = dist_2d; + else if (plane == 1 && dist_2d < min_dis_v) min_dis_v = dist_2d; + else if (plane == 2 && dist_2d < min_dis_w) min_dis_w = dist_2d; + } + + // std::cout << i << " " << closest_3d_distance << " " << min_dis_u/units::cm << " " << min_dis_v/units::cm << " " << min_dis_w/units::cm << std::endl; + + } + + // Additional tagging based on 2D projections and dead channels + if (!flag_tagged[i]) { + // Check if point should be tagged based 
on 2D distances or dead channels + // figure out the raw_point ... + auto p_raw= transform->backward(p, cluster_t0, face, apa); + + // Note: Dead channel checking would require access to detector status + bool u_ok = (min_dis_u < scaling_2d * search_range || cluster.grouping()->get_closest_dead_chs(p_raw, 1, apa, face, 0)); // U plane + bool v_ok = (min_dis_v < scaling_2d * search_range || cluster.grouping()->get_closest_dead_chs(p_raw, 1, apa, face, 1)); // V plane + bool w_ok = (min_dis_w < scaling_2d * search_range || cluster.grouping()->get_closest_dead_chs(p_raw, 1, apa, face, 2)); // W plane + + if (u_ok && v_ok && w_ok) { + flag_tagged[i] = true; + } + } + } + // std::cout << num_tagged << " " << N << std::endl; + (void) num_tagged; + + // Step 2: Get Steiner_Graph and its terminals ... + const auto& steiner_graph = cluster.get_graph("steiner_graph"); + const auto& flag_steiner_terminal = steiner_pc.get("flag_steiner_terminal")->elements(); + + // Step 3: Identify terminal vertices from cluster boundary points + std::vector terminals; + std::map map_oindex_tindex; + for (size_t i = 0;i!=flag_steiner_terminal.size();i++){ + if (flag_steiner_terminal[i]){ + map_oindex_tindex[i] = terminals.size(); + terminals.push_back(i); + } + } + + // Step 4: Use cluster's graph for Voronoi computation + using namespace WireCell::Clus::Graphs::Weighted; + auto vor = voronoi(steiner_graph, terminals); + // Access nearest terminal for any vertex like this: + // auto nearest_terminal_for_vertex_i = vor.terminal[i]; + + // Step 5: Build terminal graph and find MST + using Base = boost::property; // Not edge_name_t! 
+ using WeightProperty = boost::property; + using TerminalGraph = boost::adjacency_list; + + TerminalGraph terminal_graph(N); + std::map, std::pair> map_saved_edge; + + // Build terminal graph from Voronoi regions + auto edge_weight = get(boost::edge_weight, steiner_graph); + + for (auto w : boost::make_iterator_range(edges(steiner_graph))) { + size_t nearest_to_source = vor.terminal[source(w, steiner_graph)]; + size_t nearest_to_target = vor.terminal[target(w, steiner_graph)]; + + if (nearest_to_source != nearest_to_target) { + double weight = vor.distance[source(w, steiner_graph)] + + vor.distance[target(w, steiner_graph)] + + edge_weight[w]; // Don't forget the edge weight! + + // Convert terminal indices back to actual terminal vertices + auto edge_pair1 = std::make_pair(nearest_to_source, nearest_to_target); + auto edge_pair2 = std::make_pair(nearest_to_target, nearest_to_source); + + auto it1 = map_saved_edge.find(edge_pair1); + auto it2 = map_saved_edge.find(edge_pair2); + + if (it1 != map_saved_edge.end()) { + // Update (A,B) if better + if (weight < it1->second.first) { + it1->second = std::make_pair(weight, w); + } + } else if (it2 != map_saved_edge.end()) { + // Update (B,A) if better + if (weight < it2->second.first) { + it2->second = std::make_pair(weight, w); + } + } else { + // Create new entry + map_saved_edge[edge_pair1] = std::make_pair(weight, w); + } + } + } + + // Add edges with compound properties + for (const auto& [edge_pair, weight_info] : map_saved_edge) { + boost::add_edge(edge_pair.first, edge_pair.second, + WeightProperty(weight_info.first, Base(weight_info.second)), + terminal_graph); + } + + // Step 6: Find minimum spanning tree + std::vector::edge_descriptor> mst_edges; + boost::kruskal_minimum_spanning_tree(terminal_graph, std::back_inserter(mst_edges)); + + // Step 7: Create cluster graph and find connected components + TerminalGraph terminal_graph_cluster(terminals.size()); + std::map> map_connection; + + for (const auto& edge : 
mst_edges) { + size_t source_idx = boost::source(edge, terminal_graph); + size_t target_idx = boost::target(edge, terminal_graph); + + if (flag_tagged[source_idx] == flag_tagged[target_idx]) { + boost::add_edge(map_oindex_tindex[source_idx], map_oindex_tindex[target_idx], terminal_graph_cluster); + } else { + if (map_connection.find(source_idx)==map_connection.end()){ + std::set temp_results; + temp_results.insert(target_idx); + map_connection[source_idx] = temp_results; + }else{ + map_connection[source_idx].insert(target_idx); + } + if (map_connection.find(target_idx)==map_connection.end()){ + std::set temp_results; + temp_results.insert(source_idx); + map_connection[target_idx] = temp_results; + }else{ + map_connection[target_idx].insert(source_idx); + } + } + } + + // Step 8: Find connected components + std::vector component(boost::num_vertices(terminal_graph_cluster)); + const int num_components = boost::connected_components(terminal_graph_cluster, &component[0]); + std::vector ncounts(num_components, 0); + std::vector> sep_clusters(num_components); + + for (size_t i = 0; i < component.size(); ++i) { + ncounts[component[i]]++; + sep_clusters[component[i]].push_back(terminals[i]); + } + + // Step 9: Filter and create new segments for valid clusters + for (int comp_idx = 0; comp_idx < num_components; comp_idx++) { + // Skip if inside original track or just one point + if (flag_tagged[sep_clusters[comp_idx].front()] || ncounts[comp_idx] == 1) continue; + // Find connection point to existing track + size_t special_A = SIZE_MAX; + for (size_t j = 0; j < ncounts[comp_idx]; j++) { + if (map_connection.find(sep_clusters[comp_idx][j]) != map_connection.end()) { + special_A = sep_clusters[comp_idx][j]; + break; + } + } + if (special_A == SIZE_MAX) continue; + + // Find furthest point from special_A + size_t special_B = special_A; + double max_dis = 0; + int number_not_faked = 0; + double max_dis_u = 0, max_dis_v = 0, max_dis_w = 0; + + for (size_t j = 0; j < 
ncounts[comp_idx]; j++) { + size_t point_idx = sep_clusters[comp_idx][j]; + geo_point_t p1(x_coords[special_A], y_coords[special_A], z_coords[special_A]); + geo_point_t p2(x_coords[point_idx], y_coords[point_idx], z_coords[point_idx]); + + double dis = sqrt(pow(p1.x() - p2.x(), 2) + pow(p1.y() - p2.y(), 2) + pow(p1.z() - p2.z(), 2)); + if (dis > max_dis) { + max_dis = dis; + special_B = point_idx; + } + + // Check if this track segment is "fake" (too close to existing tracks) + double min_dis_u = 1e9, min_dis_v = 1e9, min_dis_w = 1e9; + WirePlaneId wpid = wpid_array[point_idx]; + int apa = wpid.apa(); + int face = wpid.face(); + + for (const auto& fit_seg : fitted_segments) { + const auto& fit_seg_dpc = fit_seg->dpcloud("main"); + for (int plane = 0; plane < 3; plane++) { + auto closest_2d = fit_seg_dpc->get_closest_2d_point_info(p2, plane, face, apa); + double dist_2d = std::get<0>(closest_2d); + + if (plane == 0 && dist_2d < min_dis_u) min_dis_u = dist_2d; + else if (plane == 1 && dist_2d < min_dis_v) min_dis_v = dist_2d; + else if (plane == 2 && dist_2d < min_dis_w) min_dis_w = dist_2d; + } + } + + + auto p_raw= transform->backward(p2, cluster_t0, face, apa); + + int flag_num = 0; + if (min_dis_u > scaling_2d * search_range && (!cluster.grouping()->get_closest_dead_chs(p_raw, 1, apa, face, 0))) flag_num++; + if (min_dis_v > scaling_2d * search_range && (!cluster.grouping()->get_closest_dead_chs(p_raw, 1, apa, face, 1))) flag_num++; + if (min_dis_w > scaling_2d * search_range && (!cluster.grouping()->get_closest_dead_chs(p_raw, 1, apa, face, 2))) flag_num++; + + if (min_dis_u > max_dis_u && (!cluster.grouping()->get_closest_dead_chs(p_raw, 1, apa, face, 0))) max_dis_u = min_dis_u; + if (min_dis_v > max_dis_v && (!cluster.grouping()->get_closest_dead_chs(p_raw, 1, apa, face, 1))) max_dis_v = min_dis_v; + if (min_dis_w > max_dis_w && (!cluster.grouping()->get_closest_dead_chs(p_raw, 1, apa, face, 2))) max_dis_w = min_dis_w; + + if (flag_num >= 2) 
number_not_faked++; + } + + // Apply quality cuts (from prototype) + if (number_not_faked < 4 && (number_not_faked < 0.15 * ncounts[comp_idx] || number_not_faked == 1)) continue; + + bool quality_check = ((max_dis_u/units::cm > 4 || max_dis_v/units::cm > 4 || max_dis_w/units::cm > 4) && + max_dis_u + max_dis_v + max_dis_w > 7*units::cm) || + (number_not_faked > 4 && number_not_faked >= 0.75*ncounts[comp_idx]); + + if (!quality_check) continue; + + // Step 10: Create new segment for this cluster + std::vector path_indices; + + // Use cluster's shortest path algorithm to connect special_A to special_B + auto path_wcps = cluster.graph_algorithms("steiner_graph", m_dv, m_pcts).shortest_path(special_A, special_B); + + // Convert path to points + std::vector path_points; + for (size_t idx : path_wcps) { + path_points.push_back({x_coords[idx],y_coords[idx],z_coords[idx]}); + } + + auto new_segment = create_segment_for_cluster(cluster, path_points); + + m_track_fitter.add_segment(new_segment); + m_track_fitter.do_single_tracking(new_segment, true, true, false); + + fitted_segments.push_back(new_segment); + } + + // std::cout << "Fitted segments: " << fitted_segments.size() << std::endl; + + } + + bool check_other_clusters(Cluster& main_cluster, std::vector associated_clusters) const { + int number_clusters = 0; + double total_length = 0; + + // Iterate through all associated clusters + for (auto it = associated_clusters.begin(); it != associated_clusters.end(); it++) { + Cluster* cluster = *it; + + // Get the two boundary points (equivalent to get_two_boundary_wcps in prototype) + std::pair boundary_points = cluster->get_two_boundary_wcps(); + + // Calculate coverage_x (difference in x coordinates) + double coverage_x = boundary_points.first.x() - boundary_points.second.x(); + + // Get cluster length using the toolkit's built-in function + double length = sqrt(pow(boundary_points.first.x() - boundary_points.second.x(), 2) + + pow(boundary_points.first.y() - 
boundary_points.second.y(), 2) + + pow(boundary_points.first.z() - boundary_points.second.z(), 2)); + + // Get closest points between main cluster and current cluster + std::tuple results = main_cluster.get_closest_points(*cluster); + + // std::cout << "ABC: " << coverage_x/units::cm << " " << length/units::cm << " " << std::get<2>(results)/units::cm << std::endl; + + // Apply the same conditions as in the prototype: + // - Distance between clusters < 25 cm + // - Absolute coverage in X > 0.75 cm + // - Length > 5 cm + if (std::get<2>(results) < 25*units::cm && + fabs(coverage_x) > 0.75*units::cm && + length > 5*units::cm) { + number_clusters++; + total_length += length; + } + } + + // Apply the final condition from prototype + if (number_clusters > 0 && + (number_clusters/3. + total_length/(35*units::cm)/number_clusters) >= 1) { + std::cout << "Other clusters: " << number_clusters << " " + << (number_clusters/3. + total_length/(35*units::cm)/number_clusters) + << std::endl; + return true; + } + + return false; + } + + bool check_other_tracks(Cluster& cluster, std::vector>& fitted_segments) const { + if (fitted_segments.size() <= 1) return false; + + int ntracks = 0; + + geo_point_t drift_dir_abs(1, 0, 0); + + // Loop through segments starting from index 1 (skip main segment) + for (size_t i = 1; i < fitted_segments.size(); i++) { + auto segment = fitted_segments[i]; + // Use helper functions from PRSegmentFunctions.h + double track_length1 = segment_track_length(segment) / units::cm; + double track_medium_dQ_dx = segment_median_dQ_dx(segment) * units::cm / 50000.; + double track_length_threshold = segment_track_length_threshold(segment, 75000./units::cm) / units::cm; + + + + // Calculate direction vector (geometric path from front to back) + const auto& fits = segment->fits(); + if (fits.empty()) continue; // Skip if no fits available + + const auto& front_pt = fits.front().point; + const auto& back_pt = fits.back().point; + geo_point_t dir1( + front_pt.x() - 
back_pt.x(), + front_pt.y() - back_pt.y(), + front_pt.z() - back_pt.z() + ); + + // Calculate geometric length using helper function (maps to get_track_length(2)) + double straightness_ratio = dir1.magnitude() / segment_track_length(segment); + + // std::cout << track_length1 << " " << track_medium_dQ_dx << " " << track_length_threshold << " " << dir1 << " " << straightness_ratio << std::endl; + + // Main logic from prototype + if (track_length1 > 5 && track_medium_dQ_dx > 0.4) { + ntracks++; + } + if (track_length1 > 40 && track_medium_dQ_dx > 0.8) return true; + + double angle_deg = fabs(dir1.angle(drift_dir_abs) - 3.1415926 / 2.) * 180. / 3.1415926; + if (fabs(angle_deg - 90.0) < 7.5) continue; // Skip tracks nearly parallel to drift + + // Complex condition from prototype + if (((track_length1 > 5 && track_medium_dQ_dx > 0.7) && + ((track_medium_dQ_dx - 0.7)/0.1 > (19 - track_length1)/7.) && + straightness_ratio > 0.99) || + (track_length1 > 4 && track_medium_dQ_dx > 1.5 && straightness_ratio > 0.975)) { + return true; + } + + if (track_medium_dQ_dx > 1.5 && track_length1 > 8 && straightness_ratio < 0.9) continue; + + if ((track_medium_dQ_dx > 1.5 && track_length1 > 3) || + (track_medium_dQ_dx > 2.5 && track_length1 > 2.5) || + (track_length_threshold > 5 && ((track_length_threshold > 0.6 * track_length1) || track_length1 > 20))) { + + if (track_length1 < 5 && track_medium_dQ_dx < 2) continue; + else if (track_length1 < 25 && track_medium_dQ_dx < 1) continue; + else if (track_length1 < 10 && track_medium_dQ_dx < 85/50.) continue; + else if (track_length1 < 3.5 && track_medium_dQ_dx < 110/50.) continue; + else return true; + } + } + + if (ntracks >= 3) return true; + return false; + } + + /** + * Check if a cluster meets the conditions for STM (Short Track Muon) tagging. + * This is where you'll implement your specific STM detection algorithm. 
+ * + * @param cluster The main cluster to analyze + * @return true if cluster should be flagged as STM + */ + bool check_stm_conditions(Cluster& cluster, std::vector associated_clusters) const { + // get all the angles ... + + // Get all the wire plane IDs from the grouping + const auto& wpids = cluster.grouping()->wpids(); + // Key: pair, Value: drift_dir, angle_u, angle_v, angle_w + std::map> wpid_params; + std::map > wpid_U_dir; + std::map > wpid_V_dir; + std::map > wpid_W_dir; + std::set apas; + compute_wireplane_params(wpids, m_dv, wpid_params, wpid_U_dir, wpid_V_dir, wpid_W_dir, apas); + + std::cout << "TaggerCheckSTM: Checking cluster with " << wpids.size() + << " wire plane IDs and " << apas.size() << " APAs." << std::endl; + + // Early exit if no steiner graph points + if (!cluster.has_pc("steiner_pc") || cluster.get_pc("steiner_pc").size() == 0) { + return false; + } + + // Get the main PCA axis direction + const auto& pca = cluster.get_pca(); + geo_vector_t main_dir = pca.axis.at(0); + + // need to set later accoding to APA and face + geo_point_t drift_dir, U_dir, V_dir, W_dir; + + std::vector candidate_exit_wcps; + std::set temp_set; + std::pair boundary_indices; + + // First round check - get boundary points from steiner graph + boundary_indices = cluster.get_two_boundary_steiner_graph_idx("steiner_graph", "steiner_pc", true); + + // Get extreme points + std::vector> out_vec_wcps = cluster.get_extreme_wcps(); + + // Get the steiner_pc to access actual points using boundary_indices + const auto& steiner_pc = cluster.get_pc("steiner_pc"); + const auto& coords = cluster.get_default_scope().coords; + const auto& x_coords = steiner_pc.get(coords.at(0))->elements(); + const auto& y_coords = steiner_pc.get(coords.at(1))->elements(); + const auto& z_coords = steiner_pc.get(coords.at(2))->elements(); + + // Add the two boundary points as additional extreme point groups + geo_point_t boundary_point_first(x_coords[boundary_indices.first], + 
y_coords[boundary_indices.first], + z_coords[boundary_indices.first]); + geo_point_t boundary_point_second(x_coords[boundary_indices.second], + y_coords[boundary_indices.second], + z_coords[boundary_indices.second]); + { + std::vector temp_wcps; + temp_wcps.push_back(boundary_point_first); + out_vec_wcps.push_back(temp_wcps); + } + { + std::vector temp_wcps; + temp_wcps.push_back(boundary_point_second); + out_vec_wcps.push_back(temp_wcps); + } + + // Get FiducialUtils from the grouping + auto fiducial_utils = cluster.grouping()->get_fiducialutils(); + if (!fiducial_utils) { + std::cout << "TaggerCheckSTM: No FiducialUtils available" << std::endl; + return false; + } + + // Boundary check + for (size_t i = 0; i != out_vec_wcps.size(); i++) { + bool flag_save = false; + + // Check all the points in this extreme point group + for (size_t j = 0; j != out_vec_wcps.at(i).size(); j++) { + geo_point_t p1 = out_vec_wcps.at(i).at(j); + + if (!fiducial_utils->inside_fiducial_volume(p1)) { + candidate_exit_wcps.push_back(out_vec_wcps.at(i).at(0)); + flag_save = true; + break; + } + } + + if (!flag_save) { + // Check direction using vhough_transform + geo_point_t p1 = out_vec_wcps.at(i).at(0); + geo_vector_t dir_vec = cluster.vhough_transform(p1, 30*units::cm); + dir_vec = dir_vec * (-1.0); // Reverse direction + + // Convert to geo_point_t for angle calculations + geo_point_t dir(dir_vec.x(), dir_vec.y(), dir_vec.z()); + + // Check U, V, and W angles + geo_point_t dir_1(0, dir.y(), dir.z()); + + // get apa and face from the point p1 ... 
+ auto wpid_p1 = m_dv->contained_by(p1); + // Fill drift_dir, U_dir, V_dir, W_dir from the maps using wpid_p1 + auto it_params = wpid_params.find(wpid_p1); + if (it_params != wpid_params.end()) { + drift_dir = std::get<0>(it_params->second); + } else { + std::cerr << "TaggerCheckSTM: wpid_params not found for wpid_p1" << std::endl; + } + + auto it_U = wpid_U_dir.find(wpid_p1); + if (it_U != wpid_U_dir.end()) { + U_dir = it_U->second.first; + } else { + std::cerr << "TaggerCheckSTM: wpid_U_dir not found for wpid_p1" << std::endl; + } + + auto it_V = wpid_V_dir.find(wpid_p1); + if (it_V != wpid_V_dir.end()) { + V_dir = it_V->second.first; + } else { + std::cerr << "TaggerCheckSTM: wpid_V_dir not found for wpid_p1" << std::endl; + } + + auto it_W = wpid_W_dir.find(wpid_p1); + if (it_W != wpid_W_dir.end()) { + W_dir = it_W->second.first; + } else { + std::cerr << "TaggerCheckSTM: wpid_W_dir not found for wpid_p1" << std::endl; + } + + // std::cout << "TaggerCheckSTM: Checking angles for point " + // << p1 << " with wpid " << wpid_p1 << " " << drift_dir << " " << U_dir << " " << V_dir << " " << W_dir << std::endl; + + // Calculate angles with wire directions + double angle1 = acos(dir_1.dot(U_dir) / (dir_1.magnitude() * U_dir.magnitude())); + geo_point_t tempV1(fabs(dir.x()), + sqrt(dir.y()*dir.y() + dir.z()*dir.z()) * sin(angle1), 0); + double angle1_1 = acos(tempV1.dot(drift_dir) / (tempV1.magnitude() * drift_dir.magnitude())) / 3.1415926 * 180.; + + double angle2 = acos(dir_1.dot(V_dir) / (dir_1.magnitude() * V_dir.magnitude())); + geo_point_t tempV2(fabs(dir.x()), + sqrt(dir.y()*dir.y() + dir.z()*dir.z()) * sin(angle2), 0); + double angle2_1 = acos(tempV2.dot(drift_dir) / (tempV2.magnitude() * drift_dir.magnitude())) / 3.1415926 * 180.; + + double angle3 = acos(dir_1.dot(W_dir) / (dir_1.magnitude() * W_dir.magnitude())); + geo_point_t tempV3(fabs(dir.x()), + sqrt(dir.y()*dir.y() + dir.z()*dir.z()) * sin(angle3), 0); + double angle3_1 = acos(tempV3.dot(drift_dir) / 
(tempV3.magnitude() * drift_dir.magnitude())) / 3.1415926 * 180.; + + // std::cout << "Test: " << angle1 << " " << angle1_1 << " " << angle2 << " " << angle2_1 << " " << angle3 << " " << angle3_1 << std::endl; + + + if ((angle1_1 < 10 || angle2_1 < 10 || angle3_1 < 5)) { + if (!fiducial_utils->check_signal_processing(cluster, p1, dir_vec, 1*units::cm)) { + flag_save = true; + candidate_exit_wcps.push_back(out_vec_wcps.at(i).at(0)); + } + } + + if (!flag_save) { + // Calculate angle between direction and main axis + double main_angle = acos(dir_vec.dot(main_dir) / (dir_vec.magnitude() * main_dir.magnitude())); + double angle_deg = fabs((3.1415926/2. - main_angle) / 3.1415926 * 180.); + + if (angle_deg > 60) { + if (!fiducial_utils->check_dead_volume(cluster, p1, dir_vec, 1*units::cm)) { + flag_save = true; + candidate_exit_wcps.push_back(out_vec_wcps.at(i).at(0)); + } + } + } + } + } + + // Determine which boundary points are exit candidates + for (size_t i = 0; i != candidate_exit_wcps.size(); i++) { + double dis1 = (candidate_exit_wcps.at(i) - boundary_point_first).magnitude(); + double dis2 = (candidate_exit_wcps.at(i) - boundary_point_second).magnitude(); + + // std::cout << "Test: " << candidate_exit_wcps.at(i) << " " << dis1 << " " << dis2 << std::endl; + + // Check if essentially one of the extreme points + if (dis1 < dis2) { + if (dis1 < 1.0*units::cm) temp_set.insert(0); + } else { + if (dis2 < 1.0*units::cm) temp_set.insert(1); + } + } + + // Protection against two end point situation + if (temp_set.size() == 2) { + geo_point_t tp1 = boundary_point_first; + geo_point_t tp2 = boundary_point_second; + + temp_set.clear(); + + if (!fiducial_utils->inside_fiducial_volume(tp1)) temp_set.insert(0); + if (!fiducial_utils->inside_fiducial_volume(tp2)) temp_set.insert(1); + if (temp_set.size() == 0) { + temp_set.insert(0); + temp_set.insert(1); + } + } + + // Second round check if no candidates found + if (temp_set.size() == 0) { + candidate_exit_wcps.clear(); + + 
// Repeat the process with flag_cosmic = false + boundary_indices = cluster.get_two_boundary_steiner_graph_idx("steiner_graph", "steiner_pc", false); + out_vec_wcps = cluster.get_extreme_wcps(); + + // Get the steiner_pc to access actual points using boundary_indices + const auto& steiner_pc = cluster.get_pc("steiner_pc"); + const auto& coords = cluster.get_default_scope().coords; + const auto& x_coords = steiner_pc.get(coords.at(0))->elements(); + const auto& y_coords = steiner_pc.get(coords.at(1))->elements(); + const auto& z_coords = steiner_pc.get(coords.at(2))->elements(); + + // Add the two boundary points as additional extreme point groups + geo_point_t boundary_point_first(x_coords[boundary_indices.first], + y_coords[boundary_indices.first], + z_coords[boundary_indices.first]); + geo_point_t boundary_point_second(x_coords[boundary_indices.second], + y_coords[boundary_indices.second], + z_coords[boundary_indices.second]); + + // Add boundary points again + { + std::vector temp_wcps; + temp_wcps.push_back(boundary_point_first); + out_vec_wcps.push_back(temp_wcps); + } + { + std::vector temp_wcps; + temp_wcps.push_back(boundary_point_second); + out_vec_wcps.push_back(temp_wcps); + } + + // Repeat boundary check (same logic as above) + for (size_t i = 0; i != out_vec_wcps.size(); i++) { + bool flag_save = false; + + for (size_t j = 0; j != out_vec_wcps.at(i).size(); j++) { + geo_point_t p1 = out_vec_wcps.at(i).at(j); + + if (!fiducial_utils->inside_fiducial_volume(p1)) { + candidate_exit_wcps.push_back(out_vec_wcps.at(i).at(0)); + flag_save = true; + break; + } + } + + if (!flag_save) { + geo_point_t p1 = out_vec_wcps.at(i).at(0); + geo_vector_t dir_vec = cluster.vhough_transform(p1, 30*units::cm); + dir_vec = dir_vec * (-1.0); + + geo_point_t dir(dir_vec.x(), dir_vec.y(), dir_vec.z()); + geo_point_t dir_1(0, dir.y(), dir.z()); + + // get apa and face from the point p1 ... 
+ auto wpid_p1 = m_dv->contained_by(p1); + // Fill drift_dir, U_dir, V_dir, W_dir from the maps using wpid_p1 + auto it_params = wpid_params.find(wpid_p1); + if (it_params != wpid_params.end()) { + drift_dir = std::get<0>(it_params->second); + } else { + std::cerr << "TaggerCheckSTM: wpid_params not found for wpid_p1" << std::endl; + } + + auto it_U = wpid_U_dir.find(wpid_p1); + if (it_U != wpid_U_dir.end()) { + U_dir = it_U->second.first; + } else { + std::cerr << "TaggerCheckSTM: wpid_U_dir not found for wpid_p1" << std::endl; + } + + auto it_V = wpid_V_dir.find(wpid_p1); + if (it_V != wpid_V_dir.end()) { + V_dir = it_V->second.first; + } else { + std::cerr << "TaggerCheckSTM: wpid_V_dir not found for wpid_p1" << std::endl; + } + + auto it_W = wpid_W_dir.find(wpid_p1); + if (it_W != wpid_W_dir.end()) { + W_dir = it_W->second.first; + } else { + std::cerr << "TaggerCheckSTM: wpid_W_dir not found for wpid_p1" << std::endl; + } + + + + + double angle1 = acos(dir_1.dot(U_dir) / (dir_1.magnitude() * U_dir.magnitude())); + geo_point_t tempV1(fabs(dir.x()), + sqrt(dir.y()*dir.y() + dir.z()*dir.z()) * sin(angle1), 0); + double angle1_1 = acos(tempV1.dot(drift_dir) / (tempV1.magnitude() * drift_dir.magnitude())) / 3.1415926 * 180.; + + double angle2 = acos(dir_1.dot(V_dir) / (dir_1.magnitude() * V_dir.magnitude())); + geo_point_t tempV2(fabs(dir.x()), + sqrt(dir.y()*dir.y() + dir.z()*dir.z()) * sin(angle2), 0); + double angle2_1 = acos(tempV2.dot(drift_dir) / (tempV2.magnitude() * drift_dir.magnitude())) / 3.1415926 * 180.; + + double angle3 = acos(dir_1.dot(W_dir) / (dir_1.magnitude() * W_dir.magnitude())); + geo_point_t tempV3(fabs(dir.x()), + sqrt(dir.y()*dir.y() + dir.z()*dir.z()) * sin(angle3), 0); + double angle3_1 = acos(tempV3.dot(drift_dir) / (tempV3.magnitude() * drift_dir.magnitude())) / 3.1415926 * 180.; + + // std::cout << "Test: " << angle1 << " " << angle1_1 << " " << angle2 << " " << angle2_1 << " " << angle3 << " " << angle3_1 << std::endl; + + + if 
((angle1_1 < 10 || angle2_1 < 10 || angle3_1 < 5)) { + if (!fiducial_utils->check_signal_processing(cluster, p1, dir_vec, 1*units::cm)) { + flag_save = true; + candidate_exit_wcps.push_back(out_vec_wcps.at(i).at(0)); + } + } + + if (!flag_save) { + double main_angle = acos(dir_vec.dot(main_dir) / (dir_vec.magnitude() * main_dir.magnitude())); + double angle_deg = fabs((3.1415926/2. - main_angle) / 3.1415926 * 180.); + + if (angle_deg > 60) { + if (!fiducial_utils->check_dead_volume(cluster, p1, dir_vec, 1*units::cm)) { + flag_save = true; + candidate_exit_wcps.push_back(out_vec_wcps.at(i).at(0)); + } + } + } + } + } + + // Determine boundary points again + for (size_t i = 0; i != candidate_exit_wcps.size(); i++) { + double dis1 = (candidate_exit_wcps.at(i) - boundary_point_first).magnitude(); + double dis2 = (candidate_exit_wcps.at(i) - boundary_point_second).magnitude(); + + if (dis1 < dis2) { + if (dis1 < 1.0*units::cm) temp_set.insert(0); + } else { + if (dis2 < 1.0*units::cm) temp_set.insert(1); + } + } + + // Protection against two end point situation + if (temp_set.size() == 2) { + geo_point_t tp1 = boundary_point_first; + geo_point_t tp2 = boundary_point_second; + + temp_set.clear(); + + if (!fiducial_utils->inside_fiducial_volume(tp1)) temp_set.insert(0); + if (!fiducial_utils->inside_fiducial_volume(tp2)) temp_set.insert(1); + if (temp_set.size() == 0) { + temp_set.insert(0); + temp_set.insert(1); + } + } + } + + // Fully contained, so not a STM + if (candidate_exit_wcps.size() == 0) { + std::cout << "STMTagger: Mid Point: A" << std::endl; + return false; + } + + std::cout << "end_point: " << temp_set.size() << " " << candidate_exit_wcps.size() << std::endl; + + // Determine first and last points for further analysis + geo_point_t first_wcp, last_wcp; + bool flag_double_end = false; + + if (temp_set.size() != 0) { + if (*temp_set.begin() == 0) { + first_wcp = boundary_point_first; + last_wcp = boundary_point_second; + } else { + first_wcp = 
boundary_point_second; + last_wcp = boundary_point_first; + } + if (temp_set.size() == 2) flag_double_end = true; + } else { + if (candidate_exit_wcps.size() == 1) { + first_wcp = candidate_exit_wcps.at(0); + + geo_vector_t dir1 = boundary_point_first - candidate_exit_wcps.at(0); + geo_vector_t dir2 = boundary_point_second - candidate_exit_wcps.at(0); + double dis1 = dir1.magnitude(); + double dis2 = dir2.magnitude(); + + double angle_between = acos(dir1.dot(dir2) / (dis1 * dis2)); + + if (angle_between > 120./180.*3.1415926 && dis1 > 20*units::cm && dis2 > 20*units::cm) { + std::cout << "Mid Point: B" << std::endl; + return false; + } else { + if (dis1 < dis2) { + last_wcp = boundary_point_second; + } else { + last_wcp = boundary_point_first; + } + } + } else { + std::cout << "Mid Point: C" << std::endl; + return false; + } + } + + bool flag_other_clusters = check_other_clusters(cluster, associated_clusters); + + std::cout << "STM analysis: flag_double_end=" << flag_double_end + << ", flag_other_clusters=" << flag_other_clusters << std::endl; + + // Forward check + { + if (flag_double_end) std::cout << "Forward check!" 
<< std::endl; + + // Do rough path tracking + auto path_points = do_rough_path(cluster, first_wcp, last_wcp); + + { + // Create segment for tracking + auto segment = create_segment_for_cluster(cluster, path_points); + m_track_fitter.add_segment(segment); + m_track_fitter.do_single_tracking(segment, false); + // Extract fit results from the segment + const auto& fits = segment->fits(); + if (fits.size() <=3) return false; + } + + std::cout << "Finish first round of fitting" << std::endl; + + geo_point_t mid_point(0,0,0); + auto adjusted_path_points = adjust_rough_path(cluster, mid_point); + if (adjusted_path_points.size()==0) adjusted_path_points = path_points; + auto adjusted_segment = create_segment_for_cluster(cluster, adjusted_path_points); + m_track_fitter.clear_segments(); + m_track_fitter.add_segment(adjusted_segment); + m_track_fitter.do_single_tracking(adjusted_segment); + + std::cout << "Finish second round of fitting" << std::endl; + + // // hack to test recombination model usage ... + // double kine_energy = segment_cal_kine_dQdx(adjusted_segment, m_recomb_model); + // std::cout << "Kine energy: " << kine_energy/units::MeV << std::endl; + // // check ... 
+ + + std::vector>> fine_tracking_path; + std::vector dQ, dx; + std::vector> paf; + + const auto& fits = adjusted_segment->fits(); + for (const auto& fit : fits) { + fine_tracking_path.emplace_back(fit.point, adjusted_segment); + dQ.push_back(fit.dQ); + dx.push_back(fit.dx); + paf.push_back(fit.paf); + } + + // Extract points for compatibility + std::vector pts; + for (const auto& path_point : fine_tracking_path) { + pts.push_back(path_point.first); + } + + // std::cout << "Collect points " << pts.size() << std::endl; + + int kink_num = find_first_kink(adjusted_segment); + + // std::cout << "Kink Number: " << kink_num << std::endl; + + double left_L = 0; + double left_Q = 0; + double exit_L = 0; + double exit_Q = 0; + + for (size_t i=0; i != kink_num && i < dx.size(); i++){ + exit_L += dx.at(i); + exit_Q += dQ.at(i); + } + for (size_t i = kink_num; i < dx.size(); i++){ + left_L += dx.at(i); + left_Q += dQ.at(i); + } + + std::cout << "Left: " << exit_L/units::cm << " " << left_L/units::cm << " " + << (left_Q/(left_L/units::cm+1e-9))/50e3 << " " + << (exit_Q/(exit_L/units::cm+1e-9)/50e3) << std::endl; + + // TGM (Through-Going Muon) check + if ((!fiducial_utils->inside_fiducial_volume(pts.front())) && (!fiducial_utils->inside_fiducial_volume(pts.back()))){ + + bool flag_TGM_anode = false; + if ((pts.back().x() < 2*units::cm || pts.front().x() < 2*units::cm) && + kink_num >= 0 && kink_num < pts.size()) { + if (pts.at(kink_num).x() < 6*units::cm){ + geo_point_t v10(pts.back().x()-pts.at(kink_num).x(), + pts.back().y()-pts.at(kink_num).y(), + pts.back().z()-pts.at(kink_num).z()); + geo_point_t v20(pts.front().x()-pts.at(kink_num).x(), + pts.front().y()-pts.at(kink_num).y(), + pts.front().z()-pts.at(kink_num).z()); + + if ((fabs(v10.angle(drift_dir)/3.1415926*180.-90)<12.5 && v10.magnitude()>15*units::cm) || + (fabs(v20.angle(drift_dir)/3.1415926*180.-90)<12.5 && v20.magnitude()>15*units::cm)) { + flag_TGM_anode = true; + } + } + } + + if ((exit_L < 3*units::cm || left_L 
< 3*units::cm) || flag_TGM_anode){ + std::cout << "TGM: " << pts.front() << " " << pts.back() << std::endl; + cluster.set_flag(Flags::TGM); + return true; + } + + } + else if ((!fiducial_utils->inside_fiducial_volume(pts.front())) && left_L < 3*units::cm){ + // Check dead volume + WireCell::Point p1 = pts.back(); + geo_point_t dir_vec = cluster.vhough_transform(p1, 30*units::cm); + dir_vec *= -1; + + if (!fiducial_utils->check_dead_volume(cluster, p1, dir_vec, 1*units::cm)){ + if (exit_L < 3*units::cm || left_L < 3*units::cm){ + std::cout << "TGM: " << pts.front() << " " << pts.back() << std::endl; + cluster.set_flag(Flags::TGM); + return true; + } + } + } + + // STM evaluation logic + if (left_L > 40*units::cm || (left_L > 7.5*units::cm && (left_Q/(left_L/units::cm+1e-9))/50e3 > 2.0)){ + if (!flag_double_end){ + std::cout << "Mid Point A " << " Fid " + << " " << mid_point << " " << left_L << " " + << (left_Q/(left_L/units::cm+1e-9)/50e3) << std::endl; + return false; + } + } else { + bool flag_fix_end = false; + if (exit_L < 35*units::cm || ((left_Q/(left_L/units::cm+1e-9))/50e3 > 2.0 && left_L > 2*units::cm)) { + flag_fix_end = true; + } + + // Readjust parameters for short tracks + if ((left_L < 8*units::cm && (left_Q/(left_L/units::cm+1e-9)/50e3)< 1.5) || + (left_L < 6*units::cm && (left_Q/(left_L/units::cm+1e-9)/50e3) < 1.7) || + (left_L < 5*units::cm && (left_Q/(left_L/units::cm+1e-9)/50e3) < 1.8) || + (left_L < 3*units::cm && (left_Q/(left_L/units::cm+1e-9)/50e3) < 1.9)){ + left_L = 0; + kink_num = dQ.size(); + exit_L = 40*units::cm; + flag_fix_end = false; + } + + bool flag_pass = false; + + if (!flag_other_clusters){ + if (left_L < 40*units::cm) { + if (flag_fix_end){ + flag_pass = eval_stm(adjusted_segment, kink_num, 5*units::cm, 0., 35*units::cm) || + eval_stm(adjusted_segment, kink_num, 5*units::cm, 3.*units::cm, 35*units::cm); + } else { + flag_pass = eval_stm(adjusted_segment, kink_num, 40*units::cm - left_L, 0., 35*units::cm) || + 
eval_stm(adjusted_segment, kink_num, 40*units::cm - left_L, 3.*units::cm, 35*units::cm); + } + + if (!flag_pass){ + if (flag_fix_end){ + flag_pass = eval_stm(adjusted_segment, kink_num, 5*units::cm, 0., 15*units::cm) || + eval_stm(adjusted_segment, kink_num, 5*units::cm, 3.*units::cm, 15*units::cm); + } else { + flag_pass = eval_stm(adjusted_segment, kink_num, 40*units::cm - left_L, 0., 15*units::cm) || + eval_stm(adjusted_segment, kink_num, 40*units::cm - left_L, 3.*units::cm, 15*units::cm); + } + } + } + + if (left_L < 20*units::cm){ + if (!flag_pass){ + if (flag_fix_end){ + flag_pass = eval_stm(adjusted_segment, kink_num, 5*units::cm, 0., 35*units::cm) || + eval_stm(adjusted_segment, kink_num, 5*units::cm, 3.*units::cm, 35*units::cm); + } else { + flag_pass = eval_stm(adjusted_segment, kink_num, 20*units::cm - left_L, 0., 35*units::cm) || + eval_stm(adjusted_segment, kink_num, 20*units::cm - left_L, 3.*units::cm, 35*units::cm); + } + } + + if (!flag_pass){ + if (flag_fix_end){ + flag_pass = eval_stm(adjusted_segment, kink_num, 5*units::cm, 0., 15*units::cm) || + eval_stm(adjusted_segment, kink_num, 5*units::cm, 3.*units::cm, 15*units::cm); + } else { + flag_pass = eval_stm(adjusted_segment, kink_num, 20*units::cm - left_L, 0., 15*units::cm) || + eval_stm(adjusted_segment, kink_num, 20*units::cm - left_L, 3.*units::cm, 15*units::cm); + } + } + } + } else { + if (flag_fix_end) { + flag_pass = eval_stm(adjusted_segment, kink_num, 5*units::cm, 0., 35*units::cm, true); + } else { + flag_pass = eval_stm(adjusted_segment, kink_num, 40*units::cm, 0., 35*units::cm, true); + } + } + + if (flag_pass) { + std::vector> fitted_segments; + fitted_segments.push_back(adjusted_segment); + search_other_tracks(cluster, fitted_segments); + + if (check_other_tracks(cluster, fitted_segments)){ + std::cout << "Mid Point Tracks" << std::endl; + return false; + } + + if (!detect_proton(adjusted_segment, kink_num, fitted_segments)) return true; + } + } + } + + // Backward check (if 
double-ended) + if (flag_double_end){ + std::cout << "Backward check!" << std::endl; + + // Do backward path tracking + auto path_points = do_rough_path(cluster, last_wcp, first_wcp); + { + m_track_fitter.clear_segments(); + auto segment = create_segment_for_cluster(cluster, path_points); + m_track_fitter.add_segment(segment); + m_track_fitter.do_single_tracking(segment, false); + + } + geo_point_t mid_point(0,0,0); + auto adjusted_path_points = adjust_rough_path(cluster, mid_point); + if (adjusted_path_points.size()==0) adjusted_path_points = path_points; + auto adjusted_segment = create_segment_for_cluster(cluster, adjusted_path_points); + m_track_fitter.clear_segments(); + m_track_fitter.add_segment(adjusted_segment); + m_track_fitter.do_single_tracking(adjusted_segment); + + std::vector>> fine_tracking_path; + std::vector dQ, dx; + std::vector> paf; + + const auto& fits = adjusted_segment->fits(); + for (const auto& fit : fits) { + fine_tracking_path.emplace_back(fit.point, adjusted_segment); + dQ.push_back(fit.dQ); + dx.push_back(fit.dx); + paf.push_back(fit.paf); + } + + // Extract points for compatibility + std::vector pts; + for (const auto& path_point : fine_tracking_path) { + pts.push_back(path_point.first); + } + + int kink_num = find_first_kink(adjusted_segment); + + double left_L = 0; + double left_Q = 0; + double exit_L = 0; + double exit_Q = 0; + + for (size_t i=0; i != kink_num && i < dx.size(); i++){ + exit_L += dx.at(i); + exit_Q += dQ.at(i); + } + for (size_t i = kink_num; i != dx.size(); i++){ + left_L += dx.at(i); + left_Q += dQ.at(i); + } + + std::cout << "Left: " << exit_L/units::cm << " " << left_L/units::cm << " " + << (left_Q/(left_L/units::cm+1e-9))/50e3 << " " + << (exit_Q/(exit_L/units::cm+1e-9)/50e3) << std::endl; + + // TGM check for backward direction + if ((!fiducial_utils->inside_fiducial_volume(pts.front())) && + (!fiducial_utils->inside_fiducial_volume(pts.back()))){ + + bool flag_TGM_anode = false; + + if ((pts.back().x() < 
2*units::cm || pts.front().x() < 2*units::cm) && + kink_num >= 0 && kink_num < pts.size()) { + if (pts.at(kink_num).x() < 6*units::cm){ + geo_point_t v10(pts.back().x()-pts.at(kink_num).x(), + pts.back().y()-pts.at(kink_num).y(), + pts.back().z()-pts.at(kink_num).z()); + geo_point_t v20(pts.front().x()-pts.at(kink_num).x(), + pts.front().y()-pts.at(kink_num).y(), + pts.front().z()-pts.at(kink_num).z()); + + if ((fabs(v10.angle(drift_dir)/3.1415926*180.-90)<12.5 && v10.magnitude()>15*units::cm) || + (fabs(v20.angle(drift_dir)/3.1415926*180.-90)<12.5 && v20.magnitude()>15*units::cm)) { + flag_TGM_anode = true; + } + } + } + + if ((exit_L < 3*units::cm || left_L < 3*units::cm) || flag_TGM_anode){ + std::cout << "TGM: " << pts.front() << " " << pts.back() << std::endl; + cluster.set_flag(Flags::TGM); + return true; + } + } + + if (left_L > 40*units::cm || (left_L > 7.5*units::cm && (left_Q/(left_L/units::cm+1e-9))/50e3 > 2.0)){ + std::cout << "Mid Point A " << " Fid" + << " " << mid_point << " " << left_L << " " + << (left_Q/(left_L/units::cm+1e-9)/50e3) << std::endl; + return false; + } else { + bool flag_fix_end = false; + if (exit_L < 35*units::cm || ((left_Q/(left_L/units::cm+1e-9))/50e3 > 2.0 && left_L > 2*units::cm)) { + flag_fix_end = true; + } + + if ((left_L < 8*units::cm && (left_Q/(left_L/units::cm+1e-9)/50e3)< 1.5) || + (left_L < 6*units::cm && (left_Q/(left_L/units::cm+1e-9)/50e3) < 1.7) || + (left_L < 3*units::cm && (left_Q/(left_L/units::cm+1e-9)/50e3) < 1.9)){ + left_L = 0; + kink_num = dQ.size(); + exit_L = 40*units::cm; + flag_fix_end = false; + } + + bool flag_pass = false; + if (!flag_other_clusters){ + if (left_L < 40*units::cm) { + if (flag_fix_end){ + flag_pass = eval_stm(adjusted_segment, kink_num, 5*units::cm, 0., 35*units::cm) || + eval_stm(adjusted_segment, kink_num, 5*units::cm, 3.*units::cm, 35*units::cm); + } else { + flag_pass = eval_stm(adjusted_segment, kink_num, 40*units::cm - left_L, 0., 35*units::cm) || + eval_stm(adjusted_segment, 
kink_num, 40*units::cm - left_L, 3.*units::cm, 35*units::cm); + } + + if (!flag_pass){ + if (flag_fix_end){ + flag_pass = eval_stm(adjusted_segment, kink_num, 5*units::cm, 0., 15*units::cm) || + eval_stm(adjusted_segment, kink_num, 5*units::cm, 3.*units::cm, 15*units::cm); + } else { + flag_pass = eval_stm(adjusted_segment, kink_num, 40*units::cm - left_L, 0., 15*units::cm) || + eval_stm(adjusted_segment, kink_num, 40*units::cm - left_L, 3.*units::cm, 15*units::cm); + } + } + } + + if (left_L < 20*units::cm){ + if (!flag_pass){ + if (flag_fix_end){ + flag_pass = eval_stm(adjusted_segment, kink_num, 5*units::cm, 0., 35*units::cm) || + eval_stm(adjusted_segment, kink_num, 5*units::cm, 3.*units::cm, 35*units::cm); + } else { + flag_pass = eval_stm(adjusted_segment, kink_num, 20*units::cm - left_L, 0., 35*units::cm) || + eval_stm(adjusted_segment, kink_num, 20*units::cm - left_L, 3.*units::cm, 35*units::cm); + } + } + + if (!flag_pass){ + if (flag_fix_end){ + flag_pass = eval_stm(adjusted_segment, kink_num, 5*units::cm, 0., 15*units::cm) || + eval_stm(adjusted_segment, kink_num, 5*units::cm, 3.*units::cm, 15*units::cm); + } else { + flag_pass = eval_stm(adjusted_segment, kink_num, 20*units::cm - left_L, 0., 15*units::cm) || + eval_stm(adjusted_segment, kink_num, 20*units::cm - left_L, 3.*units::cm, 15*units::cm); + } + } + } + } else { + if (flag_fix_end) { + flag_pass = eval_stm(adjusted_segment, kink_num, 5*units::cm, 0., 35*units::cm, true); + } else { + flag_pass = eval_stm(adjusted_segment, kink_num, 40*units::cm, 0., 35*units::cm, true); + } + } + + if (flag_pass) { + std::vector> fitted_segments; + fitted_segments.push_back(adjusted_segment); + search_other_tracks(cluster, fitted_segments); + + if (check_other_tracks(cluster, fitted_segments)){ + std::cout << "Mid Point Tracks" << std::endl; + return false; + } + + if (!detect_proton(adjusted_segment, kink_num, fitted_segments)) return true; + } + } + } + + std::cout << "Mid Point " << std::endl; + + + + + + + + 
// // std::cout << "STMTagger tracking " << first_wcp << " " << last_wcp << std::endl; + + // // temporary tracking implementation ... + // auto path_points = do_rough_path(cluster, first_wcp, last_wcp); + // // Optional: Print path info for debugging + // std::cout << "TaggerCheckSTM: Steiner path: " << path_points.size() << " points from index " << first_wcp << " " <wcpts(); + // // for (size_t i=0;i!=path_points.size(); i++){ + // // std::cout << i << " " << path_points.at(i) << " " << wcpts.at(i).point << std::endl; + // // } + // m_track_fitter.add_segment(segment); + + // auto ch = m_track_fitter.get_channel_for_wire(0,0,1,50); + // auto test_results = m_track_fitter.get_wires_for_channel(0,ch); + // std::cout << ch << " " << test_results.size() << " wires. " << " " << std::get<0>(test_results.front()) << " " << std::get<1>(test_results.front()) << " " << std::get<2>(test_results.front()) << std::endl; + + // m_track_fitter.do_single_tracking(segment); + + + // geo_point_t mid_point(0,0,0); + // adjust_rough_path(cluster, mid_point); + + // auto kink_num = find_first_kink(segment); + + // check_other_clusters(cluster, associated_clusters); + + // std::vector> fitted_segments; + // fitted_segments.push_back(segment); + // search_other_tracks(cluster, fitted_segments); + + // detect_proton(segment, kink_num, fitted_segments); + + // eval_stm(segment, kink_num); + + // // missing check other tracks ... + // m_track_fitter.prepare_data(); + // m_track_fitter.fill_global_rb_map(); + // auto organized_path = m_track_fitter.organize_orig_path(segment); + // // auto test = m_track_fitter.examine_end_ps_vec(segment, organized_path, true, true); + // auto test_path = organized_path; + // m_track_fitter.organize_ps_path(segment, test_path, 1.2*units::cm, 0.6*units::cm); + // std::cout << "TaggerCheckSTM: Organized path: " << organized_path.size() << " points." 
<< " original " << segment->wcpts().size() << " " << test_path.size() << std::endl; + // // std::cout << m_track_fitter.get_pc_transforms() << " " << m_track_fitter.get_detector_volume() << std::endl; + // // WireCell::Point p = organized_path.front(); + // // TrackFitting::PlaneData temp_2dut, temp_2dvt, temp_2dwt; + // // m_track_fitter.form_point_association(segment, p, temp_2dut, temp_2dvt, temp_2dwt, 1.0*units::cm, 3, 20); + // // m_track_fitter.examine_point_association(segment, p, temp_2dut, temp_2dvt, temp_2dwt, true); + // // std::cout << "2D Association: " << temp_2dut.associated_2d_points.size() << " " << temp_2dut.quantity << " " << temp_2dvt.associated_2d_points.size() << " " << temp_2dvt.quantity << " " << temp_2dwt.associated_2d_points.size() << " " << temp_2dwt.quantity << std::endl; + // std::vector>> ptss; + // for (const auto& p : organized_path) { + // ptss.emplace_back(p, segment); + // } + // m_track_fitter.form_map(ptss); + // m_track_fitter.trajectory_fit(ptss); + // m_track_fitter.dQ_dx_fit(); + // std::cout << m_track_fitter.get_parameter("DL") << std::endl; + // std::cout << "TaggerCheckSTM: Formed map with " << organized_path.size() << " points." 
<< std::endl; + + return false; + } + + +}; diff --git a/clus/src/TrackFitting.cxx b/clus/src/TrackFitting.cxx new file mode 100644 index 000000000..aba82dd82 --- /dev/null +++ b/clus/src/TrackFitting.cxx @@ -0,0 +1,6865 @@ +#include "WireCellClus/TrackFitting.h" +#include "WireCellClus/TrackFitting_Util.h" +#include "WireCellClus/PRSegmentFunctions.h" + +#include "WireCellUtil/Logging.h" + + +using namespace WireCell; +using namespace WireCell::Clus; +using namespace WireCell::Clus::Facade; + +using geo_point_t = WireCell::Point; + + +TrackFitting::TrackFitting(FittingType fitting_type) + : m_fitting_type(fitting_type) +{ + +} + +// ============================================================================ +// Parameter management methods +// ============================================================================ + +void TrackFitting::set_parameter(const std::string& name, double value) { + // Map parameter names to struct members + if (name == "DL") { + m_params.DL = value; + } else if (name == "DT") { + m_params.DT = value; + } else if (name == "col_sigma_w_T") { + m_params.col_sigma_w_T = value; + } else if (name == "ind_sigma_u_T") { + m_params.ind_sigma_u_T = value; + } else if (name == "ind_sigma_v_T") { + m_params.ind_sigma_v_T = value; + } else if (name == "rel_uncer_ind") { + m_params.rel_uncer_ind = value; + } else if (name == "rel_uncer_col") { + m_params.rel_uncer_col = value; + } else if (name == "add_uncer_ind") { + m_params.add_uncer_ind = value; + } else if (name == "add_uncer_col") { + m_params.add_uncer_col = value; + } else if (name == "add_sigma_L") { + m_params.add_sigma_L = value; + } else if (name == "low_dis_limit") { + m_params.low_dis_limit = value; + } else if (name == "end_point_limit") { + m_params.end_point_limit = value; + } else if (name == "time_tick_cut") { + m_params.time_tick_cut = value; + } else if (name == "rel_charge_uncer") { + m_params.rel_charge_uncer = value; + } else if (name == "add_charge_uncer") { + 
m_params.add_charge_uncer = value; + } else if (name == "default_charge_th") { + m_params.default_charge_th = value; + } else if (name == "default_charge_err") { + m_params.default_charge_err = value; + } else if (name == "scaling_quality_th") { + m_params.scaling_quality_th = value; + } else if (name == "scaling_ratio") { + m_params.scaling_ratio = value; + } else if (name == "area_ratio1") { + m_params.area_ratio1 = value; + } else if (name == "area_ratio2") { + m_params.area_ratio2 = value; + } else if (name == "skip_default_ratio_1") { + m_params.skip_default_ratio_1 = value; + } else if (name == "skip_ratio_cut") { + m_params.skip_ratio_cut = value; + } else if (name == "skip_ratio_1_cut") { + m_params.skip_ratio_1_cut = value; + } else if (name == "skip_angle_cut_1") { + m_params.skip_angle_cut_1 = value; + } else if (name == "skip_angle_cut_2") { + m_params.skip_angle_cut_2 = value; + } else if (name == "skip_angle_cut_3") { + m_params.skip_angle_cut_3 = value; + } else if (name == "skip_dis_cut") { + m_params.skip_dis_cut = value; + } else if (name == "default_dQ_dx") { + m_params.default_dQ_dx = value; + } else if (name == "end_point_factor") { + m_params.end_point_factor = value; + } else if (name == "mid_point_factor") { + m_params.mid_point_factor = value; + } else if (name == "nlevel") { + m_params.nlevel = static_cast(value); + } else if (name == "charge_cut") { + m_params.charge_cut = value; + } else if (name == "share_charge_err") { + m_params.share_charge_err = value; + } else if (name == "min_drift_time") { + m_params.min_drift_time = value; + } else if (name == "search_range") { + m_params.search_range = value; + } else if (name == "dead_ind_weight") { + m_params.dead_ind_weight = value; + } else if (name == "dead_col_weight") { + m_params.dead_col_weight = value; + } else if (name == "close_ind_weight") { + m_params.close_ind_weight = value; + } else if (name == "close_col_weight") { + m_params.close_col_weight = value; + } else if (name == 
"overlap_th") { + m_params.overlap_th = value; + } else if (name == "dx_norm_length") { + m_params.dx_norm_length = value; + } else if (name == "lambda") { + m_params.lambda = value; + } else if (name == "div_sigma") { + m_params.div_sigma = value; + } else { + raise("TrackFitting: Unknown parameter name '%s'", name.c_str()); + } +} + +double TrackFitting::get_parameter(const std::string& name) const { + // Map parameter names to struct members + if (name == "DL") { + return m_params.DL; + } else if (name == "DT") { + return m_params.DT; + } else if (name == "col_sigma_w_T") { + return m_params.col_sigma_w_T; + } else if (name == "ind_sigma_u_T") { + return m_params.ind_sigma_u_T; + } else if (name == "ind_sigma_v_T") { + return m_params.ind_sigma_v_T; + } else if (name == "rel_uncer_ind") { + return m_params.rel_uncer_ind; + } else if (name == "rel_uncer_col") { + return m_params.rel_uncer_col; + } else if (name == "add_uncer_ind") { + return m_params.add_uncer_ind; + } else if (name == "add_uncer_col") { + return m_params.add_uncer_col; + } else if (name == "add_sigma_L") { + return m_params.add_sigma_L; + } else if (name == "low_dis_limit") { + return m_params.low_dis_limit; + } else if (name == "end_point_limit") { + return m_params.end_point_limit; + } else if (name == "time_tick_cut") { + return m_params.time_tick_cut; + } else if (name == "rel_charge_uncer") { + return m_params.rel_charge_uncer; + } else if (name == "add_charge_uncer") { + return m_params.add_charge_uncer; + } else if (name == "default_charge_th") { + return m_params.default_charge_th; + } else if (name == "default_charge_err") { + return m_params.default_charge_err; + } else if (name == "scaling_quality_th") { + return m_params.scaling_quality_th; + } else if (name == "scaling_ratio") { + return m_params.scaling_ratio; + } else if (name == "area_ratio1") { + return m_params.area_ratio1; + } else if (name == "area_ratio2") { + return m_params.area_ratio2; + } else if (name == 
"skip_default_ratio_1") { + return m_params.skip_default_ratio_1; + } else if (name == "skip_ratio_cut") { + return m_params.skip_ratio_cut; + } else if (name == "skip_ratio_1_cut") { + return m_params.skip_ratio_1_cut; + } else if (name == "skip_angle_cut_1") { + return m_params.skip_angle_cut_1; + } else if (name == "skip_angle_cut_2") { + return m_params.skip_angle_cut_2; + } else if (name == "skip_angle_cut_3") { + return m_params.skip_angle_cut_3; + } else if (name == "skip_dis_cut") { + return m_params.skip_dis_cut; + } else if (name == "default_dQ_dx") { + return m_params.default_dQ_dx; + } else if (name == "end_point_factor") { + return m_params.end_point_factor; + } else if (name == "mid_point_factor") { + return m_params.mid_point_factor; + } else if (name == "nlevel") { + return static_cast(m_params.nlevel); + } else if (name == "charge_cut") { + return m_params.charge_cut; + } else if (name == "share_charge_err") { + return m_params.share_charge_err; + } else if (name == "min_drift_time") { + return m_params.min_drift_time; + } else if (name == "search_range") { + return m_params.search_range; + } else if (name == "dead_ind_weight") { + return m_params.dead_ind_weight; + } else if (name == "dead_col_weight") { + return m_params.dead_col_weight; + } else if (name == "close_ind_weight") { + return m_params.close_ind_weight; + } else if (name == "close_col_weight") { + return m_params.close_col_weight; + } else if (name == "overlap_th") { + return m_params.overlap_th; + } else if (name == "dx_norm_length") { + return m_params.dx_norm_length; + } else if (name == "lambda") { + return m_params.lambda; + } else if (name == "div_sigma") { + return m_params.div_sigma; + } else { + raise("TrackFitting: Unknown parameter name '%s'", name.c_str()); + return 0; + } +} + + + +void TrackFitting::clear_graph(){ + m_graph = nullptr; + m_clusters.clear(); + m_blobs.clear(); +} + + +void TrackFitting::clear_segments(){ + m_segments.clear(); + m_clusters.clear(); + 
m_blobs.clear(); +} + +void TrackFitting::add_graph(std::shared_ptr graph){ + m_graph = graph; + + if (!m_graph){ + return; + } + + // Get edges from the graph and extract segments from them + auto edge_range = boost::edges(*m_graph); + std::set> segments_set; + + for (auto e_it = edge_range.first; e_it != edge_range.second; ++e_it) { + auto& edge_bundle = (*m_graph)[*e_it]; + if (edge_bundle.segment) { + segments_set.insert(edge_bundle.segment); + m_clusters.insert(edge_bundle.segment->cluster()); + } + } + + if (m_grouping == nullptr && !segments_set.empty()){ + auto first_segment = *segments_set.begin(); + m_grouping = first_segment->cluster()->grouping(); + BuildGeometry(); + } + + for (auto& cluster: m_clusters){ + for (auto& blob: cluster->children()){ + m_blobs.insert(blob); + } + } + + std::cout << "TrackFitting: Added graph with " << segments_set.size() << " segments." << " " << m_clusters.size() << " " << m_blobs.size() << std::endl; +} + + +void TrackFitting::add_segment(std::shared_ptr segment){ + m_segments.insert(segment); + m_clusters.insert(segment->cluster()); + + if (m_grouping == nullptr){ + m_grouping = segment->cluster()->grouping(); + + BuildGeometry(); + } + + for (auto& cluster: m_clusters){ + for (auto& blob: cluster->children()){ + m_blobs.insert(blob); + } + } + + std::cout << "TrackFitting: Added segment with " << segment->wcpts().size() << " points." << " " << m_clusters.size() << " " << m_blobs.size() << std::endl; +} + +void TrackFitting::BuildGeometry(){ + // Get all the wire plane IDs from the grouping + const auto& wpids = m_grouping->wpids(); + compute_wireplane_params(wpids, m_dv, wpid_params, wpid_U_dir, wpid_V_dir, wpid_W_dir, apas); + + // Clear existing maps + wpid_offsets.clear(); + wpid_slopes.clear(); + // Get all unique APA/face combinations + std::set> apa_face_combinations; + + // loop over wpids ... 
+ for (const auto& wpid : wpids) { + double time_slice_width = //m_dv->metadata(wpid)["nticks_live_slice"].asDouble() * + m_dv->metadata(wpid)["tick_drift"].asDouble(); + + WirePlaneId wpid_u(kUlayer, wpid.face(), wpid.apa()); + WirePlaneId wpid_v(kVlayer, wpid.face(), wpid.apa()); + WirePlaneId wpid_w(kWlayer, wpid.face(), wpid.apa()); + + double pitch_u = m_dv->pitch_vector(wpid_u).magnitude(); + double pitch_v = m_dv->pitch_vector(wpid_v).magnitude(); + double pitch_w = m_dv->pitch_vector(wpid_w).magnitude(); + + wpid_geoms[wpid] = std::make_tuple(time_slice_width, pitch_u, pitch_v, pitch_w); + // std::cout << "Geometry: " << time_slice_width/units::cm << " " << pitch_u/units::cm << " " << pitch_v/units::cm << " " << pitch_w/units::cm << std::endl; + + apa_face_combinations.insert({wpid.apa(), wpid.face()}); + } + + // Process each APA/face combination + for (const auto& [apa, face] : apa_face_combinations) { + try { + // Get anode interface for this APA/face + auto anode = m_grouping->get_anode(apa); + if (!anode) { + std::cerr << "TrackFitting: Could not get anode for APA " << apa << std::endl; + continue; + } + + auto iface = anode->faces()[face]; + if (!iface) { + std::cerr << "TrackFitting: Could not get face " << face << " for APA " << apa << std::endl; + continue; + } + + // Get geometry parameters from grouping + const auto& pitch_mags = m_grouping->pitch_mags(); + const auto& proj_centers = m_grouping->proj_centers(); + + // Get wire angles for this APA/face + const auto [angle_u, angle_v, angle_w] = m_grouping->wire_angles(apa, face); + std::vector angles = {angle_u, angle_v, angle_w}; + + // Get time/drift parameters from grouping cache + double time_offset = m_grouping->get_time_offset().at(apa).at(face); + double drift_speed = m_grouping->get_drift_speed().at(apa).at(face); + double tick = m_grouping->get_tick().at(apa).at(face); + + // Get drift direction and origin from anode face + double xsign = iface->dirx(); + double xorig = 
iface->planes()[2]->wires().front()->center().x(); + + // Create WirePlaneId for this APA/face combination + WirePlaneId wpid(kAllLayers, face, apa); + + // Calculate slopes and offsets for each plane + std::pair slope_yu_zu, slope_yv_zv, slope_yw_zw; + double offset_u, offset_v, offset_w, offset_t; + + // U plane (plane index 0) + double pitch_u = pitch_mags.at(apa).at(face).at(0); + double center_u = proj_centers.at(apa).at(face).at(0); + offset_u = -(center_u + 0.5 * pitch_u) / pitch_u; + slope_yu_zu = {-sin(angles[0]) / pitch_u, cos(angles[0]) / pitch_u}; + + // V plane (plane index 1) + double pitch_v = pitch_mags.at(apa).at(face).at(1); + double center_v = proj_centers.at(apa).at(face).at(1); + offset_v = -(center_v + 0.5 * pitch_v) / pitch_v; + slope_yv_zv = {-sin(angles[1]) / pitch_v, cos(angles[1]) / pitch_v}; + + // W plane (plane index 2) + double pitch_w = pitch_mags.at(apa).at(face).at(2); + double center_w = proj_centers.at(apa).at(face).at(2); + offset_w = -(center_w + 0.5 * pitch_w) / pitch_w; + slope_yw_zw = {-sin(angles[2]) / pitch_w, cos(angles[2]) / pitch_w}; + + // Time conversion parameters + // From drift2time: time = (drift - xorig)/(xsign * drift_speed) - time_offset + // tick_index = round(time / tick) + double slope_t = 1.0 / (xsign * drift_speed * tick); + offset_t = -(xorig / (xsign * drift_speed) + time_offset) / tick; + + // Store in maps + wpid_offsets[wpid] = std::make_tuple(offset_t, offset_u, offset_v, offset_w); + wpid_slopes[wpid] = std::make_tuple( + slope_t, // T slope (for x direction) + slope_yu_zu, // U plane slopes (y, z) + slope_yv_zv, // V plane slopes (y, z) + slope_yw_zw // W plane slopes (y, z) + ); + + // // Debug output (optional - can be removed) + // std::cout << "TrackFitting: Initialized geometry for APA " << apa + // << " Face " << face << std::endl; + // std::cout << " Offsets: T=" << offset_t << " U=" << offset_u + // << " V=" << offset_v << " W=" << offset_w << std::endl; + // std::cout << " Slopes: T=" << 
slope_t + // << " U=(" << slope_yu_zu.first << "," << slope_yu_zu.second << ")" + // << " V=(" << slope_yv_zv.first << "," << slope_yv_zv.second << ")" + // << " W=(" << slope_yw_zw.first << "," << slope_yw_zw.second << ")" << std::endl; + + } catch (const std::exception& e) { + std::cerr << "TrackFitting: Error initializing geometry for APA " << apa + << " Face " << face << ": " << e.what() << std::endl; + } + } + + // std::cout << "TrackFitting: Geometry initialization complete. Processed " + // << wpid_offsets.size() << " wire plane configurations." << std::endl; + + +} + +IAnodePlane::pointer TrackFitting::get_anode(int apa_ident) const { + if (!m_grouping) { + std::cerr << "TrackFitting: No grouping available to get anode" << std::endl; + return nullptr; + } + + try { + return m_grouping->get_anode(apa_ident); + } catch (const std::exception& e) { + std::cerr << "TrackFitting: Error getting anode " << apa_ident << ": " << e.what() << std::endl; + return nullptr; + } +} + +std::map TrackFitting::get_all_anodes() const { + std::map result; + + if (!m_grouping) { + return result; + } + + // Get all unique APAs from the clusters + std::set apa_idents; + // Extract APAs from cluster's wire plane IDs + auto wpids = m_grouping->wpids(); + for (const auto& wpid : wpids) { + apa_idents.insert(wpid.apa()); + } + + // Get anode for each APA + for (int apa_ident : apa_idents) { + auto anode = get_anode(apa_ident); + if (anode) { + result[apa_ident] = anode; + } + } + + return result; +} + +int TrackFitting::get_channel_for_wire(int apa, int face, int plane, int wire) const { + m_cache_stats.total_lookups++; + + PlaneKey plane_key = std::make_tuple(apa, face, plane); + + // Check hot cache first (O(1) for frequently accessed planes) + auto hot_it = m_hot_cache.find(plane_key); + if (hot_it != m_hot_cache.end()) { + if (wire >= 0 && wire < static_cast(hot_it->second.size())) { + m_cache_stats.hot_hits++; + return hot_it->second[wire]; + } + return -1; // Wire index out of 
bounds + } + + // Check cold cache (individual wire lookups) + WireKey wire_key = std::make_tuple(apa, face, plane, wire); + auto cold_it = m_cold_cache.find(wire_key); + if (cold_it != m_cold_cache.end()) { + m_cache_stats.cold_hits++; + + // Update access count for this plane + m_access_count[plane_key]++; + + // Promote to hot cache if threshold reached + if (m_access_count[plane_key] >= HOT_THRESHOLD) { + cache_entire_plane(apa, face, plane); + } + + return cold_it->second; + } + + // Cache miss - fetch from anode and cache result + int channel = fetch_channel_from_anode(apa, face, plane, wire); + if (channel != -1) { + m_cold_cache[wire_key] = channel; + m_access_count[plane_key]++; + m_cache_stats.cold_entries_count++; + } + + return channel; +} + +std::vector> TrackFitting::get_wires_for_channel(int apa, int channel_number) const { + std::vector> result; + + auto anode = get_anode(apa); + if (!anode) { + return result; + } + + // Get all wires for this channel (handles wrapped wires) + auto wires = anode->wires(channel_number); + + for (const auto& wire : wires) { + auto wpid = wire->planeid(); + result.emplace_back(wpid.face(), wpid.index(), wire->index()); + } + + return result; +} + +void TrackFitting::clear_cache() const { + m_hot_cache.clear(); + m_cold_cache.clear(); + m_access_count.clear(); + m_cache_stats = {0, 0, 0, 0, 0}; +} + +TrackFitting::CacheStats TrackFitting::get_cache_stats() const { + auto stats = m_cache_stats; + stats.hot_planes_count = m_hot_cache.size(); + stats.cold_entries_count = m_cold_cache.size(); + return stats; +} + +void TrackFitting::cache_entire_plane(int apa, int face, int plane) const { + auto anode = get_anode(apa); + if (!anode) return; + + const auto& faces = anode->faces(); + if (face >= static_cast(faces.size()) || !faces[face]) return; + + const auto& planes = faces[face]->planes(); + if (plane >= static_cast(planes.size())) return; + + const auto& wires = planes[plane]->wires(); + PlaneKey plane_key = 
std::make_tuple(apa, face, plane); + + // Cache entire plane (this is the "hot" cache promotion) + auto& hot_vec = m_hot_cache[plane_key]; + hot_vec.resize(wires.size()); + for (size_t i = 0; i < wires.size(); ++i) { + hot_vec[i] = wires[i]->channel(); + } + + // Remove individual wire entries from cold cache to save memory + for (size_t i = 0; i < wires.size(); ++i) { + WireKey wire_key = std::make_tuple(apa, face, plane, static_cast(i)); + if (m_cold_cache.erase(wire_key)) { + m_cache_stats.cold_entries_count--; + } + } + + m_cache_stats.hot_planes_count++; + + // std::cout << "TrackFitting: Promoted plane (" << apa << "," << face << "," << plane + // << ") to hot cache with " << wires.size() << " wires" << std::endl; +} + +int TrackFitting::fetch_channel_from_anode(int apa, int face, int plane, int wire) const { + auto anode = get_anode(apa); + if (!anode) return -1; + + const auto& faces = anode->faces(); + if (face >= static_cast(faces.size()) || !faces[face]) return -1; + + const auto& planes = faces[face]->planes(); + if (plane >= static_cast(planes.size())) return -1; + + const auto& wires = planes[plane]->wires(); + if (wire >= static_cast(wires.size())) return -1; + + return wires[wire]->channel(); +} + + + +void TrackFitting::prepare_data() { + if (m_charge_data.size()!=0) return; + + // Process every Facade::Cluster in m_clusters + for (auto& cluster : m_clusters) { + // Get boundary range using get_uvwt_range which returns map> + auto uvwt_ranges = cluster->get_uvwt_range(); + + // Get the grouping from the cluster + auto grouping = cluster->grouping(); + + // Process each wpid (wire plane ID) separately + for (const auto& [wpid, range_tuple] : uvwt_ranges) { + int apa = wpid.apa(); + int face = wpid.face(); + + // Get the ranges for this wpid + // auto [u_size, v_size, w_size, t_size] = range_tuple; + + // Get min/max values for this specific apa/face + auto [u_min, v_min, w_min, t_min] = cluster->get_uvwt_min(apa, face); + auto [u_max, v_max, w_max, 
t_max] = cluster->get_uvwt_max(apa, face); + + u_min -= 5; v_min -=5; w_min-=5; + u_max += 5; v_max +=5; w_max+=5; + t_min -= 20; + t_max += 20; + // std::cout << "U Limits: " << u_min << " " << u_max << std::endl; + // std::cout << "V Limits: " << v_min << " " << v_max << std::endl; + // std::cout << "W Limits: " << w_min << " " << w_max << std::endl; + + // Process each plane (0=U, 1=V, 2=W) + for (int plane = 0; plane < 3; ++plane) { + int wire_min, wire_max, time_min, time_max; + + // Set the wire range based on plane + switch (plane) { + case 0: wire_min = u_min; wire_max = u_max; break; + case 1: wire_min = v_min; wire_max = v_max; break; + case 2: wire_min = w_min; wire_max = w_max; break; + } + time_min = t_min; + time_max = t_max; + + // Get charge information for this plane + auto charge_map = grouping->get_overlap_good_ch_charge( + time_min, time_max, wire_min, wire_max, apa, face, plane); + + // Process each charge entry + for (const auto& [time_wire, charge_data] : charge_map) { + int time_slice = time_wire.first; + int wire_index = time_wire.second; + double charge = charge_data.first; + double charge_err = charge_data.second; + + int channel = fetch_channel_from_anode(apa, face, plane, wire_index); + + // Create key for m_charge_data + CoordReadout data_key(apa, time_slice, channel); + + int flag = 1; // Default flag for all-live-channel case + + // Check for negative charge + if (charge < 0) { + charge = 0; + charge_err = 1000; + flag = 2; + } + + // Save to m_charge_data + m_charge_data[data_key] = {charge, charge_err, flag}; + } + } + } + } + + for (auto& cluster : m_clusters) { + // Get the grouping from the cluster + auto grouping = cluster->grouping(); + // Handle dead channels - loop over all Facade::Blobs in cluster + for (const auto* blob : cluster->children()) { + auto wpid = blob->wpid(); + int apa = wpid.apa(); + int face = wpid.face(); + + // Check each plane for dead channels + for (int plane = 0; plane < 3; ++plane) { + // Check if 
this plane is bad for this blob + if (grouping->is_blob_plane_bad(blob, plane)) { + // Get blob properties + double blob_charge = blob->charge(); + + // Get wire range for this plane + int wire_min, wire_max; + switch (plane) { + case 0: + wire_min = blob->u_wire_index_min(); + wire_max = blob->u_wire_index_max(); + break; + case 1: + wire_min = blob->v_wire_index_min(); + wire_max = blob->v_wire_index_max(); + break; + case 2: + wire_min = blob->w_wire_index_min(); + wire_max = blob->w_wire_index_max(); + break; + } + + int num_wires = wire_max - wire_min; + if (num_wires <= 0) continue; + + // Get time range + int time_min = blob->slice_index_min(); + // int time_max = blob->slice_index_max(); + + // Process each dead pixel + int time_slice = time_min; + for (int wire_index = wire_min; wire_index < wire_max; ++wire_index) { + int channel = fetch_channel_from_anode(apa, face, plane, wire_index); + CoordReadout data_key(apa, time_slice, channel); + + // Check if content exists + auto it = m_charge_data.find(data_key); + + if (it == m_charge_data.end()) { + // No existing content + double charge = blob_charge / num_wires; + double charge_err = sqrt(pow(charge * m_params.rel_charge_uncer, 2) + pow(m_params.add_charge_uncer, 2)); + m_charge_data[data_key] = {charge, charge_err, 0}; + } else if (it->second.flag == 0) { + // Existing content with flag = 0 + double new_charge = blob_charge / num_wires; + double new_charge_err = sqrt(pow(new_charge * m_params.rel_charge_uncer, 2) + pow(m_params.add_charge_uncer, 2)); + + it->second.charge += new_charge; + it->second.charge_err = sqrt(pow(it->second.charge_err, 2) + pow(new_charge_err, 2)); + } + // If flag != 0, do nothing + } + } + } + } + } + + + // std::cout << "Number of Measurements: " << m_charge_data.size() << std::endl; + // for (const auto& [coord_key, charge_measurement] : m_charge_data) { + // std::cout << "CoordReadout: (APA=" << coord_key.apa + // << ", Time=" << coord_key.time + // << ", Channel=" << 
coord_key.channel + // << ") -> Charge=" << charge_measurement.charge + // << ", ChargeErr=" << charge_measurement.charge_err + // << ", Flag=" << charge_measurement.flag << std::endl; + // } +} + +void TrackFitting::fill_global_rb_map() { + // Clear the global readout map first + if (global_rb_map.size() != 0 ) return; + + auto clusters = m_grouping->children(); + // Loop over the m_grouping's clusters + for (auto& cluster : clusters) { + // For each cluster, loop over its blobs + if (!cluster->get_scope_filter(cluster->get_default_scope())) continue; + + auto blobs = cluster->children(); + for (auto blob : blobs) { + if (!blob) continue; + + // Get the wire plane ID for this blob to determine apa and face + auto wpid = blob->wpid(); + int apa = wpid.apa(); + int face = wpid.face(); + + // Get the time slice bounds for this blob + int time_slice_min = blob->slice_index_min(); + // int time_slice_max = blob->slice_index_max(); + + // For every blob, loop over its planes (U=0, V=1, W=2) + for (int plane = 0; plane < 3; ++plane) { + if (m_grouping->is_blob_plane_bad(blob, plane)) continue; + + // Get wire bounds for this plane in the blob + int wire_min, wire_max; + switch (plane) { + case 0: // U plane + wire_min = blob->u_wire_index_min(); + wire_max = blob->u_wire_index_max(); + break; + case 1: // V plane + wire_min = blob->v_wire_index_min(); + wire_max = blob->v_wire_index_max(); + break; + case 2: // W plane + wire_min = blob->w_wire_index_min(); + wire_max = blob->w_wire_index_max(); + break; + default: + continue; + } + + // Skip if no valid wire range + if (wire_min >= wire_max) continue; + + // Loop over time slices in this blob + int time_slice = time_slice_min; + // Loop over wire indices in this plane + for (int wire_index = wire_min; wire_index < wire_max; ++wire_index) { + // Convert wire coordinates to channel using existing helper function + int channel = fetch_channel_from_anode(apa, face, plane, wire_index); + if (channel == -1) continue; // Skip 
invalid channels + + // Create CoordReadout key and find out its CoordReadout + CoordReadout coord_key(apa, time_slice, channel); + + // Fill in global_rb_map - add this blob to the set for this coordinate + global_rb_map[coord_key].insert(blob); + // std::cout << "Added blob to global_rb_map at " << coord_key.apa << " " << coord_key.time << " " << coord_key.channel << std::endl; + } + } + } + } + + std::cout << "Global RB Map filled with " << global_rb_map.size() << " coordinate entries." << std::endl; +} + +// ============================================================================ +// Helper functions for organize_segments_path methods +// ============================================================================ + +void TrackFitting::check_and_reset_close_vertices() { + if (!m_graph) return; + + auto edge_range = boost::edges(*m_graph); + for (auto e_it = edge_range.first; e_it != edge_range.second; ++e_it) { + auto& edge_bundle = (*m_graph)[*e_it]; + auto segment = edge_bundle.segment; + if (!segment) continue; + + // Get vertices connected to this segment + auto vd1 = boost::source(*e_it, *m_graph); + auto vd2 = boost::target(*e_it, *m_graph); + auto& v1_bundle = (*m_graph)[vd1]; + auto& v2_bundle = (*m_graph)[vd2]; + auto start_v = v1_bundle.vertex; + auto end_v = v2_bundle.vertex; + + if (!start_v || !end_v) continue; + + // Determine which vertex corresponds to which end of the segment + const auto& segment_wcpts = segment->wcpts(); + if (segment_wcpts.empty()) continue; + + // Check vertex ordering by comparing with segment endpoints + if (start_v->wcpt().index != segment_wcpts.front().index) { + std::swap(start_v, end_v); + std::swap(vd1, vd2); + } + + // Check if vertices are too close together + double vertex_distance = sqrt( + pow(start_v->fit().point.x() - end_v->fit().point.x(), 2) + + pow(start_v->fit().point.y() - end_v->fit().point.y(), 2) + + pow(start_v->fit().point.z() - end_v->fit().point.z(), 2) + ); + + if (vertex_distance < 0.01 * 
units::cm) { + // Reset vertices to original points if they are endpoints (degree 1) + if (boost::degree(vd1, *m_graph) == 1) { + PR::Fit start_fit = start_v->fit(); + start_fit.point = start_v->wcpt().point; + start_v->fit(start_fit); + } + if (boost::degree(vd2, *m_graph) == 1) { + PR::Fit end_fit = end_v->fit(); + end_fit.point = end_v->wcpt().point; + end_v->fit(end_fit); + } + } + } +} + +bool TrackFitting::get_ordered_segment_vertices( + std::shared_ptr segment, + const PR::edge_descriptor& ed, + std::shared_ptr& start_v, + std::shared_ptr& end_v, + PR::node_descriptor& vd1, + PR::node_descriptor& vd2 +) { + if (!m_graph || !segment) return false; + + // Get vertices connected to this segment + vd1 = boost::source(ed, *m_graph); + vd2 = boost::target(ed, *m_graph); + auto& v1_bundle = (*m_graph)[vd1]; + auto& v2_bundle = (*m_graph)[vd2]; + start_v = v1_bundle.vertex; + end_v = v2_bundle.vertex; + + if (!start_v || !end_v) return false; + + // Determine which vertex corresponds to which end of the segment + const auto& segment_wcpts = segment->wcpts(); + if (segment_wcpts.empty()) return false; + + // Check vertex ordering by comparing with segment endpoints + if (start_v->wcpt().index != segment_wcpts.front().index) { + std::swap(start_v, end_v); + std::swap(vd1, vd2); + } + + return true; +} + +std::vector TrackFitting::generate_fits_with_projections( + std::shared_ptr segment, + const std::vector& pts +) { + std::vector fits; + if (!segment || pts.empty()) return fits; + + auto cluster = segment->cluster(); + const auto transform = m_pcts->pc_transform(cluster->get_scope_transform(cluster->get_default_scope())); + double cluster_t0 = cluster->get_cluster_t0(); + + for (size_t i = 0; i != pts.size(); i++) { + PR::Fit fit; + fit.point = pts.at(i); + fit.dQ = 0; + fit.dx = -1; + fit.reduced_chi2 = 0; + + // Generate 2D projections + auto test_wpid = m_dv->contained_by(pts.at(i)); + if (test_wpid.apa() != -1 && test_wpid.face() != -1) { + int apa = 
test_wpid.apa(); + int face = test_wpid.face(); + + auto p_raw = transform->backward(pts.at(i), cluster_t0, apa, face); + WirePlaneId wpid(kAllLayers, face, apa); + auto offset_it = wpid_offsets.find(wpid); + auto slope_it = wpid_slopes.find(wpid); + + if (offset_it != wpid_offsets.end() && slope_it != wpid_slopes.end()) { + auto offset_t = std::get<0>(offset_it->second); + auto offset_u = std::get<1>(offset_it->second); + auto offset_v = std::get<2>(offset_it->second); + auto offset_w = std::get<3>(offset_it->second); + auto slope_x = std::get<0>(slope_it->second); + auto slope_yu = std::get<1>(slope_it->second).first; + auto slope_zu = std::get<1>(slope_it->second).second; + auto slope_yv = std::get<2>(slope_it->second).first; + auto slope_zv = std::get<2>(slope_it->second).second; + auto slope_yw = std::get<3>(slope_it->second).first; + auto slope_zw = std::get<3>(slope_it->second).second; + + fit.pu = offset_u + (slope_yu * p_raw.y() + slope_zu * p_raw.z()); + fit.pv = offset_v + (slope_yv * p_raw.y() + slope_zv * p_raw.z()); + fit.pw = offset_w + (slope_yw * p_raw.y() + slope_zw * p_raw.z()); + fit.pt = offset_t + slope_x * p_raw.x(); + fit.paf = std::make_pair(apa, face); + } + } + + fits.push_back(fit); + } + + return fits; +} + +void TrackFitting::organize_segments_path_3rd(double step_size){ + if (!m_graph) return; + + // First pass: check for vertices that are too close together + check_and_reset_close_vertices(); + + // Second pass: organize segments path with uniform step size + auto edge_range = boost::edges(*m_graph); + for (auto e_it = edge_range.first; e_it != edge_range.second; ++e_it) { + auto& edge_bundle = (*m_graph)[*e_it]; + auto segment = edge_bundle.segment; + if (!segment) continue; + + // Get ordered vertices + std::shared_ptr start_v, end_v; + PR::node_descriptor vd1, vd2; + if (!get_ordered_segment_vertices(segment, *e_it, start_v, end_v, vd1, vd2)) continue; + + // Check if vertices are endpoints (degree == 1) + bool flag_startv_end = 
(boost::degree(vd1, *m_graph) == 1); + bool flag_endv_end = (boost::degree(vd2, *m_graph) == 1); + + std::vector pts, curr_pts; + + // Get current fitted path from the segment + if (!segment->fits().empty()) { + for (const auto& fit : segment->fits()) { + curr_pts.push_back(fit.point); + } + } else { + // If no fits, use original wcpts + for (const auto& wcpt : segment->wcpts()) { + curr_pts.push_back(wcpt.point); + } + } + + // Examine end points + curr_pts = examine_end_ps_vec(segment, curr_pts, flag_startv_end, flag_endv_end); + + WireCell::Point start_p = curr_pts.front(); + WireCell::Point end_p = curr_pts.back(); + + // Build points with uniform step size + pts.push_back(start_p); + double extra_dis = 0; + + for (size_t i = 0; i != curr_pts.size(); i++) { + WireCell::Point p1 = curr_pts.at(i); + + double dis_end = sqrt(pow(p1.x() - end_p.x(), 2) + pow(p1.y() - end_p.y(), 2) + pow(p1.z() - end_p.z(), 2)); + if (dis_end < step_size) continue; + + double dis_prev = sqrt(pow(p1.x() - pts.back().x(), 2) + pow(p1.y() - pts.back().y(), 2) + pow(p1.z() - pts.back().z(), 2)); + + if (dis_prev + extra_dis > step_size) { + extra_dis += dis_prev; + while (extra_dis > step_size) { + WireCell::Point tmp_p( + pts.back().x() + (p1.x() - pts.back().x()) / dis_prev * step_size, + pts.back().y() + (p1.y() - pts.back().y()) / dis_prev * step_size, + pts.back().z() + (p1.z() - pts.back().z()) / dis_prev * step_size + ); + pts.push_back(tmp_p); + dis_prev = sqrt(pow(p1.x() - pts.back().x(), 2) + pow(p1.y() - pts.back().y(), 2) + pow(p1.z() - pts.back().z(), 2)); + extra_dis -= step_size; + } + } else if (dis_prev + extra_dis < step_size) { + extra_dis += dis_prev; + continue; + } else { + pts.push_back(p1); + extra_dis = 0; + } + } + + // Handle end point properly + { + double dis1 = sqrt(pow(pts.back().x() - end_p.x(), 2) + pow(pts.back().y() - end_p.y(), 2) + pow(pts.back().z() - end_p.z(), 2)); + + if (dis1 < step_size * 0.6) { + if (pts.size() <= 1) { + // Do nothing + } else 
{ + double dis2 = sqrt(pow(pts.back().x() - pts.at(pts.size()-2).x(), 2) + + pow(pts.back().y() - pts.at(pts.size()-2).y(), 2) + + pow(pts.back().z() - pts.at(pts.size()-2).z(), 2)); + double dis3 = (dis1 + dis2) / 2.0; + + WireCell::Point tmp_p( + pts.at(pts.size()-2).x() + (pts.back().x() - pts.at(pts.size()-2).x()) / dis2 * dis3, + pts.at(pts.size()-2).y() + (pts.back().y() - pts.at(pts.size()-2).y()) / dis2 * dis3, + pts.at(pts.size()-2).z() + (pts.back().z() - pts.at(pts.size()-2).z()) / dis2 * dis3 + ); + pts.pop_back(); + pts.push_back(tmp_p); + } + } else if (dis1 > step_size * 1.6) { + int npoints = std::round(dis1 / step_size); + WireCell::Point p_save = pts.back(); + for (int j = 0; j + 1 < npoints; j++) { + WireCell::Point p( + p_save.x() + (end_p.x() - p_save.x()) / npoints * (j + 1), + p_save.y() + (end_p.y() - p_save.y()) / npoints * (j + 1), + p_save.z() + (end_p.z() - p_save.z()) / npoints * (j + 1) + ); + pts.push_back(p); + } + } + + pts.push_back(end_p); + } + + // Ensure there is no single point + if (pts.size() == 1) { + pts.push_back(end_p); + } + + // Generate 2D projections and store fit points in the segment + segment->fits(generate_fits_with_projections(segment, pts)); + } +} + +void TrackFitting::organize_segments_path_2nd(double low_dis_limit, double end_point_limit){ + if (!m_graph) return; + + // First pass: check for vertices that are too close together + check_and_reset_close_vertices(); + + // Second pass: organize segments path with 2D projection + auto edge_range = boost::edges(*m_graph); + for (auto e_it = edge_range.first; e_it != edge_range.second; ++e_it) { + auto& edge_bundle = (*m_graph)[*e_it]; + auto segment = edge_bundle.segment; + if (!segment) continue; + + // Get ordered vertices + std::shared_ptr start_v, end_v; + PR::node_descriptor vd1, vd2; + if (!get_ordered_segment_vertices(segment, *e_it, start_v, end_v, vd1, vd2)) continue; + + // Check if vertices are endpoints (degree == 1) + bool flag_startv_end = 
(boost::degree(vd1, *m_graph) == 1); + bool flag_endv_end = (boost::degree(vd2, *m_graph) == 1); + + std::vector pts, curr_pts; + + // Get current fitted path from the segment + if (!segment->fits().empty()) { + for (const auto& fit : segment->fits()) { + curr_pts.push_back(fit.point); + } + } else { + // If no fits, use original wcpts + for (const auto& wcpt : segment->wcpts()) { + curr_pts.push_back(wcpt.point); + } + } + + // Examine end points + curr_pts = examine_end_ps_vec(segment, curr_pts, flag_startv_end, flag_endv_end); + + WireCell::Point start_p, end_p; + + // Process start vertex + if (!start_v->fit().flag_fix) { + start_p = curr_pts.front(); + + if (flag_startv_end) { + WireCell::Point p2 = curr_pts.front(); + double dis1 = 0; + for (auto it = curr_pts.begin(); it != curr_pts.end(); it++) { + p2 = *it; + dis1 = sqrt(pow(start_p.x() - p2.x(), 2) + pow(start_p.y() - p2.y(), 2) + pow(start_p.z() - p2.z(), 2)); + if (dis1 > low_dis_limit) break; + } + if (dis1 != 0) { + start_p = WireCell::Point( + start_p.x() + (start_p.x() - p2.x()) / dis1 * end_point_limit, + start_p.y() + (start_p.y() - p2.y()) / dis1 * end_point_limit, + start_p.z() + (start_p.z() - p2.z()) / dis1 * end_point_limit + ); + } + } + + // Set fit point for start vertex + PR::Fit start_fit = start_v->fit(); + start_fit.point = start_p; + start_v->fit(start_fit); + } else { + start_p = start_v->fit().point; + } + + // Process end vertex + if (!end_v->fit().flag_fix) { + end_p = curr_pts.back(); + + if (flag_endv_end) { + WireCell::Point p2 = curr_pts.back(); + double dis1 = 0; + for (auto it = curr_pts.rbegin(); it != curr_pts.rend(); it++) { + p2 = *it; + dis1 = sqrt(pow(end_p.x() - p2.x(), 2) + pow(end_p.y() - p2.y(), 2) + pow(end_p.z() - p2.z(), 2)); + if (dis1 > low_dis_limit) break; + } + if (dis1 != 0) { + end_p = WireCell::Point( + end_p.x() + (end_p.x() - p2.x()) / dis1 * end_point_limit, + end_p.y() + (end_p.y() - p2.y()) / dis1 * end_point_limit, + end_p.z() + (end_p.z() - 
p2.z()) / dis1 * end_point_limit + ); + } + } + + // Set fit point for end vertex + PR::Fit end_fit = end_v->fit(); + end_fit.point = end_p; + end_v->fit(end_fit); + } else { + end_p = end_v->fit().point; + } + + // Build the middle points + pts.push_back(start_p); + for (size_t i = 0; i != curr_pts.size(); i++) { + WireCell::Point p1 = curr_pts.at(i); + double dis = low_dis_limit; + double dis1 = sqrt(pow(p1.x() - end_p.x(), 2) + pow(p1.y() - end_p.y(), 2) + pow(p1.z() - end_p.z(), 2)); + if (pts.size() > 0) { + dis = sqrt(pow(p1.x() - pts.back().x(), 2) + pow(p1.y() - pts.back().y(), 2) + pow(p1.z() - pts.back().z(), 2)); + } + + if (dis1 < low_dis_limit * 0.8) { + continue; + } else if (dis < low_dis_limit * 0.8) { + continue; + } else if (dis < low_dis_limit * 1.6) { + pts.push_back(p1); + } else { + int npoints = std::round(dis / low_dis_limit); + WireCell::Point p_save = pts.back(); + for (int j = 0; j != npoints; j++) { + WireCell::Point p( + p_save.x() + (p1.x() - p_save.x()) / npoints * (j + 1), + p_save.y() + (p1.y() - p_save.y()) / npoints * (j + 1), + p_save.z() + (p1.z() - p_save.z()) / npoints * (j + 1) + ); + pts.push_back(p); + } + } + } + + // Handle final connection to end point + { + double dis1 = sqrt(pow(pts.back().x() - end_p.x(), 2) + pow(pts.back().y() - end_p.y(), 2) + pow(pts.back().z() - end_p.z(), 2)); + if (dis1 < low_dis_limit * 0.2) { + if (pts.size() > 1) pts.pop_back(); + } else if (dis1 > low_dis_limit * 1.6) { + int npoints = std::round(dis1 / low_dis_limit); + WireCell::Point p_save = pts.back(); + for (int j = 0; j + 1 < npoints; j++) { + WireCell::Point p( + p_save.x() + (end_p.x() - p_save.x()) / npoints * (j + 1), + p_save.y() + (end_p.y() - p_save.y()) / npoints * (j + 1), + p_save.z() + (end_p.z() - p_save.z()) / npoints * (j + 1) + ); + pts.push_back(p); + } + } + pts.push_back(end_p); + } + + // Handle case where only one point exists + if (pts.size() == 1) { + pts.push_back(end_p); + } + + // Generate 2D projections and 
store fit points in the segment + segment->fits(generate_fits_with_projections(segment, pts)); + } +} + + +void TrackFitting::organize_segments_path(double low_dis_limit, double end_point_limit){ + if (!m_graph) return; + + // Iterate over all edges (segments) in the graph + auto edge_range = boost::edges(*m_graph); + for (auto e_it = edge_range.first; e_it != edge_range.second; ++e_it) { + auto& edge_bundle = (*m_graph)[*e_it]; + auto segment = edge_bundle.segment; + if (!segment) continue; + + // Get ordered vertices + std::shared_ptr start_v, end_v; + PR::node_descriptor vd1, vd2; + if (!get_ordered_segment_vertices(segment, *e_it, start_v, end_v, vd1, vd2)) continue; + + // Check if vertices are endpoints (degree == 1) + bool flag_startv_end = (boost::degree(vd1, *m_graph) == 1); + bool flag_endv_end = (boost::degree(vd2, *m_graph) == 1); + + std::vector pts; + std::vector temp_wcps_vec; + + // Convert WCPoints to Points + for (const auto& wcp : segment->wcpts()) { + temp_wcps_vec.push_back(wcp.point); + } + + WireCell::Point start_p, end_p; + + // Process start vertex + if (!start_v->fit().flag_fix) { + start_p = temp_wcps_vec.front(); + + if (flag_startv_end) { + WireCell::Point p2 = temp_wcps_vec.front(); + double dis1 = 0; + for (auto it = temp_wcps_vec.begin(); it != temp_wcps_vec.end(); it++) { + p2 = *it; + dis1 = sqrt(pow(start_p.x() - p2.x(), 2) + pow(start_p.y() - p2.y(), 2) + pow(start_p.z() - p2.z(), 2)); + if (dis1 > low_dis_limit) break; + } + if (dis1 != 0) { + start_p = WireCell::Point( + start_p.x() + (start_p.x() - p2.x()) / dis1 * end_point_limit, + start_p.y() + (start_p.y() - p2.y()) / dis1 * end_point_limit, + start_p.z() + (start_p.z() - p2.z()) / dis1 * end_point_limit + ); + } + } + + // Set fit point for start vertex + PR::Fit start_fit = start_v->fit(); + start_fit.point = start_p; + start_v->fit(start_fit); + } else { + start_p = start_v->fit().point; + } + + // Process end vertex + if (!end_v->fit().flag_fix) { + end_p = 
temp_wcps_vec.back(); + + if (flag_endv_end) { + WireCell::Point p2 = temp_wcps_vec.back(); + double dis1 = 0; + for (auto it = temp_wcps_vec.rbegin(); it != temp_wcps_vec.rend(); it++) { + p2 = *it; + dis1 = sqrt(pow(end_p.x() - p2.x(), 2) + pow(end_p.y() - p2.y(), 2) + pow(end_p.z() - p2.z(), 2)); + if (dis1 > low_dis_limit) break; + } + if (dis1 != 0) { + end_p = WireCell::Point( + end_p.x() + (end_p.x() - p2.x()) / dis1 * end_point_limit, + end_p.y() + (end_p.y() - p2.y()) / dis1 * end_point_limit, + end_p.z() + (end_p.z() - p2.z()) / dis1 * end_point_limit + ); + } + } + + // Set fit point for end vertex + PR::Fit end_fit = end_v->fit(); + end_fit.point = end_p; + end_v->fit(end_fit); + } else { + end_p = end_v->fit().point; + } + + // Build the middle points + pts.push_back(start_p); + for (size_t i = 0; i != temp_wcps_vec.size(); i++) { + WireCell::Point p1 = temp_wcps_vec.at(i); + double dis = low_dis_limit; + double dis1 = sqrt(pow(p1.x() - end_p.x(), 2) + pow(p1.y() - end_p.y(), 2) + pow(p1.z() - end_p.z(), 2)); + if (pts.size() > 0) { + dis = sqrt(pow(p1.x() - pts.back().x(), 2) + pow(p1.y() - pts.back().y(), 2) + pow(p1.z() - pts.back().z(), 2)); + } + + if (dis1 < low_dis_limit * 0.8) { + continue; + } else if (dis < low_dis_limit * 0.8) { + continue; + } else if (dis < low_dis_limit * 1.6) { + pts.push_back(p1); + } else { + int npoints = std::round(dis / low_dis_limit); + WireCell::Point p_save = pts.back(); + for (int j = 0; j != npoints; j++) { + WireCell::Point p( + p_save.x() + (p1.x() - p_save.x()) / npoints * (j + 1), + p_save.y() + (p1.y() - p_save.y()) / npoints * (j + 1), + p_save.z() + (p1.z() - p_save.z()) / npoints * (j + 1) + ); + pts.push_back(p); + } + } + } + + // Handle final connection to end point + { + double dis1 = sqrt(pow(pts.back().x() - end_p.x(), 2) + pow(pts.back().y() - end_p.y(), 2) + pow(pts.back().z() - end_p.z(), 2)); + if (dis1 < low_dis_limit * 0.2) { + if (pts.size() > 1) pts.pop_back(); + } else if (dis1 > 
low_dis_limit * 1.6) { + int npoints = std::round(dis1 / low_dis_limit); + WireCell::Point p_save = pts.back(); + for (int j = 0; j + 1 < npoints; j++) { + WireCell::Point p( + p_save.x() + (end_p.x() - p_save.x()) / npoints * (j + 1), + p_save.y() + (end_p.y() - p_save.y()) / npoints * (j + 1), + p_save.z() + (end_p.z() - p_save.z()) / npoints * (j + 1) + ); + pts.push_back(p); + } + } + pts.push_back(end_p); + } + + // Generate 2D projections and store fit points in the segment + segment->fits(generate_fits_with_projections(segment, pts)); + } +} + +std::vector TrackFitting::organize_orig_path(std::shared_ptr segment, double low_dis_limit, double end_point_limit) { + std::vector pts; + + // Get the WCPoints from the segment + const auto& segment_wcpts = segment->wcpts(); + if (segment_wcpts.empty()) { + return pts; + } + + // Convert WCPoints to vector for easier manipulation + std::vector temp_wcps_vec; + for (const auto& wcp : segment_wcpts) { + temp_wcps_vec.push_back(wcp.point); + } + + // Fill in the beginning point ... 
+ { + WireCell::Point p1 = temp_wcps_vec.front(); + WireCell::Point p2 = temp_wcps_vec.front(); + double dis1 = 0; + for (auto it = temp_wcps_vec.begin(); it != temp_wcps_vec.end(); it++) { + p2 = *it; + dis1 = sqrt(pow(p1.x() - p2.x(), 2) + pow(p1.y() - p2.y(), 2) + pow(p1.z() - p2.z(), 2)); + if (dis1 > low_dis_limit) break; + } + if (dis1 != 0) { + WireCell::Point extended_p1( + p1.x() + (p1.x() - p2.x()) / dis1 * end_point_limit, + p1.y() + (p1.y() - p2.y()) / dis1 * end_point_limit, + p1.z() + (p1.z() - p2.z()) / dis1 * end_point_limit + ); + pts.push_back(extended_p1); + } + } + + // std::cout << "Test b: " << pts.size() << " " << pts.back() << " " << temp_wcps_vec.front() << std::endl; + + // Fill in the middle part + for (size_t i = 0; i != temp_wcps_vec.size(); i++) { + WireCell::Point p1 = temp_wcps_vec.at(i); + + double dis = low_dis_limit; + if (pts.size() > 0) { + dis = sqrt(pow(p1.x() - pts.back().x(), 2) + pow(p1.y() - pts.back().y(), 2) + pow(p1.z() - pts.back().z(), 2)); + } + + if (dis < low_dis_limit * 0.8) { + continue; + } else if (dis < low_dis_limit * 1.6) { + pts.push_back(p1); + } else { + int npoints = std::round(dis / low_dis_limit); + WireCell::Point p_save = pts.back(); + for (int j = 0; j != npoints; j++) { + WireCell::Point p( + p_save.x() + (p1.x() - p_save.x()) / npoints * (j + 1), + p_save.y() + (p1.y() - p_save.y()) / npoints * (j + 1), + p_save.z() + (p1.z() - p_save.z()) / npoints * (j + 1) + ); + pts.push_back(p); + } + } + } + + // std::cout << "Test m: " << pts.size() << " " << pts.back() << " " << temp_wcps_vec.back() << std::endl; + + + // Fill in the end part + { + WireCell::Point p1 = temp_wcps_vec.back(); + WireCell::Point p2 = temp_wcps_vec.back(); + double dis1 = 0; + for (auto it = temp_wcps_vec.rbegin(); it != temp_wcps_vec.rend(); it++) { + p2 = *it; + dis1 = sqrt(pow(p1.x() - p2.x(), 2) + pow(p1.y() - p2.y(), 2) + pow(p1.z() - p2.z(), 2)); + if (dis1 > low_dis_limit) break; + } + if (dis1 != 0) { + WireCell::Point 
extended_p1( + p1.x() + (p1.x() - p2.x()) / dis1 * end_point_limit, + p1.y() + (p1.y() - p2.y()) / dis1 * end_point_limit, + p1.z() + (p1.z() - p2.z()) / dis1 * end_point_limit + ); + pts.push_back(extended_p1); + } + } + + // std::cout << "Test e: " << pts.size() << " " << pts.back() << " " << temp_wcps_vec.back() << std::endl; + + + return pts; +} + +std::vector TrackFitting::examine_end_ps_vec(std::shared_ptr segment,const std::vector& pts, bool flag_start, bool flag_end) { + std::list ps_list(pts.begin(), pts.end()); + + // get the cluster from the segment + auto cluster = segment->cluster(); + const auto transform = m_pcts->pc_transform(cluster->get_scope_transform(cluster->get_default_scope())); + double cluster_t0 = cluster->get_cluster_t0(); + + if (flag_start) { + // test start + WireCell::Point temp_start = ps_list.front(); + while (ps_list.size() > 0) { + // figure out the wpid for ps_list.front() ... + auto test_wpid = m_dv->contained_by(ps_list.front()); + + if (test_wpid.face() != -1 && test_wpid.apa() != -1) { + // this function takes the raw points ... 
+ auto temp_p_raw = transform->backward(ps_list.front(), cluster_t0, test_wpid.face(), test_wpid.apa()); + // std::cout << temp_p_raw << " " << ps_list.front() << " " << test_wpid.apa() << " " << test_wpid.face() << std::endl; + if (m_grouping->is_good_point(temp_p_raw, test_wpid.apa(), test_wpid.face(), 0.2*units::cm, 0, 0)) break; + } + temp_start = ps_list.front(); + ps_list.pop_front(); + } + + if (ps_list.size() > 0) { + double dis_step = 0.2*units::cm; + double temp_dis = sqrt(pow(temp_start.x() - ps_list.front().x(), 2) + pow(temp_start.y() - ps_list.front().y(), 2) + pow(temp_start.z() - ps_list.front().z(), 2)); + int ntest = std::round(temp_dis/dis_step); + for (size_t i = 1; i < ntest; i++) { + WireCell::Point test_p(temp_start.x() + (ps_list.front().x() - temp_start.x())/ntest * i, + temp_start.y() + (ps_list.front().y() - temp_start.y())/ntest * i, + temp_start.z() + (ps_list.front().z() - temp_start.z())/ntest * i); + // figure out the wpid for the test_p ... + auto test_wpid = m_dv->contained_by(test_p); + if (test_wpid.face() != -1 && test_wpid.apa() != -1) { + // this function takes the raw points ... + auto temp_p_raw = transform->backward(test_p, cluster_t0, test_wpid.face(), test_wpid.apa()); + if (m_grouping->is_good_point(temp_p_raw, test_wpid.apa(), test_wpid.face(), 0.2*units::cm, 0, 0)) { + ps_list.push_front(test_p); + break; + } + } + } + } else { + ps_list.push_front(temp_start); + } + } + + if (flag_end) { + WireCell::Point temp_end = ps_list.back(); + while (ps_list.size() > 0) { + // figure out the wpid for the ps_list.back() ... + auto test_wpid = m_dv->contained_by(ps_list.back()); + if (test_wpid.face() != -1 && test_wpid.apa() != -1) { + //this function takes the raw points ... 
+ auto temp_p_raw = transform->backward(ps_list.back(), cluster_t0, test_wpid.face(), test_wpid.apa()); + if (m_grouping->is_good_point(temp_p_raw, test_wpid.apa(), test_wpid.face(), 0.2*units::cm, 0, 0)) break; + } + temp_end = ps_list.back(); + ps_list.pop_back(); + } + if (ps_list.size() > 0) { + double dis_step = 0.2*units::cm; + double temp_dis = sqrt(pow(temp_end.x() - ps_list.back().x(), 2) + pow(temp_end.y() - ps_list.back().y(), 2) + pow(temp_end.z() - ps_list.back().z(), 2)); + int ntest = std::round(temp_dis/dis_step); + for (size_t i = 1; i < ntest; i++) { + WireCell::Point test_p(temp_end.x() + (ps_list.back().x() - temp_end.x())/ntest * i, + temp_end.y() + (ps_list.back().y() - temp_end.y())/ntest * i, + temp_end.z() + (ps_list.back().z() - temp_end.z())/ntest * i); + + auto test_wpid = m_dv->contained_by(test_p); + // figure out the wpid for the test_p ... + if (test_wpid.face() != -1 && test_wpid.apa() != -1) { + // the following function takes raw points ... + auto temp_p_raw = transform->backward(test_p, cluster_t0, test_wpid.face(), test_wpid.apa()); + if (m_grouping->is_good_point(temp_p_raw, test_wpid.apa(), test_wpid.face(), 0.2*units::cm, 0, 0)) { + ps_list.push_back(test_p); + break; + } + } + } + } else { + ps_list.push_back(temp_end); + } + } + + std::vector tmp_pts(ps_list.begin(), ps_list.end()); + return tmp_pts; +} + + +void TrackFitting::organize_ps_path(std::shared_ptr segment, std::vector& pts, double low_dis_limit, double end_point_limit) { + + std::vector ps_vec = examine_end_ps_vec(segment, pts, true, true); + if (ps_vec.size() <= 1) ps_vec = pts; + + pts.clear(); + // fill in the beginning part + { + WireCell::Point p1 = ps_vec.front(); + WireCell::Point p2 = ps_vec.front(); + double dis1 = 0; + for (auto it = ps_vec.begin(); it != ps_vec.end(); it++) { + p2 = *it; + dis1 = sqrt(pow(p1.x() - p2.x(), 2) + pow(p1.y() - p2.y(), 2) + pow(p1.z() - p2.z(), 2)); + if (dis1 > low_dis_limit) break; + } + if (dis1 > low_dis_limit) { + 
WireCell::Point extended_p1( + p1.x() + (p1.x() - p2.x()) / dis1 * end_point_limit, + p1.y() + (p1.y() - p2.y()) / dis1 * end_point_limit, + p1.z() + (p1.z() - p2.z()) / dis1 * end_point_limit + ); + pts.push_back(extended_p1); + } + } + + // fill in the middle part + for (size_t i = 0; i != ps_vec.size(); i++) { + WireCell::Point p1 = ps_vec.at(i); + double dis; + if (pts.size() != 0) { + dis = sqrt(pow(p1.x() - pts.back().x(), 2) + pow(p1.y() - pts.back().y(), 2) + pow(p1.z() - pts.back().z(), 2)); + } else { + dis = sqrt(pow(p1.x() - ps_vec.back().x(), 2) + pow(p1.y() - ps_vec.back().y(), 2) + pow(p1.z() - ps_vec.back().z(), 2)); + } + + // std::cout << i << " " << dis << " " << low_dis_limit * 0.8 << " " << low_dis_limit * 1.6 << std::endl; + + if (dis < low_dis_limit * 0.8) { + continue; + } else if (dis < low_dis_limit * 1.6) { + pts.push_back(p1); + } else { + int npoints = std::round(dis / low_dis_limit); + WireCell::Point p_save = pts.back(); + for (int j = 0; j != npoints; j++) { + WireCell::Point p( + p_save.x() + (p1.x() - p_save.x()) / npoints * (j + 1), + p_save.y() + (p1.y() - p_save.y()) / npoints * (j + 1), + p_save.z() + (p1.z() - p_save.z()) / npoints * (j + 1) + ); + pts.push_back(p); + } + } + } + + // fill in the end part + if (end_point_limit != 0) { + WireCell::Point p1 = ps_vec.back(); + WireCell::Point p2 = ps_vec.back(); + double dis1 = 0; + for (auto it = ps_vec.rbegin(); it != ps_vec.rend(); it++) { + p2 = *it; + dis1 = sqrt(pow(p1.x() - p2.x(), 2) + pow(p1.y() - p2.y(), 2) + pow(p1.z() - p2.z(), 2)); + if (dis1 > low_dis_limit) break; + } + if (dis1 != 0) { + WireCell::Point extended_p1( + p1.x() + (p1.x() - p2.x()) / dis1 * end_point_limit, + p1.y() + (p1.y() - p2.y()) / dis1 * end_point_limit, + p1.z() + (p1.z() - p2.z()) / dis1 * end_point_limit + ); + pts.push_back(extended_p1); + } + } else { + WireCell::Point p1 = ps_vec.back(); + double dis1 = sqrt(pow(p1.x() - pts.back().x(), 2) + pow(p1.y() - pts.back().y(), 2) + pow(p1.z() - 
pts.back().z(), 2)); + if (dis1 >= 0.45*units::cm) + pts.push_back(p1); + } + + if (pts.size() <= 1) + pts = ps_vec; +} + + + void TrackFitting::form_point_association(std::shared_ptr segment,WireCell::Point &p, PlaneData& temp_2dut, PlaneData& temp_2dvt, PlaneData& temp_2dwt, double dis_cut, int nlevel, double time_tick_cut ){ + + // Clear previous associations + temp_2dut.associated_2d_points.clear(); + temp_2dvt.associated_2d_points.clear(); + temp_2dwt.associated_2d_points.clear(); + + // Get cluster from segment + auto cluster = segment->cluster(); + const auto transform = m_pcts->pc_transform(cluster->get_scope_transform(cluster->get_default_scope())); + double cluster_t0 = cluster->get_cluster_t0(); + // find the raw point ... + + + // Get closest point in cluster and find neighbors using graph + auto closest_result = cluster->get_closest_wcpoint(geo_point_t(p.x(), p.y(), p.z())); + size_t closest_point_index = closest_result.first; + geo_point_t closest_point = closest_result.second; + + double temp_dis = sqrt(pow(closest_point.x() - p.x(), 2) + + pow(closest_point.y() - p.y(), 2) + + pow(closest_point.z() - p.z(), 2)); + // find the WPID for this point ... 
+ WirePlaneId wpid = cluster->wire_plane_id(closest_point_index); + int apa = wpid.apa(); + int face = wpid.face(); + + // Get geometry information from TrackFitting internal data + auto paras = wpid_params.find(wpid); + auto geoms = wpid_geoms.find(wpid); + + double angle_u = std::get<1>(paras->second); + double angle_v = std::get<2>(paras->second); + double angle_w = std::get<3>(paras->second); + + double time_tick_width = std::get<0>(geoms->second); + double pitch_u = std::get<1>(geoms->second); + double pitch_v = std::get<2>(geoms->second); + double pitch_w = std::get<3>(geoms->second); + + // Get wire indices and time slice for current point + WirePlaneId current_wpid = wpid; + int cur_wire_u = cluster->wire_index(closest_point_index, 0); + int cur_wire_v = cluster->wire_index(closest_point_index, 1); + int cur_wire_w = cluster->wire_index(closest_point_index, 2); + int cur_time_slice = cluster->blob_with_point(closest_point_index)->slice_index_min(); + int cur_ntime_ticks = cluster->blob_with_point(closest_point_index)->slice_index_max() - cur_time_slice; + + if (temp_dis < dis_cut){ + + // auto p_raw = transform->backward(p, cluster_t0, apa, face); + // std::cout << "WirePlaneId: " << wpid << ", Angles: (" << angle_u << ", " << angle_v << ", " << angle_w << ")" << " " << time_tick_width/units::cm << " " << pitch_u/units::cm << " " << pitch_v/units::cm << " " << pitch_w/units::cm << std::endl; + // Get graph algorithms interface + // auto cached_gas = cluster->get_cached_graph_algorithms(); + // for (auto ga: cached_gas){ + // std::cout << "GraphAlgorithm name: " << ga << std::endl; + // } + + const auto& ga = cluster->graph_algorithms("basic_pid"); + //Find nearby points using graph traversal + auto total_vertices_found = ga.find_neighbors_nlevel(closest_point_index, nlevel); + // std::cout << "Neighbors: " << closest_point_index << " " << total_vertices_found.size() << std::endl; + + // Collect nearby blobs and their properties + std::set nearby_blobs_set; 
+ for (auto vertex_idx : total_vertices_found) { + const Facade::Blob* blob = cluster->blob_with_point(vertex_idx); + if (blob) { + auto blob_wpid = blob->wpid(); + if (blob_wpid.apa()==apa && blob_wpid.face()==face) + nearby_blobs_set.insert(blob); + } + // // print out the distance between the vertex_idx and the original point + // geo_point_t vertex_point = cluster->point3d(vertex_idx); + // double distance = sqrt(pow(vertex_point.x() - p.x(), 2) + + // pow(vertex_point.y() - p.y(), 2) + + // pow(vertex_point.z() - p.z(), 2)); + // std::cout << "Vertex " << vertex_idx << " distance to original point: " + // << distance/units::cm << " cm" << std::endl; + } + + + // std::cout << nearby_blobs_set.size() << " nearby blobs found for point " << cur_time_slice << " " << cur_wire_u << " " << cur_wire_v << " " << cur_wire_w << " " << dis_cut << std::endl; + + // Calculate adaptive distance cuts for each plane + double dis_cut_u = dis_cut; + double dis_cut_v = dis_cut; + double dis_cut_w = dis_cut; + double max_time_slice_u = 0, max_time_slice_v = 0, max_time_slice_w = 0; + + // Find maximum time slice differences for adaptive cuts + for (const auto* blob : nearby_blobs_set) { + int this_time_slice = blob->slice_index_min(); + + // Check U plane + if (cur_wire_u >= blob->u_wire_index_min()-1 && cur_wire_u < blob->u_wire_index_max() + 1) { + max_time_slice_u = std::max(max_time_slice_u, static_cast(abs(this_time_slice - cur_time_slice))); + } + + // Check V plane + if (cur_wire_v >= blob->v_wire_index_min()-1 && cur_wire_v < blob->v_wire_index_max() + 1) { + max_time_slice_v = std::max(max_time_slice_v, static_cast(abs(this_time_slice - cur_time_slice))); + } + + // Check W plane + if (cur_wire_w >= blob->w_wire_index_min()-1 && cur_wire_w < blob->w_wire_index_max() + 1) { + max_time_slice_w = std::max(max_time_slice_w, static_cast(abs(this_time_slice - cur_time_slice))); + } + } + + // Update distance cuts based on time slice spans + if (max_time_slice_u * time_tick_width 
* 1.2 < dis_cut_u) + dis_cut_u = max_time_slice_u * time_tick_width * 1.2; + if (max_time_slice_v * time_tick_width * 1.2 < dis_cut_v) + dis_cut_v = max_time_slice_v * time_tick_width * 1.2; + if (max_time_slice_w * time_tick_width * 1.2 < dis_cut_w) + dis_cut_w = max_time_slice_w * time_tick_width * 1.2; + + // std::cout << dis_cut_u << " " << dis_cut_v << " " << dis_cut_w << std::endl; + + // Process each nearby blob for wire range calculations + for (const auto* blob : nearby_blobs_set) { + int this_time_slice = blob->slice_index_min(); + + // std::cout << "Blob info: " << blob->u_wire_index_min() << " " << blob->u_wire_index_max() << " " << blob->v_wire_index_min() << " " << blob->v_wire_index_max() << " " << blob->w_wire_index_min() << " " << blob->w_wire_index_max() << " " << this_time_slice << std::endl; + + // Calculate remaining distance cuts accounting for time offset + double rem_dis_sq_cut_u = pow(dis_cut_u, 2) - pow((cur_time_slice - this_time_slice) * time_tick_width, 2); + double rem_dis_sq_cut_v = pow(dis_cut_v, 2) - pow((cur_time_slice - this_time_slice) * time_tick_width, 2); + double rem_dis_sq_cut_w = pow(dis_cut_w, 2) - pow((cur_time_slice - this_time_slice) * time_tick_width, 2); + + // std::cout << rem_dis_cut_u << " " << rem_dis_cut_v << " " << rem_dis_cut_w << " " << cur_time_slice << " " < 0 || rem_dis_sq_cut_v > 0 || rem_dis_sq_cut_w > 0) && abs(cur_time_slice - this_time_slice) <= time_tick_cut) { + + // Calculate minimum wire distances + float min_u_dis, min_v_dis, min_w_dis; + + // U wire distance + if (cur_wire_u < blob->u_wire_index_min()) { + min_u_dis = blob->u_wire_index_min() - cur_wire_u; + } else if (cur_wire_u < blob->u_wire_index_max()) { + min_u_dis = 0; + } else { + min_u_dis = cur_wire_u - blob->u_wire_index_max()+1; + } + + // V wire distance + if (cur_wire_v < blob->v_wire_index_min()) { + min_v_dis = blob->v_wire_index_min() - cur_wire_v; + } else if (cur_wire_v < blob->v_wire_index_max()) { + min_v_dis = 0; + } else { 
+ min_v_dis = cur_wire_v - blob->v_wire_index_max()+1; + } + + // W wire distance + if (cur_wire_w < blob->w_wire_index_min()) { + min_w_dis = blob->w_wire_index_min() - cur_wire_w; + } else if (cur_wire_w < blob->w_wire_index_max()) { + min_w_dis = 0; + } else { + min_w_dis = cur_wire_w - blob->w_wire_index_max()+1; + } + + // Use the dedicated calculate_ranges_simplified function + float range_sq_u, range_sq_v, range_sq_w; + WireCell::Clus::TrackFittingUtil::calculate_ranges_simplified( + angle_u, angle_v, angle_w, + rem_dis_sq_cut_u, rem_dis_sq_cut_v, rem_dis_sq_cut_w, + min_u_dis, min_v_dis, min_w_dis, + pitch_u, pitch_v, pitch_w, + range_sq_u, range_sq_v, range_sq_w); + + // std::cout << "Cuts: " << range_sq_u << " " << range_sq_v << " " << range_sq_w << std::endl; + + + // If all ranges are positive, add wire indices to associations + if (range_sq_u > 0 && range_sq_v > 0 && range_sq_w > 0) { + // Calculate wire limits + float low_u_limit = cur_wire_u - sqrt(range_sq_u) / pitch_u; + float high_u_limit = cur_wire_u + sqrt(range_sq_u) / pitch_u; + float low_v_limit = cur_wire_v - sqrt(range_sq_v) / pitch_v; + float high_v_limit = cur_wire_v + sqrt(range_sq_v) / pitch_v; + float low_w_limit = cur_wire_w - sqrt(range_sq_w) / pitch_w; + float high_w_limit = cur_wire_w + sqrt(range_sq_w) / pitch_w; + + // std::cout << low_u_limit << " " << high_u_limit << " " + // << low_v_limit << " " << high_v_limit << " " + // << low_w_limit << " " << high_w_limit << " " << this_time_slice << std::endl; + + // Add U plane associations + for (int j = std::round(low_u_limit); j <= std::round(high_u_limit); j++) { + Coord2D coord(current_wpid.apa(), current_wpid.face(), + this_time_slice, j, + get_channel_for_wire(current_wpid.apa(), current_wpid.face(), 0, j), + WirePlaneLayer_t::kUlayer); + temp_2dut.associated_2d_points.insert(coord); + } + + // Add V plane associations + for (int j = std::round(low_v_limit); j <= std::round(high_v_limit); j++) { + Coord2D 
coord(current_wpid.apa(), current_wpid.face(), + this_time_slice, j, + get_channel_for_wire(current_wpid.apa(), current_wpid.face(), 1, j), + WirePlaneLayer_t::kVlayer); + temp_2dvt.associated_2d_points.insert(coord); + } + + // Add W plane associations + for (int j = std::round(low_w_limit); j <= std::round(high_w_limit); j++) { + Coord2D coord(current_wpid.apa(), current_wpid.face(), + this_time_slice, j, + get_channel_for_wire(current_wpid.apa(), current_wpid.face(), 2, j), + WirePlaneLayer_t::kWlayer); + temp_2dwt.associated_2d_points.insert(coord); + } + } + } + } + } + + // std::cout << "Pixels: " << temp_2dut.associated_2d_points.size() << " " << temp_2dvt.associated_2d_points.size() << " " << temp_2dwt.associated_2d_points.size() << std::endl; + + + // Steiner Tree ... + if (cluster->has_graph("steiner_graph") && cluster->has_pc("steiner_pc")) { + auto graph_name = "steiner_graph"; + auto pc_name = "steiner_pc"; + const auto& steiner_pc = cluster->get_pc(pc_name); + const auto& coords = cluster->get_default_scope().coords; + const auto& x_coords = steiner_pc.get(coords.at(0))->elements(); + const auto& y_coords = steiner_pc.get(coords.at(1))->elements(); + const auto& z_coords = steiner_pc.get(coords.at(2))->elements(); + const auto& wpid_array = steiner_pc.get("wpid")->elements(); + + + auto steiner_search_result = cluster->kd_steiner_knn(1, p); + auto steiner_search_point = cluster->kd_steiner_points(steiner_search_result); + + size_t closest_point_index = steiner_search_result.front().first; + auto closest_point = steiner_search_point.front().first; + auto closest_point_wpid = steiner_search_point.front().second.first; + + // + // std::cout << closest_point_index << " " << closest_point << " " << p << " " << closest_point_wpid << " " << test << std::endl; + + double temp_dis = sqrt(pow(closest_point.x() - p.x(), 2) + + pow(closest_point.y() - p.y(), 2) + + pow(closest_point.z() - p.z(), 2)); + + // std::cout << "Steiner " << temp_dis << " " << dis_cut << 
" " <graph_algorithms(graph_name); + + // // Print Steiner graph statistics + // const auto& graph_steiner = cluster->get_graph("steiner_graph"); + // std::cout << "Steiner graph: vertices = " << boost::num_vertices(graph_steiner) + // << ", edges = " << boost::num_edges(graph_steiner) << std::endl; + + // Find nearby points using graph traversal (equivalent to original nested loop) + auto total_vertices_found = ga.find_neighbors_nlevel(closest_point_index, nlevel); + + // find the raw point ... + auto closest_point_raw = transform->backward(closest_point, cluster_t0, apa, face); + + // std::cout << p << " " << closest_point << " " << closest_point_raw << std::endl; + + auto cur_u = m_grouping->convert_3Dpoint_time_ch(closest_point_raw, apa, face, 0); + auto cur_v = m_grouping->convert_3Dpoint_time_ch(closest_point_raw, apa, face, 1); + auto cur_w = m_grouping->convert_3Dpoint_time_ch(closest_point_raw, apa, face, 2); + + int cur_time_slice = std::floor(std::get<0>(cur_u)/cur_ntime_ticks)*cur_ntime_ticks; + int cur_wire_u = std::get<1>(cur_u); + int cur_wire_v = std::get<1>(cur_v); + int cur_wire_w = std::get<1>(cur_w); + + // std::cout << "B: " << cluster_t0 << " " << std::get<0>(cur_u) << " " << cur_time_slice << " " << cur_wire_u << " " << cur_wire_v << " " << cur_wire_w << " " << total_vertices_found.size() << " " << nlevel << std::endl; + + // Calculate adaptive distance cuts (equivalent to original max_time_slice_u/v/w calculation) + double dis_cut_u = dis_cut; + double dis_cut_v = dis_cut; + double dis_cut_w = dis_cut; + + double max_time_slice_u = 0; + double max_time_slice_v = 0; + double max_time_slice_w = 0; + + std::map > map_time_wires; + + // std::map> map_vertex_info; + // Collect point indices + for (auto vertex_idx : total_vertices_found) { + auto vertex_wpid = wpid_array[vertex_idx]; + // std::cout << vertex_wpid << " " << std::endl; + + if (vertex_wpid.apa() != apa || vertex_wpid.face() != face) continue; + + // Handle points not associated with 
blobs + geo_point_t vertex_point = {x_coords[vertex_idx], + y_coords[vertex_idx], + z_coords[vertex_idx]}; + + auto vertex_point_raw = transform->backward(vertex_point, cluster_t0, apa, face); + auto vertex_u = m_grouping->convert_3Dpoint_time_ch(vertex_point_raw, apa, face, 0); + auto vertex_v = m_grouping->convert_3Dpoint_time_ch(vertex_point_raw, apa, face, 1); + auto vertex_w = m_grouping->convert_3Dpoint_time_ch(vertex_point_raw, apa, face, 2); + + int vertex_time_slice = std::floor(std::get<0>(vertex_u)/cur_ntime_ticks)*cur_ntime_ticks; + int vertex_wire_u = std::get<1>(vertex_u); + int vertex_wire_v = std::get<1>(vertex_v); + int vertex_wire_w = std::get<1>(vertex_w); + + int umin = vertex_wire_u, umax = vertex_wire_u+1; + int vmin = vertex_wire_v, vmax = vertex_wire_v+1; + int wmin = vertex_wire_w, wmax = vertex_wire_w+1; + auto it = map_time_wires.find(vertex_time_slice); + if (it == map_time_wires.end()) { + // No entry yet, insert new boundaries + map_time_wires[vertex_time_slice] = std::make_tuple(umin, umax, vmin, vmax, wmin, wmax); + } else { + // Update boundaries if needed + auto& tup = it->second; + std::get<0>(tup) = std::min(std::get<0>(tup), umin); + std::get<1>(tup) = std::max(std::get<1>(tup), umax); + std::get<2>(tup) = std::min(std::get<2>(tup), vmin); + std::get<3>(tup) = std::max(std::get<3>(tup), vmax); + std::get<4>(tup) = std::min(std::get<4>(tup), wmin); + std::get<5>(tup) = std::max(std::get<5>(tup), wmax); + } + } + + for (const auto& [vertex_time_slice, wire_ranges] : map_time_wires) { + int umin = std::get<0>(wire_ranges); + int umax = std::get<1>(wire_ranges); + int vmin = std::get<2>(wire_ranges); + int vmax = std::get<3>(wire_ranges); + int wmin = std::get<4>(wire_ranges); + int wmax = std::get<5>(wire_ranges); + + for (auto vertex_wire_u = umin; vertex_wire_u < umax; ++vertex_wire_u) { + // Check U, V, W wire proximity + if (abs(vertex_wire_u - cur_wire_u)*pitch_u<=dis_cut) { + if (abs(vertex_time_slice - cur_time_slice) > 
max_time_slice_u) + max_time_slice_u = abs(vertex_time_slice - cur_time_slice); + } + } + for (auto vertex_wire_v = vmin; vertex_wire_v < vmax; ++vertex_wire_v) { + // Check U, V, W wire proximity + if (abs(vertex_wire_v - cur_wire_v)*pitch_v<=dis_cut) { + if (abs(vertex_time_slice - cur_time_slice) > max_time_slice_v) + max_time_slice_v = abs(vertex_time_slice - cur_time_slice); + } + } + for (auto vertex_wire_w = wmin; vertex_wire_w < wmax; ++vertex_wire_w) { + // Check U, V, W wire proximity + if (abs(vertex_wire_w - cur_wire_w)*pitch_w<=dis_cut) { + if (abs(vertex_time_slice - cur_time_slice) > max_time_slice_w) + max_time_slice_w = abs(vertex_time_slice - cur_time_slice); + } + } + } + + // Apply adaptive cuts (equivalent to original adaptive cut logic) + if (max_time_slice_u * time_tick_width * 1.2 < dis_cut_u) + dis_cut_u = max_time_slice_u * time_tick_width * 1.2; + if (max_time_slice_v * time_tick_width * 1.2 < dis_cut_v) + dis_cut_v = max_time_slice_v * time_tick_width * 1.2; + if (max_time_slice_w * time_tick_width * 1.2 < dis_cut_w) + dis_cut_w = max_time_slice_w * time_tick_width * 1.2; + + // std::cout << "Steiner dis: " << dis_cut_u << " " << dis_cut_v << " " << dis_cut_w << " " << std::endl; + + // Process each nearby blob for wire range calculations (equivalent to final loop) + for (const auto& [vertex_time_slice, wire_ranges] : map_time_wires) { + int umin = std::get<0>(wire_ranges); + int umax = std::get<1>(wire_ranges); + int vmin = std::get<2>(wire_ranges); + int vmax = std::get<3>(wire_ranges); + int wmin = std::get<4>(wire_ranges); + int wmax = std::get<5>(wire_ranges); + + + // Calculate remaining distance cuts accounting for time offset + double rem_dis_cut_u = pow(dis_cut_u, 2) - pow((cur_time_slice - vertex_time_slice) * time_tick_width, 2); + double rem_dis_cut_v = pow(dis_cut_v, 2) - pow((cur_time_slice - vertex_time_slice) * time_tick_width, 2); + double rem_dis_cut_w = pow(dis_cut_w, 2) - pow((cur_time_slice - vertex_time_slice) * 
time_tick_width, 2); + + // std::cout << rem_dis_cut_u << " " << rem_dis_cut_v << " " << rem_dis_cut_w << " " << std::endl; + + if ((rem_dis_cut_u > 0 || rem_dis_cut_v > 0 || rem_dis_cut_w > 0) && abs(cur_time_slice - vertex_time_slice) <= time_tick_cut) { + + // Calculate minimum wire distances (equivalent to original min_u/v/w_dis calculation) + float min_u_dis, min_v_dis, min_w_dis; + + // U wire distance + if (cur_wire_u < umin) { + min_u_dis = umin - cur_wire_u; + } else if (cur_wire_u >= umax) { + min_u_dis = cur_wire_u - umax + 1; + } else { + min_u_dis = 0; + } + + // V wire distance + if (cur_wire_v < vmin) { + min_v_dis = vmin - cur_wire_v; + } else if (cur_wire_v >= vmax) { + min_v_dis = cur_wire_v - vmax + 1; + } else { + min_v_dis = 0; + } + + // W wire distance + if (cur_wire_w < wmin) { + min_w_dis = wmin - cur_wire_w; + } else if (cur_wire_w >= wmax) { + min_w_dis = cur_wire_w - wmax + 1; + } else { + min_w_dis = 0; + } + + // Use the dedicated calculate_ranges_simplified function (replaces original range calculation) + float range_u, range_v, range_w; + WireCell::Clus::TrackFittingUtil::calculate_ranges_simplified( + angle_u, angle_v, angle_w, + rem_dis_cut_u, rem_dis_cut_v, rem_dis_cut_w, + min_u_dis, min_v_dis, min_w_dis, + pitch_u, pitch_v, pitch_w, + range_u, range_v, range_w); + + // std::cout << min_u_dis << " " << min_v_dis << " " << min_w_dis << " " << range_u << " " << range_v << " " << range_w << std::endl; + + // If all ranges are positive, add wire indices to associations + if (range_u > 0 && range_v > 0 && range_w > 0) { + // Calculate wire limits (equivalent to original low/high_limit calculations) + float low_u_limit = cur_wire_u - sqrt(range_u) / pitch_u; + float high_u_limit = cur_wire_u + sqrt(range_u) / pitch_u; + float low_v_limit = cur_wire_v - sqrt(range_v) / pitch_v; + float high_v_limit = cur_wire_v + sqrt(range_v) / pitch_v; + float low_w_limit = cur_wire_w - sqrt(range_w) / pitch_w; + float high_w_limit = cur_wire_w + 
sqrt(range_w) / pitch_w; + + // Add U plane associations (equivalent to temp_2dut.insert) + for (int j = std::round(low_u_limit); j <= std::round(high_u_limit); j++) { + Coord2D coord(apa, face, + vertex_time_slice, j, + get_channel_for_wire(apa, face, 0, j), + WirePlaneLayer_t::kUlayer); + temp_2dut.associated_2d_points.insert(coord); + } + // Add V + for (int j = std::round(low_v_limit); j <= std::round(high_v_limit); j++) { + Coord2D coord(apa, face, + vertex_time_slice, j, + get_channel_for_wire(apa, face, 1, j), + WirePlaneLayer_t::kVlayer); + temp_2dvt.associated_2d_points.insert(coord); + } + + // Add W + for (int j = std::round(low_w_limit); j <= std::round(high_w_limit); j++) { + Coord2D coord(apa, face, + vertex_time_slice, j, + get_channel_for_wire(apa, face, 2, j), + WirePlaneLayer_t::kWlayer); + temp_2dwt.associated_2d_points.insert(coord); + } + } + } + } + } + } + + + // std::cout << "Pixels 1: " << temp_2dut.associated_2d_points.size() << " " << temp_2dvt.associated_2d_points.size() << " " << temp_2dwt.associated_2d_points.size() << std::endl; + + // Fallback: simple projection if no associations were found from complex method + if (temp_2dut.associated_2d_points.size() == 0 && + temp_2dvt.associated_2d_points.size() == 0 && + temp_2dwt.associated_2d_points.size() == 0) { + + // Convert time_tick_cut to integer for iteration + int time_cut = static_cast(time_tick_cut); + int wire_cut = time_cut/cur_ntime_ticks; + + // Simple diamond pattern projection around current point + for (int i = -wire_cut; i <= wire_cut; i++) { + // loop over time dimension ... 
+ for (int j = -time_cut; j <= time_cut; j+=cur_ntime_ticks) { + if (abs(i*cur_ntime_ticks) + abs(j) <= time_cut) { + // U plane projection + Coord2D coord_u(apa, face, cur_time_slice + j, cur_wire_u + i, + get_channel_for_wire(apa, face, 0, cur_wire_u + i), + kUlayer); + temp_2dut.associated_2d_points.insert(coord_u); + + // V plane projection + Coord2D coord_v(apa, face, cur_time_slice + j, cur_wire_v + i, + get_channel_for_wire(apa, face, 1, cur_wire_v + i), + kVlayer); + temp_2dvt.associated_2d_points.insert(coord_v); + + // W plane projection + Coord2D coord_w(apa, face, cur_time_slice + j, cur_wire_w + i, + get_channel_for_wire(apa, face, 2, cur_wire_w + i), + kWlayer); + temp_2dwt.associated_2d_points.insert(coord_w); + } + } + } + } + + // std::cout << "Pixels 2: " << temp_2dut.associated_2d_points.size() << " " << temp_2dvt.associated_2d_points.size() << " " << temp_2dwt.associated_2d_points.size() << std::endl; + + + } + +void TrackFitting::update_association(std::shared_ptr segment, PlaneData& temp_2dut, PlaneData& temp_2dvt, PlaneData& temp_2dwt){ + if (!m_graph || !segment) return; + + // Get cluster and transformation info + auto cluster = segment->cluster(); + const auto transform = m_pcts->pc_transform(cluster->get_scope_transform(cluster->get_default_scope())); + // double cluster_t0 = cluster->get_cluster_t0(); + + // Collect all segments from the graph for comparison + std::vector> all_segments; + auto edge_range = boost::edges(*m_graph); + for (auto e_it = edge_range.first; e_it != edge_range.second; ++e_it) { + auto& edge_bundle = (*m_graph)[*e_it]; + if (edge_bundle.segment && edge_bundle.segment != segment) { + all_segments.push_back(edge_bundle.segment); + } + } + + // Process U plane (plane 0) + std::set save_2dut; + for (auto it = temp_2dut.associated_2d_points.begin(); it != temp_2dut.associated_2d_points.end(); it++) { + const auto& coord = *it; + + // Convert 2D coordinates to 3D point using existing geometry infrastructure + // Get 
APA/face info from the coordinate + int apa = coord.apa; + int face = coord.face; + + // Create a test point in raw coordinates + WirePlaneId wpid(kUlayer, face, apa); + auto offset_it = wpid_offsets.find(WirePlaneId(kAllLayers, face, apa)); + auto slope_it = wpid_slopes.find(WirePlaneId(kAllLayers, face, apa)); + + if (offset_it == wpid_offsets.end() || slope_it == wpid_slopes.end()) continue; + + auto offset_t = std::get<0>(offset_it->second); + auto offset_u = std::get<1>(offset_it->second); + auto slope_x = std::get<0>(slope_it->second); + auto slope_yu = std::get<1>(slope_it->second).first; + // auto slope_zu = std::get<1>(slope_it->second).second; + + // Convert wire/time coordinates to 3D position + double raw_x = (coord.time - offset_t) / slope_x; + double raw_y = (coord.wire - offset_u)/slope_yu; + + // Create a test point (simplified - may need proper wire geometry) + WireCell::Point test_point(raw_x, raw_y, 0); // Y,Z will be determined by wire geometry + + // Get distances to main segment and all other segments + auto main_distances = segment_get_closest_2d_distances(segment, test_point, apa, face, "fit"); + double min_dis_track = std::get<0>(main_distances); // U plane distance + + double min_dis1_track = 1e9; + for (const auto& other_seg : all_segments) { + auto other_distances = segment_get_closest_2d_distances(other_seg, test_point, apa, face, "fit"); + double temp_dis = std::get<0>(other_distances); // U plane distance + if (temp_dis < min_dis1_track) { + min_dis1_track = temp_dis; + } + } + + // Apply selection criteria + if (min_dis_track < min_dis1_track || min_dis_track < 0.3 * units::cm) { + save_2dut.insert(*it); + } + } + + // Process V plane (plane 1) + std::set save_2dvt; + for (auto it = temp_2dvt.associated_2d_points.begin(); it != temp_2dvt.associated_2d_points.end(); it++) { + const auto& coord = *it; + + int apa = coord.apa; + int face = coord.face; + + WirePlaneId wpid(kVlayer, face, apa); + auto offset_it = 
wpid_offsets.find(WirePlaneId(kAllLayers, face, apa)); + auto slope_it = wpid_slopes.find(WirePlaneId(kAllLayers, face, apa)); + + if (offset_it == wpid_offsets.end() || slope_it == wpid_slopes.end()) continue; + + auto offset_t = std::get<0>(offset_it->second); + auto offset_v = std::get<2>(offset_it->second); + auto slope_x = std::get<0>(slope_it->second); + auto slope_yv = std::get<2>(slope_it->second).first; + + double raw_x = (coord.time - offset_t) / slope_x; + double raw_y = (coord.wire - offset_v)/slope_yv; + WireCell::Point test_point(raw_x, raw_y, 0); + + auto main_distances = segment_get_closest_2d_distances(segment, test_point, apa, face, "fit"); + double min_dis_track = std::get<1>(main_distances); // V plane distance + + double min_dis1_track = 1e9; + for (const auto& other_seg : all_segments) { + auto other_distances = segment_get_closest_2d_distances(other_seg, test_point, apa, face, "fit"); + double temp_dis = std::get<1>(other_distances); // V plane distance + if (temp_dis < min_dis1_track) { + min_dis1_track = temp_dis; + } + } + + if (min_dis_track < min_dis1_track || min_dis_track < 0.3 * units::cm) { + save_2dvt.insert(*it); + } + } + + // Process W plane (plane 2) + std::set save_2dwt; + for (auto it = temp_2dwt.associated_2d_points.begin(); it != temp_2dwt.associated_2d_points.end(); it++) { + const auto& coord = *it; + + int apa = coord.apa; + int face = coord.face; + + WirePlaneId wpid(kWlayer, face, apa); + auto offset_it = wpid_offsets.find(WirePlaneId(kAllLayers, face, apa)); + auto slope_it = wpid_slopes.find(WirePlaneId(kAllLayers, face, apa)); + + if (offset_it == wpid_offsets.end() || slope_it == wpid_slopes.end()) continue; + + auto offset_t = std::get<0>(offset_it->second); + auto offset_w = std::get<3>(offset_it->second); + auto slope_x = std::get<0>(slope_it->second); + auto slope_yw = std::get<3>(slope_it->second).first; + + double raw_x = (coord.time - offset_t) / slope_x; + double raw_y = (coord.wire - offset_w)/slope_yw; + 
WireCell::Point test_point(raw_x, raw_y, 0); + + auto main_distances = segment_get_closest_2d_distances(segment, test_point, apa, face, "fit"); + double min_dis_track = std::get<2>(main_distances); // W plane distance + + double min_dis1_track = 1e9; + for (const auto& other_seg : all_segments) { + auto other_distances = segment_get_closest_2d_distances(other_seg, test_point, apa, face, "fit"); + double temp_dis = std::get<2>(other_distances); // W plane distance + if (temp_dis < min_dis1_track) { + min_dis1_track = temp_dis; + } + } + + if (min_dis_track < min_dis1_track || min_dis_track < 0.3 * units::cm) { + save_2dwt.insert(*it); + } + } + + // Update the input plane data with filtered results + temp_2dut.associated_2d_points = save_2dut; + temp_2dvt.associated_2d_points = save_2dvt; + temp_2dwt.associated_2d_points = save_2dwt; +} + + + void TrackFitting::examine_point_association(std::shared_ptr segment, WireCell::Point &p, PlaneData& temp_2dut, PlaneData& temp_2dvt, PlaneData& temp_2dwt, bool flag_end_point, double charge_cut){ + + // Get cluster from segment + auto cluster = segment->cluster(); + const auto transform = m_pcts->pc_transform(cluster->get_scope_transform(cluster->get_default_scope())); + double cluster_t0 = cluster->get_cluster_t0(); + + auto first_blob = segment->cluster()->children()[0]; + int cur_ntime_ticks = first_blob->slice_index_max() - first_blob->slice_index_min(); + + // find the apa and face ... 
+ auto wpid = m_dv->contained_by(p); + int apa = wpid.apa(); + int face = wpid.face(); + + if (apa == -1 || face == -1) return; + + // // Convert 3D point to wire/time coordinates for each plane + geo_point_t p_raw = transform->backward(p, cluster_t0, apa, face); + auto [time_u, wire_u] = m_grouping->convert_3Dpoint_time_ch(p_raw, apa, face, 0); + auto [time_v, wire_v] = m_grouping->convert_3Dpoint_time_ch(p_raw, apa, face, 1); + auto [time_w, wire_w] = m_grouping->convert_3Dpoint_time_ch(p_raw, apa, face, 2); + + std::set temp_types_u; + std::set temp_types_v; + std::set temp_types_w; + + std::set saved_2dut; + std::set saved_2dvt; + std::set saved_2dwt; + + std::vector results; + results.resize(3,0); + + // Process U plane + for (auto it = temp_2dut.associated_2d_points.begin(); it != temp_2dut.associated_2d_points.end(); it++){ + CoordReadout coord_key(it->apa, it->time, it->channel); + auto charge_it = m_charge_data.find(coord_key); + if (charge_it != m_charge_data.end() && charge_it->second.charge > charge_cut) { + temp_types_u.insert(charge_it->second.flag); + if (charge_it->second.flag == 0) results.at(0)++; + saved_2dut.insert(*it); + } + } + + // Process V plane + for (auto it = temp_2dvt.associated_2d_points.begin(); it != temp_2dvt.associated_2d_points.end(); it++){ + CoordReadout coord_key(it->apa, it->time, it->channel); + auto charge_it = m_charge_data.find(coord_key); + // std::cout << "V: " << it->time/4 << " " << it->channel << " " << std::endl; + if (charge_it != m_charge_data.end() && charge_it->second.charge > charge_cut) { + // std::cout << "V: " << it->time/4 << " " << it->channel << " " << charge_it->second.charge << " " << charge_cut << std::endl; + + temp_types_v.insert(charge_it->second.flag); + if (charge_it->second.flag == 0) results.at(1)++; + saved_2dvt.insert(*it); + } + } + + // Process W plane + for (auto it = temp_2dwt.associated_2d_points.begin(); it != temp_2dwt.associated_2d_points.end(); it++){ + CoordReadout coord_key(it->apa, 
it->time, it->channel); + auto charge_it = m_charge_data.find(coord_key); + if (charge_it != m_charge_data.end() && charge_it->second.charge > charge_cut) { + temp_types_w.insert(charge_it->second.flag); + if (charge_it->second.flag == 0) results.at(2)++; + saved_2dwt.insert(*it); + } + } + + // Calculate quality ratios + if (temp_2dut.associated_2d_points.size() != 0) + results.at(0) = (saved_2dut.size() - results.at(0)*1.0)/temp_2dut.associated_2d_points.size(); + else + results.at(0) = 0; + + if (temp_2dvt.associated_2d_points.size() != 0) + results.at(1) = (saved_2dvt.size() - results.at(1)*1.0)/temp_2dvt.associated_2d_points.size(); + else + results.at(1) = 0; + + if (temp_2dwt.associated_2d_points.size() != 0) + results.at(2) = (saved_2dwt.size() - results.at(2)*1.0)/temp_2dwt.associated_2d_points.size(); + else + results.at(2) = 0; + + // Reset if only flag 0 found + if (temp_types_u.find(0) != temp_types_u.end() && temp_types_u.size() == 1){ + saved_2dut.clear(); + results.at(0) = 0; + } + if (temp_types_v.find(0) != temp_types_v.end() && temp_types_v.size() == 1){ + saved_2dvt.clear(); + results.at(1) = 0; + } + if (temp_types_w.find(0) != temp_types_w.end() && temp_types_w.size() == 1){ + saved_2dwt.clear(); + results.at(2) = 0; + } + + // std::cout << saved_2dut.size() << " " << saved_2dvt.size() << " " << saved_2dwt.size() << " " << charge_cut << std::endl; + + + // Handle dead plane scenarios + // U and V planes are dead ... 
+ if (saved_2dut.size() == 0 && saved_2dvt.size() == 0 && saved_2dwt.size() != 0){ + int channel_u = get_channel_for_wire(apa, face, 0, wire_u); + int channel_v = get_channel_for_wire(apa, face, 1, wire_v); + saved_2dut.insert(Coord2D(apa, face, time_u, wire_u, channel_u, kUlayer)); + saved_2dvt.insert(Coord2D(apa, face, time_v, wire_v, channel_v, kVlayer)); + + // W plane check for outliers + if (!flag_end_point && saved_2dwt.size() > 0) + { + std::pair ave_pos = std::make_pair(0,0); + double total_charge = 0; + for (auto it1 = saved_2dwt.begin(); it1 != saved_2dwt.end(); it1++){ + CoordReadout coord_key(it1->apa, it1->time, it1->channel); + auto charge_it = m_charge_data.find(coord_key); + if (charge_it != m_charge_data.end()){ + ave_pos.first += it1->wire * charge_it->second.charge; + ave_pos.second += it1->time * charge_it->second.charge; + total_charge += charge_it->second.charge; + } + } + if (total_charge != 0){ + ave_pos.first /= total_charge; + ave_pos.second /= total_charge; + } + double rms = 0; + for (auto it1 = saved_2dwt.begin(); it1 != saved_2dwt.end(); it1++){ + rms += pow(it1->wire - ave_pos.first, 2) + pow((it1->time - ave_pos.second)/cur_ntime_ticks, 2); + } + rms = sqrt(rms/saved_2dwt.size()); + + if (sqrt(pow(ave_pos.first - wire_w, 2) + pow((ave_pos.second - time_w)/cur_ntime_ticks, 2)) > 0.75*rms && + saved_2dwt.size() <= 5 && saved_2dwt.size() < 0.2 * temp_2dwt.associated_2d_points.size()){ + saved_2dwt.clear(); + int channel_w = get_channel_for_wire(apa, face, 2, wire_w); + saved_2dwt.insert(Coord2D(apa, face, time_w, wire_w, channel_w, kWlayer)); + results.at(2) = 0; + } + } + } + else if (saved_2dut.size() == 0 && saved_2dwt.size() == 0 && saved_2dvt.size() != 0){ + // U and W planes are dead ... 
+ int channel_u = get_channel_for_wire(apa, face, 0, wire_u); + int channel_w = get_channel_for_wire(apa, face, 2, wire_w); + saved_2dut.insert(Coord2D(apa, face, time_u, wire_u, channel_u, kUlayer)); + saved_2dwt.insert(Coord2D(apa, face, time_w, wire_w, channel_w, kWlayer)); + + // V plane check for outliers + if (!flag_end_point && saved_2dvt.size() > 0) + { + std::pair ave_pos = std::make_pair(0,0); + double total_charge = 0; + for (auto it1 = saved_2dvt.begin(); it1 != saved_2dvt.end(); it1++){ + CoordReadout coord_key(it1->apa, it1->time, it1->channel); + auto charge_it = m_charge_data.find(coord_key); + if (charge_it != m_charge_data.end()){ + ave_pos.first += it1->wire * charge_it->second.charge; + ave_pos.second += it1->time * charge_it->second.charge; + total_charge += charge_it->second.charge; + } + } + if (total_charge != 0){ + ave_pos.first /= total_charge; + ave_pos.second /= total_charge; + } + double rms = 0; + for (auto it1 = saved_2dvt.begin(); it1 != saved_2dvt.end(); it1++){ + rms += pow(it1->wire - ave_pos.first, 2) + pow((it1->time - ave_pos.second)/cur_ntime_ticks, 2); + } + rms = sqrt(rms/saved_2dvt.size()); + + if (sqrt(pow(ave_pos.first - wire_v, 2) + pow((ave_pos.second - time_v)/cur_ntime_ticks, 2)) > 0.75*rms && + saved_2dvt.size() <= 5 && saved_2dvt.size() < 0.2 * temp_2dvt.associated_2d_points.size()){ + saved_2dvt.clear(); + int channel_v = get_channel_for_wire(apa, face, 1, wire_v); + saved_2dvt.insert(Coord2D(apa, face, time_v, wire_v, channel_v, kVlayer)); + results.at(1) = 0; + } + } + } + else if (saved_2dvt.size() == 0 && saved_2dwt.size() == 0 && saved_2dut.size() != 0){ + // V and W planes are dead ... 
+ int channel_v = get_channel_for_wire(apa, face, 1, wire_v); + int channel_w = get_channel_for_wire(apa, face, 2, wire_w); + saved_2dvt.insert(Coord2D(apa, face, time_v, wire_v, channel_v, kVlayer)); + saved_2dwt.insert(Coord2D(apa, face, time_w, wire_w, channel_w, kWlayer)); + + // U plane check for outliers + if (!flag_end_point && saved_2dut.size() > 0) + { + std::pair ave_pos = std::make_pair(0,0); + double total_charge = 0; + for (auto it1 = saved_2dut.begin(); it1 != saved_2dut.end(); it1++){ + CoordReadout coord_key(it1->apa, it1->time, it1->channel); + auto charge_it = m_charge_data.find(coord_key); + if (charge_it != m_charge_data.end()){ + ave_pos.first += it1->wire * charge_it->second.charge; + ave_pos.second += it1->time * charge_it->second.charge; + total_charge += charge_it->second.charge; + } + } + if (total_charge != 0){ + ave_pos.first /= total_charge; + ave_pos.second /= total_charge; + } + double rms = 0; + for (auto it1 = saved_2dut.begin(); it1 != saved_2dut.end(); it1++){ + rms += pow(it1->wire - ave_pos.first, 2) + pow((it1->time - ave_pos.second)/cur_ntime_ticks, 2); + } + rms = sqrt(rms/saved_2dut.size()); + + if (sqrt(pow(ave_pos.first - wire_u, 2) + pow((ave_pos.second - time_u)/cur_ntime_ticks, 2)) > 0.75*rms && + saved_2dut.size() <= 5 && saved_2dut.size() < 0.2 * temp_2dut.associated_2d_points.size()){ + saved_2dut.clear(); + int channel_u = get_channel_for_wire(apa, face, 0, wire_u); + saved_2dut.insert(Coord2D(apa, face, time_u, wire_u, channel_u, kUlayer)); + results.at(0) = 0; + } + } + } + // Handle partial dead plane scenarios (only one plane dead, check outliers in others) + else if (saved_2dut.size() == 0 && saved_2dwt.size() != 0 && saved_2dvt.size() != 0){ + // Only U plane is dead, check W and V plane outliers + auto check_outliers = [&](std::set& saved_plane, std::vector& results, int result_idx, + const std::set& temp_plane, int expected_wire, int expected_time) { + if (!flag_end_point && saved_plane.size() > 0) + { + 
std::pair ave_pos = std::make_pair(0,0); + double total_charge = 0; + for (auto it1 = saved_plane.begin(); it1 != saved_plane.end(); it1++){ + CoordReadout coord_key(it1->apa, it1->time, it1->channel); + auto charge_it = m_charge_data.find(coord_key); + if (charge_it != m_charge_data.end()){ + ave_pos.first += it1->wire * charge_it->second.charge; + ave_pos.second += it1->time * charge_it->second.charge; + total_charge += charge_it->second.charge; + } + } + if (total_charge != 0){ + ave_pos.first /= total_charge; + ave_pos.second /= total_charge; + } + double rms = 0; + for (auto it1 = saved_plane.begin(); it1 != saved_plane.end(); it1++){ + rms += pow(it1->wire - ave_pos.first, 2) + pow((it1->time - ave_pos.second)/cur_ntime_ticks, 2); + } + rms = sqrt(rms/saved_plane.size()); + + if (sqrt(pow(ave_pos.first - expected_wire, 2) + pow((ave_pos.second - expected_time)/cur_ntime_ticks, 2)) > 0.75*rms && + saved_plane.size() <= 5 && saved_plane.size() < 0.2 * temp_plane.size()){ + saved_plane.clear(); + int channel = get_channel_for_wire(apa, face, result_idx == 2 ? 2 : 1, expected_wire); + WirePlaneLayer_t plane_layer = (result_idx == 2) ? 
kWlayer : kVlayer; + saved_plane.insert(Coord2D(apa, face, expected_time, expected_wire, channel, plane_layer)); + results.at(result_idx) = 0; + } + } + }; + + check_outliers(saved_2dwt, results, 2, temp_2dwt.associated_2d_points, wire_w, time_w); + check_outliers(saved_2dvt, results, 1, temp_2dvt.associated_2d_points, wire_v, time_v); + } + else if (saved_2dvt.size() == 0 && saved_2dut.size() != 0 && saved_2dwt.size() != 0){ + // Only V plane is dead, check U and W plane outliers + auto check_outliers = [&](std::set& saved_plane, std::vector& results, int result_idx, + const std::set& temp_plane, int expected_wire, int expected_time) { + if (!flag_end_point && saved_plane.size() > 0) + { + std::pair ave_pos = std::make_pair(0,0); + double total_charge = 0; + for (auto it1 = saved_plane.begin(); it1 != saved_plane.end(); it1++){ + CoordReadout coord_key(it1->apa, it1->time, it1->channel); + auto charge_it = m_charge_data.find(coord_key); + if (charge_it != m_charge_data.end()){ + ave_pos.first += it1->wire * charge_it->second.charge; + ave_pos.second += it1->time * charge_it->second.charge; + total_charge += charge_it->second.charge; + } + } + if (total_charge != 0){ + ave_pos.first /= total_charge; + ave_pos.second /= total_charge; + } + double rms = 0; + for (auto it1 = saved_plane.begin(); it1 != saved_plane.end(); it1++){ + rms += pow(it1->wire - ave_pos.first, 2) + pow((it1->time - ave_pos.second)/cur_ntime_ticks, 2); + } + rms = sqrt(rms/saved_plane.size()); + + if (sqrt(pow(ave_pos.first - expected_wire, 2) + pow((ave_pos.second - expected_time)/cur_ntime_ticks, 2)) > 0.75*rms && + saved_plane.size() <= 5 && saved_plane.size() < 0.2 * temp_plane.size()){ + saved_plane.clear(); + int channel = get_channel_for_wire(apa, face, result_idx == 0 ? 0 : 2, expected_wire); + WirePlaneLayer_t plane_layer = (result_idx == 0) ? 
kUlayer : kWlayer; + saved_plane.insert(Coord2D(apa, face, expected_time, expected_wire, channel, plane_layer)); + results.at(result_idx) = 0; + } + } + }; + + check_outliers(saved_2dut, results, 0, temp_2dut.associated_2d_points, wire_u, time_u); + check_outliers(saved_2dwt, results, 2, temp_2dwt.associated_2d_points, wire_w, time_w); + } + else if (saved_2dwt.size() == 0 && saved_2dut.size() != 0 && saved_2dvt.size() != 0){ + // Only W plane is dead, check U and V plane outliers + auto check_outliers = [&](std::set& saved_plane, std::vector& results, int result_idx, + const std::set& temp_plane, int expected_wire, int expected_time) { + if (!flag_end_point && saved_plane.size() > 0) + { + std::pair ave_pos = std::make_pair(0,0); + double total_charge = 0; + for (auto it1 = saved_plane.begin(); it1 != saved_plane.end(); it1++){ + CoordReadout coord_key(it1->apa, it1->time, it1->channel); + auto charge_it = m_charge_data.find(coord_key); + if (charge_it != m_charge_data.end()){ + ave_pos.first += it1->wire * charge_it->second.charge; + ave_pos.second += it1->time * charge_it->second.charge; + total_charge += charge_it->second.charge; + } + } + if (total_charge != 0){ + ave_pos.first /= total_charge; + ave_pos.second /= total_charge; + } + double rms = 0; + for (auto it1 = saved_plane.begin(); it1 != saved_plane.end(); it1++){ + rms += pow(it1->wire - ave_pos.first, 2) + pow((it1->time - ave_pos.second)/cur_ntime_ticks, 2); + } + rms = sqrt(rms/saved_plane.size()); + + if (sqrt(pow(ave_pos.first - expected_wire, 2) + pow((ave_pos.second - expected_time)/cur_ntime_ticks, 2)) > 0.75*rms && + saved_plane.size() <= 5 && saved_plane.size() < 0.2 * temp_plane.size()){ + saved_plane.clear(); + int channel = get_channel_for_wire(apa, face, result_idx, expected_wire); + WirePlaneLayer_t plane_layer = (result_idx == 0) ? 
kUlayer : kVlayer; + saved_plane.insert(Coord2D(apa, face, expected_time, expected_wire, channel, plane_layer)); + results.at(result_idx) = 0; + } + } + }; + + check_outliers(saved_2dut, results, 0, temp_2dut.associated_2d_points, wire_u, time_u); + check_outliers(saved_2dvt, results, 1, temp_2dvt.associated_2d_points, wire_v, time_v); + } + else if (saved_2dwt.size() == 0 && saved_2dut.size() == 0 && saved_2dvt.size() == 0){ + // All planes are dead, use fallback coordinates + int channel_u = get_channel_for_wire(apa, face, 0, wire_u); + int channel_v = get_channel_for_wire(apa, face, 1, wire_v); + int channel_w = get_channel_for_wire(apa, face, 2, wire_w); + saved_2dut.insert(Coord2D(apa, face, time_u, wire_u, channel_u, kUlayer)); + saved_2dvt.insert(Coord2D(apa, face, time_v, wire_v, channel_v, kVlayer)); + saved_2dwt.insert(Coord2D(apa, face, time_w, wire_w, channel_w, kWlayer)); + } + + // Update PlaneData with filtered results + temp_2dut.associated_2d_points = saved_2dut; + temp_2dvt.associated_2d_points = saved_2dvt; + temp_2dwt.associated_2d_points = saved_2dwt; + + // Update quantity fields with calculated results + temp_2dut.quantity = results.at(0); + temp_2dvt.quantity = results.at(1); + temp_2dwt.quantity = results.at(2); + + } + +void TrackFitting::form_map_graph(bool flag_exclusion, double end_point_factor, double mid_point_factor, int nlevel, double time_tick_cut, double charge_cut){ + // Clear existing mappings + m_3d_to_2d.clear(); + m_2d_to_3d.clear(); + + // Reset fit properties for all vertices first + for (auto vp = boost::vertices(*m_graph); vp.first != vp.second; ++vp.first) { + auto vd = *vp.first; + auto& v_bundle = (*m_graph)[vd]; + if (v_bundle.vertex) { + bool flag_fix = v_bundle.vertex->flag_fix(); + v_bundle.vertex->reset_fit_prop(); + v_bundle.vertex->flag_fix(flag_fix); + } + } + + // Collect segments and reset their fit properties + std::vector> segments; + auto edge_range = boost::edges(*m_graph); + for (auto e_it = 
edge_range.first; e_it != edge_range.second; ++e_it) { + auto& edge_bundle = (*m_graph)[*e_it]; + if (edge_bundle.segment) { + segments.push_back(edge_bundle.segment); + edge_bundle.segment->reset_fit_prop(); + } + } + + int count = 0; + + // Process each segment + for (auto e_it = edge_range.first; e_it != edge_range.second; ++e_it) { + auto& edge_bundle = (*m_graph)[*e_it]; + if (!edge_bundle.segment) continue; + + auto segment = edge_bundle.segment; + auto& fits = segment->fits(); + if (fits.empty()) continue; + + // Get start and end vertices for this segment + auto vd1 = boost::source(*e_it, *m_graph); + auto vd2 = boost::target(*e_it, *m_graph); + auto& v_bundle1 = (*m_graph)[vd1]; + auto& v_bundle2 = (*m_graph)[vd2]; + + std::shared_ptr start_v = nullptr, end_v = nullptr; + + if (v_bundle1.vertex && v_bundle2.vertex) { + // Determine which vertex is start and which is end by comparing with first/last fit points + auto& first_fit = fits.front(); + auto& last_fit = fits.back(); + + double dist1_first = (v_bundle1.vertex->fit().point - first_fit.point).magnitude(); + double dist1_last = (v_bundle1.vertex->fit().point - last_fit.point).magnitude(); + + if (dist1_first < dist1_last) { + start_v = v_bundle1.vertex; + end_v = v_bundle2.vertex; + } else { + start_v = v_bundle2.vertex; + end_v = v_bundle1.vertex; + } + } + + // Calculate distances between consecutive fit points + std::vector distances; + for (size_t i = 0; i + 1 < fits.size(); i++) { + distances.push_back((fits[i+1].point - fits[i].point).magnitude()); + } + + std::vector saved_pts; + std::vector saved_index; + std::vector saved_skip; + + // Process each fit point + for (size_t i = 0; i < fits.size(); i++) { + double dis_cut; + + if (i == 0) { + dis_cut = std::min(distances.empty() ? 
0.0 : distances[0] * end_point_factor, + 4.0/3.0 * end_point_factor * units::cm); + if (start_v && start_v->fit_range() < 0) { + start_v->fit_range(dis_cut); + } else if (start_v && dis_cut < start_v->fit_range()) { + start_v->fit_range(dis_cut); + } + } else if (i + 1 == fits.size()) { + dis_cut = std::min(distances.empty() ? 0.0 : distances.back() * end_point_factor, + 4.0/3.0 * end_point_factor * units::cm); + if (end_v && end_v->fit_range() < 0) { + end_v->fit_range(dis_cut); + } else if (end_v && dis_cut < end_v->fit_range()) { + end_v->fit_range(dis_cut); + } + } else { + double dist_prev = i > 0 ? distances[i-1] : 0.0; + double dist_next = i < distances.size() ? distances[i] : 0.0; + dis_cut = std::min(std::max(dist_prev * mid_point_factor, dist_next * mid_point_factor), + 4.0/3.0 * mid_point_factor * units::cm); + } + + // Not the first and last point - process middle points + if (i != 0 && i + 1 != fits.size()) { + TrackFitting::PlaneData temp_2dut, temp_2dvt, temp_2dwt; + form_point_association(segment, fits[i].point, temp_2dut, temp_2dvt, temp_2dwt, dis_cut, nlevel, time_tick_cut); + + if (flag_exclusion) { + update_association(segment, temp_2dut, temp_2dvt, temp_2dwt); + } + + // Examine point association + bool is_end_point = (i == 1 || i + 2 == fits.size()); + examine_point_association(segment, fits[i].point, temp_2dut, temp_2dvt, temp_2dwt, is_end_point, charge_cut); + + if (temp_2dut.quantity + temp_2dvt.quantity + temp_2dwt.quantity > 0) { + // Store in mapping structures + m_3d_to_2d[count].set_plane_data(WirePlaneLayer_t::kUlayer, temp_2dut); + m_3d_to_2d[count].set_plane_data(WirePlaneLayer_t::kVlayer, temp_2dvt); + m_3d_to_2d[count].set_plane_data(WirePlaneLayer_t::kWlayer, temp_2dwt); + + // Fill reverse mappings + for (const auto& coord : temp_2dut.associated_2d_points) { + m_2d_to_3d[coord].insert(count); + } + for (const auto& coord : temp_2dvt.associated_2d_points) { + m_2d_to_3d[coord].insert(count); + } + for (const auto& coord : 
temp_2dwt.associated_2d_points) { + m_2d_to_3d[coord].insert(count); + } + + saved_pts.push_back(fits[i].point); + saved_index.push_back(count); + saved_skip.push_back(false); + count++; + } + } else if (i == 0) { + // First point + saved_pts.push_back(fits[i].point); + saved_index.push_back(count); + saved_skip.push_back(true); + if (start_v && start_v->fit_index() == -1) { + start_v->fit_index(count); + count++; + } + } else if (i + 1 == fits.size()) { + // Last point + saved_pts.push_back(fits[i].point); + saved_index.push_back(count); + saved_skip.push_back(true); + if (end_v && end_v->fit_index() == -1) { + end_v->fit_index(count); + count++; + } + } + } + + // Set fit associate vector for the segment + segment->set_fit_associate_vec(saved_pts, saved_index, saved_skip, m_dv, "fit"); + } + + // Deal with all vertices again + for (auto vp = boost::vertices(*m_graph); vp.first != vp.second; ++vp.first) { + auto vd = *vp.first; + auto& v_bundle = (*m_graph)[vd]; + if (!v_bundle.vertex) continue; + + auto vertex = v_bundle.vertex; + double dis_cut = vertex->fit_range(); + int vertex_count = vertex->fit_index(); + + if (dis_cut > 0 && vertex_count >= 0) { + WireCell::Point pt = vertex->fit().point; + + TrackFitting::PlaneData temp_2dut, temp_2dvt, temp_2dwt; + + // For vertex, we need to pass a dummy segment - use first available segment + std::shared_ptr dummy_segment = nullptr; + if (!segments.empty()) { + dummy_segment = segments[0]; + } + + if (dummy_segment) { + form_point_association(dummy_segment, pt, temp_2dut, temp_2dvt, temp_2dwt, dis_cut, nlevel, time_tick_cut); + examine_point_association(dummy_segment, pt, temp_2dut, temp_2dvt, temp_2dwt, true, charge_cut); + + // Store vertex associations + m_3d_to_2d[vertex_count].set_plane_data(WirePlaneLayer_t::kUlayer, temp_2dut); + m_3d_to_2d[vertex_count].set_plane_data(WirePlaneLayer_t::kVlayer, temp_2dvt); + m_3d_to_2d[vertex_count].set_plane_data(WirePlaneLayer_t::kWlayer, temp_2dwt); + + // Fill reverse 
mappings + for (const auto& coord : temp_2dut.associated_2d_points) { + m_2d_to_3d[coord].insert(vertex_count); + } + for (const auto& coord : temp_2dvt.associated_2d_points) { + m_2d_to_3d[coord].insert(vertex_count); + } + for (const auto& coord : temp_2dwt.associated_2d_points) { + m_2d_to_3d[coord].insert(vertex_count); + } + } + } + } +} + + +void TrackFitting::form_map(std::vector>>& ptss, double end_point_factor, double mid_point_factor, int nlevel, double time_tick_cut, double charge_cut) { + // Implementation of form_map function + + m_3d_to_2d.clear(); + m_2d_to_3d.clear(); + + std::vector>> saved_pts; + int count = 0; + + // Calculate distances between consecutive points + std::vector distances; + for (size_t i = 0; i + 1 != ptss.size(); i++) { + distances.push_back(sqrt(pow(ptss.at(i+1).first.x() - ptss.at(i).first.x(), 2) + + pow(ptss.at(i+1).first.y() - ptss.at(i).first.y(), 2) + + pow(ptss.at(i+1).first.z() - ptss.at(i).first.z(), 2))); + } + + // Loop over the path + for (size_t i = 0; i != ptss.size(); i++) { + double dis_cut; + if (i == 0) { + dis_cut = std::min(distances.at(i) * end_point_factor, 4/3. * end_point_factor * units::cm); + } else if (i + 1 == ptss.size()) { + dis_cut = std::min(distances.back() * end_point_factor, 4/3. * end_point_factor * units::cm); + } else { + dis_cut = std::min(std::max(distances.at(i-1) * mid_point_factor, distances.at(i) * mid_point_factor), + 4/3. * mid_point_factor * units::cm); + } + + // std::cout << i << " " << distances.at(i) << " " << end_point_factor << " " << dis_cut << std::endl; + + // check point's apa and face ... + // find the apa and face ... 
+ auto wpid = m_dv->contained_by(ptss.at(i).first); + auto segment = ptss.at(i).second; + int apa = wpid.apa(); + int face = wpid.face(); + + if (apa != -1 && face != -1) { + + TrackFitting::PlaneData temp_2dut, temp_2dvt, temp_2dwt; + form_point_association(segment, ptss.at(i).first, temp_2dut, temp_2dvt, temp_2dwt, dis_cut, nlevel, time_tick_cut); + + // std::cout << i << " " << ptss.at(i).first << " " << temp_2dut.associated_2d_points.size() << " " << temp_2dvt.associated_2d_points.size() << " " << temp_2dwt.associated_2d_points.size() << std::endl; + + if (i == 0 || i == 1 || i + 1 == ptss.size() || i + 2 == ptss.size()) { + examine_point_association(segment, ptss.at(i).first, temp_2dut, temp_2dvt, temp_2dwt, true, charge_cut); + } else { + examine_point_association(segment, ptss.at(i).first, temp_2dut, temp_2dvt, temp_2dwt, false, charge_cut); + } + // std::cout << i << " E " << ptss.at(i).first << " " << temp_2dut.associated_2d_points.size() << " " << temp_2dvt.associated_2d_points.size() << " " << temp_2dwt.associated_2d_points.size() << std::endl; + + + // Fill the mapping data if we have valid associations + if (temp_2dut.quantity + temp_2dvt.quantity + temp_2dwt.quantity > 0) { + m_3d_to_2d[count].set_plane_data(WirePlaneLayer_t::kUlayer, temp_2dut); + m_3d_to_2d[count].set_plane_data(WirePlaneLayer_t::kVlayer, temp_2dvt); + m_3d_to_2d[count].set_plane_data(WirePlaneLayer_t::kWlayer, temp_2dwt); + + + // Fill reverse mapping for U plane + for (auto it = temp_2dut.associated_2d_points.begin(); it != temp_2dut.associated_2d_points.end(); it++) { + if (m_2d_to_3d.find(*it) == m_2d_to_3d.end()) { + std::set temp_set; + temp_set.insert(count); + m_2d_to_3d[*it] = temp_set; + } else { + m_2d_to_3d[*it].insert(count); + } + } + + for (auto it = temp_2dvt.associated_2d_points.begin(); it != temp_2dvt.associated_2d_points.end(); it++) { + if (m_2d_to_3d.find(*it) == m_2d_to_3d.end()) { + std::set temp_set; + temp_set.insert(count); + m_2d_to_3d[*it] = temp_set; + 
} else { + m_2d_to_3d[*it].insert(count); + } + } + + for (auto it = temp_2dwt.associated_2d_points.begin(); it != temp_2dwt.associated_2d_points.end(); it++) { + if (m_2d_to_3d.find(*it) == m_2d_to_3d.end()) { + std::set temp_set; + temp_set.insert(count); + m_2d_to_3d[*it] = temp_set; + } else { + m_2d_to_3d[*it].insert(count); + } + } + + saved_pts.push_back(std::make_pair(ptss.at(i).first, segment)); + count++; + } + } + } + + // std::cout << "Form Map: " << ptss.size() << " " << saved_pts.size() << " " << m_2d_to_3d.size() << " " << m_3d_to_2d.size() << std::endl; + + ptss = saved_pts; + + + // { + // int apa = 0, face = 0; + // auto cur_u = m_grouping->convert_3Dpoint_time_ch(ptss.back().first, apa, face, 0); + // auto cur_v = m_grouping->convert_3Dpoint_time_ch(ptss.back().first, apa, face, 1); + // auto cur_w = m_grouping->convert_3Dpoint_time_ch(ptss.back().first, apa, face, 2); + + // std::cout << std::get<0>(cur_u) << " " << std::get<1>(cur_u) << " " << std::get<1>(cur_v) << " " << std::get<1>(cur_w) << std::endl; + + // WirePlaneId wpid(kAllLayers, face, apa); + + // auto pt = std::get<0>(wpid_offsets[wpid]) + ptss.back().first.x() * std::get<0>(wpid_slopes[wpid]); + // auto pu = std::get<1>(wpid_offsets[wpid]) + std::get<1>(wpid_slopes[wpid]).first * ptss.back().first.y() + std::get<1>(wpid_slopes[wpid]).second * ptss.back().first.z(); + // auto pv = std::get<2>(wpid_offsets[wpid]) + std::get<2>(wpid_slopes[wpid]).first * ptss.back().first.y() + std::get<2>(wpid_slopes[wpid]).second * ptss.back().first.z(); + // auto pw = std::get<3>(wpid_offsets[wpid]) + std::get<3>(wpid_slopes[wpid]).first * ptss.back().first.y() + std::get<3>(wpid_slopes[wpid]).second * ptss.back().first.z(); + + // std::cout << pt << " " << pu << " " << pv << " " << pw << std::endl; + // } +} + + +WireCell::Point TrackFitting::fit_point(WireCell::Point& init_p, int i, std::shared_ptr segment, std::map, std::map, double>>& map_Udiv_fac, std::map, std::map, double>>& map_Vdiv_fac, 
std::map, std::map, double>>& map_Wdiv_fac, double offset_t, double slope_x, double offset_u, double slope_yu, double slope_zu, double offset_v, double slope_yv, double slope_zv, double offset_w, double slope_yw, double slope_zw){ + // Check if the point index exists in the 3D to 2D mapping + auto point_it = m_3d_to_2d.find(i); + if (point_it == m_3d_to_2d.end()) { + return init_p; // Return original point if no 2D associations found + } + + const auto& point_info = point_it->second; + + // Get plane data for each wire plane + auto plane_data_u = point_info.get_plane_data(WirePlaneLayer_t::kUlayer); + auto plane_data_v = point_info.get_plane_data(WirePlaneLayer_t::kVlayer); + auto plane_data_w = point_info.get_plane_data(WirePlaneLayer_t::kWlayer); + + int n_2D_u = 2 * plane_data_u.associated_2d_points.size(); + int n_2D_v = 2 * plane_data_v.associated_2d_points.size(); + int n_2D_w = 2 * plane_data_w.associated_2d_points.size(); + + // Set up Eigen matrices and vectors + Eigen::VectorXd temp_pos_3D(3), data_u_2D(n_2D_u), data_v_2D(n_2D_v), data_w_2D(n_2D_w); + Eigen::VectorXd temp_pos_3D_init(3); + Eigen::SparseMatrix RU(n_2D_u, 3); + Eigen::SparseMatrix RV(n_2D_v, 3); + Eigen::SparseMatrix RW(n_2D_w, 3); + + auto test_wpid = m_dv->contained_by(init_p); + auto cluster = segment->cluster(); + auto cluster_t0 = cluster->get_cluster_t0(); + const auto transform = m_pcts->pc_transform(cluster->get_scope_transform(cluster->get_default_scope())); + // Initialization with its raw position + auto p_raw = transform->backward(init_p, cluster_t0, test_wpid.face(), test_wpid.apa()); + + // Initialize with input point + temp_pos_3D_init(0) = p_raw.x(); + temp_pos_3D_init(1) = p_raw.y(); + temp_pos_3D_init(2) = p_raw.z(); + + // Initialize data vectors to zero + data_u_2D.setZero(); + data_v_2D.setZero(); + data_w_2D.setZero(); + + // Fill U plane data + int index = 0; + for (auto it = plane_data_u.associated_2d_points.begin(); it != plane_data_u.associated_2d_points.end(); 
it++) { + // Get charge measurement + CoordReadout charge_key(it->apa, it->time, it->channel); + double charge = m_params.default_charge_th, charge_err = m_params.default_charge_err; // Default values (100, 1000 from WCP) + + auto charge_it = m_charge_data.find(charge_key); + if (charge_it != m_charge_data.end()) { + charge = charge_it->second.charge; + charge_err = charge_it->second.charge_err; + } + + if (charge < m_params.default_charge_th) { + charge = m_params.default_charge_th; + charge_err = m_params.default_charge_err; + } + + // Get division factor + double div_factor = 1.0; + auto apa_face_key = std::make_pair(it->apa, it->face); + auto div_key = std::make_tuple(it->time, it->wire, i); + auto div_it1 = map_Udiv_fac.find(apa_face_key); + if (div_it1 != map_Udiv_fac.end()) { + auto div_it2 = div_it1->second.find(div_key); + if (div_it2 != div_it1->second.end()) { + div_factor = div_it2->second; + } + } + + double scaling = (charge / charge_err) * div_factor; + + if (plane_data_u.quantity < m_params.scaling_quality_th) { + if (plane_data_u.quantity != 0) { + scaling *= pow(plane_data_u.quantity / m_params.scaling_quality_th, 1); + } else { + scaling *= m_params.scaling_ratio; + } + } + + + if (scaling != 0) { + data_u_2D(2 * index) = scaling * (it->wire - offset_u); + data_u_2D(2 * index + 1) = scaling * (it->time - offset_t); + + RU.insert(2 * index, 1) = scaling * slope_yu; // Y --> U + RU.insert(2 * index, 2) = scaling * slope_zu; // Z --> U + RU.insert(2 * index + 1, 0) = scaling * slope_x; // X --> T + } + index++; + } + + // Fill V plane data + index = 0; + for (auto it = plane_data_v.associated_2d_points.begin(); it != plane_data_v.associated_2d_points.end(); it++) { + // Get charge measurement + CoordReadout charge_key(it->apa, it->time, it->channel); + double charge = m_params.default_charge_th, charge_err = m_params.default_charge_err; // Default values + + auto charge_it = m_charge_data.find(charge_key); + if (charge_it != m_charge_data.end()) { + 
charge = charge_it->second.charge; + charge_err = charge_it->second.charge_err; + } + + if (charge < m_params.default_charge_th) { + charge = m_params.default_charge_th; + charge_err = m_params.default_charge_err; + } + + // Get division factor + double div_factor = 1.0; + auto apa_face_key = std::make_pair(it->apa, it->face); + auto div_key = std::make_tuple(it->time, it->wire, i); + auto div_it1 = map_Vdiv_fac.find(apa_face_key); + if (div_it1 != map_Vdiv_fac.end()) { + auto div_it2 = div_it1->second.find(div_key); + if (div_it2 != div_it1->second.end()) { + div_factor = div_it2->second; + } + } + + double scaling = (charge / charge_err) * div_factor; + + if (plane_data_v.quantity < m_params.scaling_quality_th) { + if (plane_data_v.quantity != 0) { + scaling *= pow(plane_data_v.quantity / m_params.scaling_quality_th, 1); + } else { + scaling *= m_params.scaling_ratio; + } + } + + if (scaling != 0) { + data_v_2D(2 * index) = scaling * (it->wire - offset_v); + data_v_2D(2 * index + 1) = scaling * (it->time - offset_t); + + RV.insert(2 * index, 1) = scaling * slope_yv; // Y --> V + RV.insert(2 * index, 2) = scaling * slope_zv; // Z --> V + RV.insert(2 * index + 1, 0) = scaling * slope_x; // X --> T + } + index++; + } + + // Fill W plane data + index = 0; + for (auto it = plane_data_w.associated_2d_points.begin(); it != plane_data_w.associated_2d_points.end(); it++) { + // Get charge measurement + CoordReadout charge_key(it->apa, it->time, it->channel); + double charge = m_params.default_charge_th, charge_err = m_params.default_charge_err; // Default values + + auto charge_it = m_charge_data.find(charge_key); + if (charge_it != m_charge_data.end()) { + charge = charge_it->second.charge; + charge_err = charge_it->second.charge_err; + } + + if (charge < m_params.default_charge_th) { + charge = m_params.default_charge_th; + charge_err = m_params.default_charge_err; + } + + // Get division factor + double div_factor = 1.0; + auto apa_face_key = std::make_pair(it->apa, 
it->face); + auto div_key = std::make_tuple(it->time, it->wire, i); + auto div_it1 = map_Wdiv_fac.find(apa_face_key); + if (div_it1 != map_Wdiv_fac.end()) { + auto div_it2 = div_it1->second.find(div_key); + if (div_it2 != div_it1->second.end()) { + div_factor = div_it2->second; + } + } + + double scaling = (charge / charge_err) * div_factor; + + if (plane_data_w.quantity < m_params.scaling_quality_th) { + if (plane_data_w.quantity != 0) { + scaling *= pow(plane_data_w.quantity / m_params.scaling_quality_th, 1); + } else { + scaling *= m_params.scaling_ratio; + } + } + + if (scaling != 0) { + data_w_2D(2 * index) = scaling * (it->wire - offset_w); + data_w_2D(2 * index + 1) = scaling * (it->time - offset_t); + + RW.insert(2 * index, 2) = scaling * slope_zw; // Z --> W + RW.insert(2 * index + 1, 0) = scaling * slope_x; // X --> T + } + index++; + } + + // Solve the least squares problem + Eigen::SparseMatrix RUT = RU.transpose(); + Eigen::SparseMatrix RVT = RV.transpose(); + Eigen::SparseMatrix RWT = RW.transpose(); + + Eigen::BiCGSTAB> solver; + Eigen::VectorXd b = RUT * data_u_2D + RVT * data_v_2D + RWT * data_w_2D; + Eigen::SparseMatrix A = RUT * RU + RVT * RV + RWT * RW; + + solver.compute(A); + temp_pos_3D = solver.solveWithGuess(b, temp_pos_3D_init); + + WireCell::Point final_p_raw, final_p; + + // Check if solver succeeded + if (std::isnan(solver.error())) { + // Return initialization if solver failed + final_p_raw = WireCell::Point(temp_pos_3D_init(0), temp_pos_3D_init(1), temp_pos_3D_init(2)); + } else { + // Return fitted result + final_p_raw = WireCell::Point(temp_pos_3D(0), temp_pos_3D(1), temp_pos_3D(2)); + } + + final_p = transform->forward(final_p_raw, cluster_t0, test_wpid.face(), test_wpid.apa()); + + return final_p; +} + +void TrackFitting::multi_trajectory_fit(int charge_div_method, double div_sigma){ + if (!m_graph) return; + + // create pss_vec ... 
+ std::map>> map_index_pss; + auto edge_range = boost::edges(*m_graph); + for (auto e_it = edge_range.first; e_it != edge_range.second; ++e_it) { + auto& edge_bundle = (*m_graph)[*e_it]; + if (!edge_bundle.segment) continue; + auto segment = edge_bundle.segment; + const auto& fits = segment->fits(); + for (const auto& fit : fits) { + int idx = fit.index; + map_index_pss[idx] = std::make_pair(fit.point, segment); + } + } + + + // Create charge division factor maps for each APA/face combination + // Structure: [apa/face] -> [time/wire/3D_index] -> factor + std::map, std::map, double>> map_Udiv_fac; + std::map, std::map, double>> map_Vdiv_fac; + std::map, std::map, double>> map_Wdiv_fac; + + // Equal division - same as in trajectory_fit + for (auto it = m_2d_to_3d.begin(); it != m_2d_to_3d.end(); it++) { + for (auto it1 = it->second.begin(); it1 != it->second.end(); it1++) { + WirePlaneLayer_t plane = it->first.plane; + int apa = it->first.apa; + int face = it->first.face; + int time = it->first.time; + int wire = it->first.wire; + + auto apa_face_key = std::make_pair(apa, face); + auto div_key = std::make_tuple(time, wire, *it1); + + if (plane == WirePlaneLayer_t::kUlayer) { + map_Udiv_fac[apa_face_key][div_key] = 1.0 / it->second.size(); + } else if (plane == WirePlaneLayer_t::kVlayer) { + map_Vdiv_fac[apa_face_key][div_key] = 1.0 / it->second.size(); + } else if (plane == WirePlaneLayer_t::kWlayer) { + map_Wdiv_fac[apa_face_key][div_key] = 1.0 / it->second.size(); + } + } + } + + if (charge_div_method == 2) { + // Use div_sigma for Gaussian weighting + // Process each plane separately + std::map, std::map, double>>*> plane_maps = { + {WirePlaneLayer_t::kUlayer, &map_Udiv_fac}, + {WirePlaneLayer_t::kVlayer, &map_Vdiv_fac}, + {WirePlaneLayer_t::kWlayer, &map_Wdiv_fac} + }; + + for (auto& [plane, div_fac_map] : plane_maps) { + // Calculate Gaussian weights + for (auto& [apa_face, coord_idx_fac] : *div_fac_map) { + int apa = apa_face.first; + int face = 
apa_face.second; + + WirePlaneId wpid(kAllLayers, face, apa); + auto offset_it = wpid_offsets.find(wpid); + auto slope_it = wpid_slopes.find(wpid); + auto geom_it = wpid_geoms.find(wpid); + + auto offset_t = std::get<0>(offset_it->second); + auto offset_u = std::get<1>(offset_it->second); + auto offset_v = std::get<2>(offset_it->second); + auto offset_w = std::get<3>(offset_it->second); + auto slope_x = std::get<0>(slope_it->second); + auto slope_yu = std::get<1>(slope_it->second).first; + auto slope_zu = std::get<1>(slope_it->second).second; + auto slope_yv = std::get<2>(slope_it->second).first; + auto slope_zv = std::get<2>(slope_it->second).second; + auto slope_yw = std::get<3>(slope_it->second).first; + auto slope_zw = std::get<3>(slope_it->second).second; + + auto time_tick_width = std::get<0>(geom_it->second); + auto pitch_u = std::get<1>(geom_it->second); + auto pitch_v = std::get<2>(geom_it->second); + auto pitch_w = std::get<3>(geom_it->second); + + + // double sum = 0; + std::map, double> map_tw_sum; + + // Calculate weights + for (auto& [coord_idx, fac] : coord_idx_fac) { + int time = std::get<0>(coord_idx); + int wire = std::get<1>(coord_idx); + int idx = std::get<2>(coord_idx); + + auto pss_vec_it = map_index_pss.find(idx); + if (pss_vec_it == map_index_pss.end()) continue; + + auto segment = pss_vec_it->second.second; + auto cluster = segment->cluster(); + const auto transform = m_pcts->pc_transform(cluster->get_scope_transform(cluster->get_default_scope())); + double cluster_t0 = cluster->get_cluster_t0(); + + auto test_wpid = m_dv->contained_by(pss_vec_it->second.first); + // Initialization with its raw position + auto p_raw = transform->backward(pss_vec_it->second.first, cluster_t0, test_wpid.face(), test_wpid.apa()); + + double central_t = slope_x * p_raw.x() + offset_t; + double central_ch = 0; + + if (plane == WirePlaneLayer_t::kUlayer) { + central_ch = slope_yu * p_raw.y() + slope_zu * p_raw.z() + offset_u; + } else if (plane == 
WirePlaneLayer_t::kVlayer) { + central_ch = slope_yv * p_raw.y() + slope_zv * p_raw.z() + offset_v; + } else if (plane == WirePlaneLayer_t::kWlayer) { + central_ch = slope_yw * p_raw.y() + slope_zw * p_raw.z() + offset_w; + } + + double pitch = (plane == WirePlaneLayer_t::kUlayer) ? pitch_u : + (plane == WirePlaneLayer_t::kVlayer) ? pitch_v : pitch_w; + + double factor = exp(-0.5 * (pow((central_t - time) * time_tick_width, 2) + pow((central_ch - wire) * pitch, 2)) / pow(div_sigma, 2)); + + // std::cout << plane << " " << time << " " << wire << " " << idx << " " << central_t << " " << central_ch << " " << factor << std::endl; + + fac = factor; + // sum += factor; + + auto [it, inserted] = map_tw_sum.try_emplace(std::make_pair(time, wire), factor); + if (!inserted) { + it->second += factor; + } + + } + + // Normalize weights + for (auto& [coord_idx, fac] : coord_idx_fac) { + double sum = map_tw_sum[std::make_pair(std::get<0>(coord_idx), std::get<1>(coord_idx))]; + fac /= sum; + // std::cout << plane << " " << std::get<0>(coord_idx) << " " << std::get<1>(coord_idx) << " " << fac << " " << sum << std::endl; + } + } + } + } + + // Process vertices first + for (auto vp = boost::vertices(*m_graph); vp.first != vp.second; ++vp.first) { + auto vd = *vp.first; + auto& v_bundle = (*m_graph)[vd]; + if (!v_bundle.vertex) continue; + + auto vertex = v_bundle.vertex; + int i = vertex->fit_index(); + auto segment = map_index_pss[i].second; + auto cluster = segment->cluster(); + auto cluster_t0 = cluster->get_cluster_t0(); + const auto transform = m_pcts->pc_transform(cluster->get_scope_transform(cluster->get_default_scope())); + + bool flag_fit_fix = vertex->flag_fix(); + WireCell::Point init_p = vertex->fit().point; + + if (!flag_fit_fix && i >= 0) { + // Get geometry parameters for this point + auto test_wpid = m_dv->contained_by(init_p); + if (test_wpid.apa() == -1 || test_wpid.face() == -1) continue; + + WirePlaneId wpid(kAllLayers, test_wpid.face(), test_wpid.apa()); + auto 
offset_it = wpid_offsets.find(wpid); + auto slope_it = wpid_slopes.find(wpid); + + if (offset_it == wpid_offsets.end() || slope_it == wpid_slopes.end()) continue; + + auto offset_t = std::get<0>(offset_it->second); + auto offset_u = std::get<1>(offset_it->second); + auto offset_v = std::get<2>(offset_it->second); + auto offset_w = std::get<3>(offset_it->second); + auto slope_x = std::get<0>(slope_it->second); + auto slope_yu = std::get<1>(slope_it->second).first; + auto slope_zu = std::get<1>(slope_it->second).second; + auto slope_yv = std::get<2>(slope_it->second).first; + auto slope_zv = std::get<2>(slope_it->second).second; + auto slope_yw = std::get<3>(slope_it->second).first; + auto slope_zw = std::get<3>(slope_it->second).second; + + // Fit the vertex point + WireCell::Point fitted_p = fit_point(init_p, i, segment, map_Udiv_fac, map_Vdiv_fac, map_Wdiv_fac, + offset_t, slope_x, offset_u, slope_yu, slope_zu, + offset_v, slope_yv, slope_zv, offset_w, slope_yw, slope_zw); + + // Update vertex with fitted position and projections + auto& vertex_fit = vertex->fit(); + vertex_fit.point = fitted_p; + vertex_fit.index = i; + + // Store 2D projections (following WCP pattern with offsets) + auto fitted_p_raw = transform->backward(fitted_p, cluster_t0, test_wpid.apa(), test_wpid.face()); + vertex_fit.pu = offset_u + (slope_yu * fitted_p_raw.y() + slope_zu * fitted_p_raw.z()); + vertex_fit.pv = offset_v + (slope_yv * fitted_p_raw.y() + slope_zv * fitted_p_raw.z()); + vertex_fit.pw = offset_w + (slope_yw * fitted_p_raw.y() + slope_zw * fitted_p_raw.z()); + vertex_fit.pt = offset_t + slope_x * fitted_p_raw.x(); + + vertex_fit.paf = std::make_pair(test_wpid.apa(), test_wpid.face()); + } + } + + // Process segments (tracks) + for (auto e_it = edge_range.first; e_it != edge_range.second; ++e_it) { + auto& edge_bundle = (*m_graph)[*e_it]; + if (!edge_bundle.segment) continue; + + auto segment = edge_bundle.segment; + auto cluster = segment->cluster(); + auto cluster_t0 = 
cluster->get_cluster_t0(); + const auto transform = m_pcts->pc_transform(cluster->get_scope_transform(cluster->get_default_scope())); + auto& fits = segment->fits(); + if (fits.empty()) continue; + + // Get start and end vertices + auto vd1 = boost::source(*e_it, *m_graph); + auto vd2 = boost::target(*e_it, *m_graph); + auto& v_bundle1 = (*m_graph)[vd1]; + auto& v_bundle2 = (*m_graph)[vd2]; + + std::shared_ptr start_v = nullptr, end_v = nullptr; + if (v_bundle1.vertex && v_bundle2.vertex) { + // Determine which vertex is start and which is end based on fit indices + if (v_bundle1.vertex->fit_index() == 0 || + (v_bundle1.vertex->fit_index() < v_bundle2.vertex->fit_index() && v_bundle1.vertex->fit_index() >= 0)) { + start_v = v_bundle1.vertex; + end_v = v_bundle2.vertex; + } else { + start_v = v_bundle2.vertex; + end_v = v_bundle1.vertex; + } + } + + // Get initial points and fit info + std::vector init_ps; + std::vector init_indices; + std::vector init_fit_skip; + + for (const auto& fit : fits) { + init_ps.push_back(fit.point); + init_indices.push_back(fit.index); + init_fit_skip.push_back(fit.flag_fix); + } + + if (init_ps.empty()) continue; + + // Set endpoint positions from fitted vertices + if (start_v) init_ps.front() = start_v->fit().point; + if (end_v) init_ps.back() = end_v->fit().point; + + std::vector final_ps; + + // Fit each point in the segment + for (size_t i = 0; i < init_ps.size(); i++) { + if (i == 0) { + // Use start vertex position + final_ps.push_back(start_v ? start_v->fit().point : init_ps[i]); + } else if (i + 1 == init_ps.size()) { + // Use end vertex position + final_ps.push_back(end_v ? 
end_v->fit().point : init_ps[i]); + } else { + WireCell::Point temp_p = init_ps[i]; + + if (!init_fit_skip[i] && init_indices[i] >= 0) { + // Get geometry parameters + auto test_wpid = m_dv->contained_by(init_ps[i]); + if (test_wpid.apa() != -1 && test_wpid.face() != -1) { + WirePlaneId wpid(kAllLayers, test_wpid.face(), test_wpid.apa()); + auto offset_it = wpid_offsets.find(wpid); + auto slope_it = wpid_slopes.find(wpid); + + if (offset_it != wpid_offsets.end() && slope_it != wpid_slopes.end()) { + auto offset_t = std::get<0>(offset_it->second); + auto offset_u = std::get<1>(offset_it->second); + auto offset_v = std::get<2>(offset_it->second); + auto offset_w = std::get<3>(offset_it->second); + auto slope_x = std::get<0>(slope_it->second); + auto slope_yu = std::get<1>(slope_it->second).first; + auto slope_zu = std::get<1>(slope_it->second).second; + auto slope_yv = std::get<2>(slope_it->second).first; + auto slope_zv = std::get<2>(slope_it->second).second; + auto slope_yw = std::get<3>(slope_it->second).first; + auto slope_zw = std::get<3>(slope_it->second).second; + + // Fit the point + temp_p = fit_point(init_ps[i], init_indices[i], segment, map_Udiv_fac, map_Vdiv_fac, map_Wdiv_fac, + offset_t, slope_x, offset_u, slope_yu, slope_zu, + offset_v, slope_yv, slope_zv, offset_w, slope_yw, slope_zw); + } + } + } + final_ps.push_back(temp_p); + } + } + + // Apply trajectory examination/smoothing + std::vector examined_ps = examine_segment_trajectory(segment, final_ps, init_ps); + + // Update segment with fitted results + std::vector new_fits; + for (size_t i = 0; i < examined_ps.size(); i++) { + PR::Fit fit; + fit.point = examined_ps[i]; + fit.index = -1; + fit.flag_fix = false; + + // Calculate 2D projections + auto test_wpid = m_dv->contained_by(examined_ps[i]); + if (test_wpid.apa() != -1 && test_wpid.face() != -1) { + WirePlaneId wpid(kAllLayers, test_wpid.face(), test_wpid.apa()); + auto offset_it = wpid_offsets.find(wpid); + auto slope_it = 
wpid_slopes.find(wpid); + + auto examined_p_raw = transform->backward(examined_ps[i], cluster_t0, test_wpid.apa(), test_wpid.face()); + fit.paf = std::make_pair(test_wpid.apa(), test_wpid.face()); + + if (offset_it != wpid_offsets.end() && slope_it != wpid_slopes.end()) { + auto offset_t = std::get<0>(offset_it->second); + auto offset_u = std::get<1>(offset_it->second); + auto offset_v = std::get<2>(offset_it->second); + auto offset_w = std::get<3>(offset_it->second); + auto slope_x = std::get<0>(slope_it->second); + auto slope_yu = std::get<1>(slope_it->second).first; + auto slope_zu = std::get<1>(slope_it->second).second; + auto slope_yv = std::get<2>(slope_it->second).first; + auto slope_zv = std::get<2>(slope_it->second).second; + auto slope_yw = std::get<3>(slope_it->second).first; + auto slope_zw = std::get<3>(slope_it->second).second; + + fit.pu = offset_u + (slope_yu * examined_p_raw.y() + slope_zu * examined_p_raw.z()); + fit.pv = offset_v + (slope_yv * examined_p_raw.y() + slope_zv * examined_p_raw.z()); + fit.pw = offset_w + (slope_yw * examined_p_raw.y() + slope_zw * examined_p_raw.z()); + fit.pt = offset_t + slope_x * examined_p_raw.x(); + } + } + + new_fits.push_back(fit); + } + + // Update segment fits + segment->fits() = new_fits; + } +} + + +// track trajectory fitting // should fit all APA ... 
+void TrackFitting::trajectory_fit(std::vector>>& pss_vec, int charge_div_method, double div_sigma){ + if (pss_vec.empty()) return; + + // Create charge division factor maps + // apa/face --> time/wire, 3D indx --> fac + std::map, std::map, double>> map_Udiv_fac; + std::map, std::map, double>> map_Vdiv_fac; + std::map, std::map, double>> map_Wdiv_fac; + + // Charge division method + // Equal division + for (auto it = m_2d_to_3d.begin(); it != m_2d_to_3d.end(); it++) { + for (auto it1 = it->second.begin(); it1 != it->second.end(); it1++) { + WirePlaneLayer_t plane = it->first.plane; + int apa = it->first.apa; + int face = it->first.face; + int time = it->first.time; + int wire = it->first.wire; + if (plane == WirePlaneLayer_t::kUlayer) { + map_Udiv_fac[std::make_pair(apa, face)][std::make_tuple(time, wire, *it1)] = 1.0 / it->second.size(); + } else if (plane == WirePlaneLayer_t::kVlayer) { + map_Vdiv_fac[std::make_pair(apa, face)][std::make_tuple(time, wire, *it1)] = 1.0 / it->second.size(); + } else if (plane == WirePlaneLayer_t::kWlayer) { + map_Wdiv_fac[std::make_pair(apa, face)][std::make_tuple(time, wire, *it1)] = 1.0 / it->second.size(); + } + } + } + + if (charge_div_method == 2) { + // Use div_sigma for Gaussian weighting + // Process each plane separately + std::map, std::map, double>>*> plane_maps = { + {WirePlaneLayer_t::kUlayer, &map_Udiv_fac}, + {WirePlaneLayer_t::kVlayer, &map_Vdiv_fac}, + {WirePlaneLayer_t::kWlayer, &map_Wdiv_fac} + }; + + for (auto& [plane, div_fac_map] : plane_maps) { + // Calculate Gaussian weights + for (auto& [apa_face, coord_idx_fac] : *div_fac_map) { + int apa = apa_face.first; + int face = apa_face.second; + + WirePlaneId wpid(kAllLayers, face, apa); + auto offset_it = wpid_offsets.find(wpid); + auto slope_it = wpid_slopes.find(wpid); + auto geom_it = wpid_geoms.find(wpid); + + auto offset_t = std::get<0>(offset_it->second); + auto offset_u = std::get<1>(offset_it->second); + auto offset_v = std::get<2>(offset_it->second); + 
auto offset_w = std::get<3>(offset_it->second); + auto slope_x = std::get<0>(slope_it->second); + auto slope_yu = std::get<1>(slope_it->second).first; + auto slope_zu = std::get<1>(slope_it->second).second; + auto slope_yv = std::get<2>(slope_it->second).first; + auto slope_zv = std::get<2>(slope_it->second).second; + auto slope_yw = std::get<3>(slope_it->second).first; + auto slope_zw = std::get<3>(slope_it->second).second; + + auto time_tick_width = std::get<0>(geom_it->second); + auto pitch_u = std::get<1>(geom_it->second); + auto pitch_v = std::get<2>(geom_it->second); + auto pitch_w = std::get<3>(geom_it->second); + + + // double sum = 0; + std::map, double> map_tw_sum; + + // Calculate weights + for (auto& [coord_idx, fac] : coord_idx_fac) { + int time = std::get<0>(coord_idx); + int wire = std::get<1>(coord_idx); + int idx = std::get<2>(coord_idx); + + + + auto segment = pss_vec.at(idx).second; + auto cluster = segment->cluster(); + const auto transform = m_pcts->pc_transform(cluster->get_scope_transform(cluster->get_default_scope())); + double cluster_t0 = cluster->get_cluster_t0(); + + auto test_wpid = m_dv->contained_by(pss_vec[idx].first); + // Initialization with its raw position + auto p_raw = transform->backward(pss_vec[idx].first, cluster_t0, test_wpid.face(), test_wpid.apa()); + + double central_t = slope_x * p_raw.x() + offset_t; + double central_ch = 0; + + if (plane == WirePlaneLayer_t::kUlayer) { + central_ch = slope_yu * p_raw.y() + slope_zu * p_raw.z() + offset_u; + } else if (plane == WirePlaneLayer_t::kVlayer) { + central_ch = slope_yv * p_raw.y() + slope_zv * p_raw.z() + offset_v; + } else if (plane == WirePlaneLayer_t::kWlayer) { + central_ch = slope_yw * p_raw.y() + slope_zw * p_raw.z() + offset_w; + } + + double pitch = (plane == WirePlaneLayer_t::kUlayer) ? pitch_u : + (plane == WirePlaneLayer_t::kVlayer) ? 
pitch_v : pitch_w; + + double factor = exp(-0.5 * (pow((central_t - time) * time_tick_width, 2) + pow((central_ch - wire) * pitch, 2)) / pow(div_sigma, 2)); + + // std::cout << plane << " " << time << " " << wire << " " << idx << " " << central_t << " " << central_ch << " " << factor << std::endl; + + fac = factor; + // sum += factor; + + auto [it, inserted] = map_tw_sum.try_emplace(std::make_pair(time, wire), factor); + if (!inserted) { + it->second += factor; + } + + } + + + + // Normalize weights + for (auto& [coord_idx, fac] : coord_idx_fac) { + double sum = map_tw_sum[std::make_pair(std::get<0>(coord_idx), std::get<1>(coord_idx))]; + fac /= sum; + // std::cout << plane << " " << std::get<0>(coord_idx) << " " << std::get<1>(coord_idx) << " " << fac << " " << sum << std::endl; + } + } + } + } + + + // Main fitting loop using Eigen + Eigen::VectorXd pos_3D(3 * pss_vec.size()); + + for (size_t i = 0; i < pss_vec.size(); i++) { + // Get 2D associations for this 3D point + const auto& point_info = m_3d_to_2d[i]; + + auto segment = pss_vec.at(i).second; + auto cluster = segment->cluster(); + const auto transform = m_pcts->pc_transform(cluster->get_scope_transform(cluster->get_default_scope())); + double cluster_t0 = cluster->get_cluster_t0(); + + auto plane_data_u = point_info.get_plane_data(WirePlaneLayer_t::kUlayer); + auto plane_data_v = point_info.get_plane_data(WirePlaneLayer_t::kVlayer); + auto plane_data_w = point_info.get_plane_data(WirePlaneLayer_t::kWlayer); + + int n_2D_u = 2 * plane_data_u.associated_2d_points.size(); + int n_2D_v = 2 * plane_data_v.associated_2d_points.size(); + int n_2D_w = 2 * plane_data_w.associated_2d_points.size(); + + // std::cout << i << " " << n_2D_u << " " << n_2D_v << " " << n_2D_w << std::endl; + + Eigen::VectorXd temp_pos_3D(3), data_u_2D(n_2D_u), data_v_2D(n_2D_v), data_w_2D(n_2D_w); + Eigen::VectorXd temp_pos_3D_init(3); + Eigen::SparseMatrix RU(n_2D_u, 3); + Eigen::SparseMatrix RV(n_2D_v, 3); + Eigen::SparseMatrix 
RW(n_2D_w, 3); + + auto test_wpid = m_dv->contained_by(pss_vec[i].first); + // Initialization with its raw position + auto p_raw = transform->backward(pss_vec[i].first, cluster_t0, test_wpid.face(), test_wpid.apa()); + + temp_pos_3D_init(0) = p_raw.x(); + temp_pos_3D_init(1) = p_raw.y(); + temp_pos_3D_init(2) = p_raw.z(); + + // Initialize data vectors + data_u_2D.setZero(); + data_v_2D.setZero(); + data_w_2D.setZero(); + + // Fill U plane data + int index = 0; + for (auto it = plane_data_u.associated_2d_points.begin(); it != plane_data_u.associated_2d_points.end(); it++) { + + // Get charge measurement + CoordReadout charge_key(it->apa, it->time, it->channel); + double charge = m_params.default_charge_th, charge_err = m_params.default_charge_err; // Default values + + auto charge_it = m_charge_data.find(charge_key); + if (charge_it != m_charge_data.end()) { + charge = charge_it->second.charge; + charge_err = charge_it->second.charge_err; + } + + if (charge < m_params.default_charge_th) { + charge = m_params.default_charge_th; + charge_err = m_params.default_charge_err; + } + + // Get division factor + double div_factor = 1.0; + auto apa_face_key = std::make_pair(it->apa, it->face); + auto div_key = std::make_tuple(it->time, it->wire, (int)i); + auto div_it1 = map_Udiv_fac.find(apa_face_key); + if (div_it1 != map_Udiv_fac.end()) { + auto div_it2 = div_it1->second.find(div_key); + if (div_it2 != div_it1->second.end()) { + div_factor = div_it2->second; + } + } + + double scaling = (charge / charge_err) * div_factor; + + // Apply quality factor (simplified version) + if (plane_data_u.quantity < m_params.scaling_quality_th) { + if (plane_data_u.quantity != 0) { + scaling *= pow(plane_data_u.quantity / m_params.scaling_quality_th, 1); + } else { + scaling *= m_params.scaling_ratio; + } + } + + WirePlaneId wpid(kAllLayers, it->face, it->apa); + auto offset_it = wpid_offsets.find(wpid); + auto slope_it = wpid_slopes.find(wpid); + + auto offset_t = 
std::get<0>(offset_it->second); + auto offset_u = std::get<1>(offset_it->second); + auto slope_x = std::get<0>(slope_it->second); + auto slope_yu = std::get<1>(slope_it->second).first; + auto slope_zu = std::get<1>(slope_it->second).second; + + if (scaling != 0) { + data_u_2D(2 * index) = scaling * (it->wire - offset_u); + data_u_2D(2 * index + 1) = scaling * (it->time - offset_t); + + RU.insert(2 * index, 1) = scaling * slope_yu; // Y --> U + RU.insert(2 * index, 2) = scaling * slope_zu; // Z --> U + RU.insert(2 * index + 1, 0) = scaling * slope_x; // X --> T + } + // std::cout << index << " U " << n_2D_u << std::endl; + index++; + } + + // std::cout << "Fill V " << std::endl; + // Fill V plane data (similar to U) + index = 0; + for (auto it = plane_data_v.associated_2d_points.begin(); it != plane_data_v.associated_2d_points.end(); it++) { + + // Get charge measurement + CoordReadout charge_key(it->apa, it->time, it->channel); + double charge = m_params.default_charge_th, charge_err = m_params.default_charge_err; // Default values + + auto charge_it = m_charge_data.find(charge_key); + if (charge_it != m_charge_data.end()) { + charge = charge_it->second.charge; + charge_err = charge_it->second.charge_err; + } + + if (charge < m_params.default_charge_th) { + charge = m_params.default_charge_th; + charge_err = m_params.default_charge_err; + } + + // Get division factor + double div_factor = 1.0; + auto apa_face_key = std::make_pair(it->apa, it->face); + auto div_key = std::make_tuple(it->time, it->wire, (int)i); + auto div_it1 = map_Vdiv_fac.find(apa_face_key); + if (div_it1 != map_Vdiv_fac.end()) { + auto div_it2 = div_it1->second.find(div_key); + if (div_it2 != div_it1->second.end()) { + div_factor = div_it2->second; + } + } + + double scaling = (charge / charge_err) * div_factor; + + // Apply quality factor (simplified version) + if (plane_data_v.quantity < m_params.scaling_quality_th) { + if (plane_data_v.quantity != 0) { + scaling *= pow(plane_data_v.quantity / 
m_params.scaling_quality_th, 1); + } else { + scaling *= m_params.scaling_ratio; + } + } + + WirePlaneId wpid(kAllLayers, it->face, it->apa); + auto offset_it = wpid_offsets.find(wpid); + auto slope_it = wpid_slopes.find(wpid); + + auto offset_t = std::get<0>(offset_it->second); + auto offset_v = std::get<2>(offset_it->second); + auto slope_x = std::get<0>(slope_it->second); + auto slope_yv = std::get<2>(slope_it->second).first; + auto slope_zv = std::get<2>(slope_it->second).second; + + // std::cout << "Test: " << std::endl; + if (scaling != 0) { + data_v_2D(2 * index) = scaling * (it->wire - offset_v); + data_v_2D(2 * index + 1) = scaling * (it->time - offset_t); + + RV.insert(2 * index, 1) = scaling * slope_yv; // Y --> V + RV.insert(2 * index, 2) = scaling * slope_zv; // Z --> V + RV.insert(2 * index + 1, 0) = scaling * slope_x; // X --> T + } + // std::cout << index << " V " << n_2D_v << std::endl; + + index++; + } + + // std::cout << "Fill W " << std::endl; + + // Fill W plane data (similar to U and V) + index = 0; + for (auto it = plane_data_w.associated_2d_points.begin(); it != plane_data_w.associated_2d_points.end(); it++) { + + // Get charge measurement + CoordReadout charge_key(it->apa, it->time, it->channel); + double charge = m_params.default_charge_th, charge_err = m_params.default_charge_err; // Default values + + auto charge_it = m_charge_data.find(charge_key); + if (charge_it != m_charge_data.end()) { + charge = charge_it->second.charge; + charge_err = charge_it->second.charge_err; + } + + if (charge < m_params.default_charge_th) { + charge = m_params.default_charge_th; + charge_err = m_params.default_charge_err; + } + + // Get division factor + double div_factor = 1.0; + auto apa_face_key = std::make_pair(it->apa, it->face); + auto div_key = std::make_tuple(it->time, it->wire, (int)i); + auto div_it1 = map_Wdiv_fac.find(apa_face_key); + if (div_it1 != map_Wdiv_fac.end()) { + auto div_it2 = div_it1->second.find(div_key); + if (div_it2 != 
div_it1->second.end()) { + div_factor = div_it2->second; + } + } + + double scaling = (charge / charge_err) * div_factor; + + // Apply quality factor (simplified version) + if (plane_data_w.quantity < m_params.scaling_quality_th) { + if (plane_data_w.quantity != 0) { + scaling *= pow(plane_data_w.quantity / m_params.scaling_quality_th, 1); + } else { + scaling *= m_params.scaling_ratio; + } + } + + WirePlaneId wpid(kAllLayers, it->face, it->apa); + auto offset_it = wpid_offsets.find(wpid); + auto slope_it = wpid_slopes.find(wpid); + + auto offset_t = std::get<0>(offset_it->second); + auto offset_w = std::get<3>(offset_it->second); + auto slope_x = std::get<0>(slope_it->second); + auto slope_yw = std::get<3>(slope_it->second).first; + auto slope_zw = std::get<3>(slope_it->second).second; + + if (scaling != 0) { + data_w_2D(2 * index) = scaling * (it->wire - offset_w); + data_w_2D(2 * index + 1) = scaling * (it->time - offset_t); + + RW.insert(2 * index, 1) = scaling * slope_yw; // Y --> W + RW.insert(2 * index, 2) = scaling * slope_zw; // Z --> W + RW.insert(2 * index + 1, 0) = scaling * slope_x; // X --> T + } + // std::cout << index << " W " << n_2D_w << std::endl; + + index++; + } + + // Solve the least squares problem + Eigen::SparseMatrix RUT = RU.transpose(); + Eigen::SparseMatrix RVT = RV.transpose(); + Eigen::SparseMatrix RWT = RW.transpose(); + + Eigen::BiCGSTAB> solver; + Eigen::VectorXd b = RUT * data_u_2D + RVT * data_v_2D + RWT * data_w_2D; + Eigen::SparseMatrix A = RUT * RU + RVT * RV + RWT * RW; + + solver.compute(A); + temp_pos_3D = solver.solveWithGuess(b, temp_pos_3D_init); + + // Store result or use initial position if solver failed + // these are raw positions ... 
+ if (std::isnan(solver.error())) { + pos_3D(3 * i) = temp_pos_3D_init(0); + pos_3D(3 * i + 1) = temp_pos_3D_init(1); + pos_3D(3 * i + 2) = temp_pos_3D_init(2); + } else { + pos_3D(3 * i) = temp_pos_3D(0); + pos_3D(3 * i + 1) = temp_pos_3D(1); + pos_3D(3 * i + 2) = temp_pos_3D(2); + } + // std::cout << "Track Fitting: " << i << " " << temp_pos_3D(0) << " " << temp_pos_3D(1) << " " << temp_pos_3D(2) << " " << temp_pos_3D_init(0) << " " << temp_pos_3D_init(1) << " " << temp_pos_3D_init(2) << std::endl; + } + + // Clear and rebuild fine tracking path + fine_tracking_path.clear(); + pu.clear(); + pv.clear(); + pw.clear(); + pt.clear(); + paf.clear(); + + std::vector>> temp_fine_tracking_path; + std::vector > saved_paf; + int skip_count = 0; + + for (size_t i = 0; i < pss_vec.size(); i++) { + WireCell::Point p_raw(pos_3D(3 * i), pos_3D(3 * i + 1), pos_3D(3 * i + 2)); + auto segment = pss_vec.at(i).second; + auto cluster = segment->cluster(); + const auto transform = m_pcts->pc_transform(cluster->get_scope_transform(cluster->get_default_scope())); + double cluster_t0 = cluster->get_cluster_t0(); + auto test_wpid = m_dv->contained_by(pss_vec[i].first); + + auto p = transform->forward(p_raw, cluster_t0, test_wpid.face(), test_wpid.apa()); + auto apa_face = std::make_pair(test_wpid.face(), test_wpid.apa()); + // all corrected points ... + bool flag_skip = skip_trajectory_point(p, apa_face, i, pss_vec, fine_tracking_path); + + // std::cout << "Skip: " << i << " " << flag_skip << std::endl; + // Protection against too many consecutive skips + if (flag_skip) { + skip_count++; + if (skip_count <= 3) { + continue; + } else { + skip_count = 0; + } + } + + // now all corrected points ... 
+ temp_fine_tracking_path.push_back(pss_vec[i]); + fine_tracking_path.push_back(std::make_pair(p, segment)); + saved_paf.push_back(std::make_pair(test_wpid.face(), test_wpid.apa())); + } + + // Apply trajectory smoothing (simplified version of the area-based correction) + for (size_t i = 0; i < fine_tracking_path.size(); i++) { + bool flag_replace = false; + + // Check triangle area for smoothness (-1, +1 neighbors) + if (i != 0 && i + 1 != fine_tracking_path.size()) { + double a = sqrt(pow(fine_tracking_path[i-1].first.x() - fine_tracking_path[i].first.x(), 2) + + pow(fine_tracking_path[i-1].first.y() - fine_tracking_path[i].first.y(), 2) + + pow(fine_tracking_path[i-1].first.z() - fine_tracking_path[i].first.z(), 2)); + double b = sqrt(pow(fine_tracking_path[i+1].first.x() - fine_tracking_path[i].first.x(), 2) + + pow(fine_tracking_path[i+1].first.y() - fine_tracking_path[i].first.y(), 2) + + pow(fine_tracking_path[i+1].first.z() - fine_tracking_path[i].first.z(), 2)); + double c = sqrt(pow(fine_tracking_path[i-1].first.x() - fine_tracking_path[i+1].first.x(), 2) + + pow(fine_tracking_path[i-1].first.y() - fine_tracking_path[i+1].first.y(), 2) + + pow(fine_tracking_path[i-1].first.z() - fine_tracking_path[i+1].first.z(), 2)); + + double s = (a + b + c) / 2.0; + double area1 = sqrt(s * (s - a) * (s - b) * (s - c)); + + // Compare with original point + a = sqrt(pow(fine_tracking_path[i-1].first.x() - temp_fine_tracking_path[i].first.x(), 2) + + pow(fine_tracking_path[i-1].first.y() - temp_fine_tracking_path[i].first.y(), 2) + + pow(fine_tracking_path[i-1].first.z() - temp_fine_tracking_path[i].first.z(), 2)); + b = sqrt(pow(fine_tracking_path[i+1].first.x() - temp_fine_tracking_path[i].first.x(), 2) + + pow(fine_tracking_path[i+1].first.y() - temp_fine_tracking_path[i].first.y(), 2) + + pow(fine_tracking_path[i+1].first.z() - temp_fine_tracking_path[i].first.z(), 2)); + + s = (a + b + c) / 2.0; + double area2 = sqrt(s * (s - a) * (s - b) * (s - c)); + + if (area1 > 
m_params.area_ratio1 * c && area1 > m_params.area_ratio2 * area2) { + flag_replace = true; + } + } + + //-2, +1 + if ((!flag_replace) && i>=2 && i+1 != fine_tracking_path.size()){ + double a = sqrt(pow(fine_tracking_path.at(i-2).first.x() - fine_tracking_path.at(i).first.x(),2) + +pow(fine_tracking_path.at(i-2).first.y() - fine_tracking_path.at(i).first.y(),2) + +pow(fine_tracking_path.at(i-2).first.z() - fine_tracking_path.at(i).first.z(),2)); + double b = sqrt(pow(fine_tracking_path.at(i+1).first.x() - fine_tracking_path.at(i).first.x(),2) + +pow(fine_tracking_path.at(i+1).first.y() - fine_tracking_path.at(i).first.y(),2) + +pow(fine_tracking_path.at(i+1).first.z() - fine_tracking_path.at(i).first.z(),2)); + double c = sqrt(pow(fine_tracking_path.at(i-2).first.x() - fine_tracking_path.at(i+1).first.x(),2) + +pow(fine_tracking_path.at(i-2).first.y() - fine_tracking_path.at(i+1).first.y(),2) + +pow(fine_tracking_path.at(i-2).first.z() - fine_tracking_path.at(i+1).first.z(),2)); + double s = (a+b+c)/2.; + double area1 = sqrt(s*(s-a)*(s-b)*(s-c)); + + a = sqrt(pow(fine_tracking_path.at(i-2).first.x() - temp_fine_tracking_path.at(i).first.x(),2) + +pow(fine_tracking_path.at(i-2).first.y() - temp_fine_tracking_path.at(i).first.y(),2) + +pow(fine_tracking_path.at(i-2).first.z() - temp_fine_tracking_path.at(i).first.z(),2)); + b = sqrt(pow(fine_tracking_path.at(i+1).first.x() - temp_fine_tracking_path.at(i).first.x(),2) + +pow(fine_tracking_path.at(i+1).first.y() - temp_fine_tracking_path.at(i).first.y(),2) + +pow(fine_tracking_path.at(i+1).first.z() - temp_fine_tracking_path.at(i).first.z(),2)); + s = (a+b+c)/2.; + double area2 = sqrt(s*(s-a)*(s-b)*(s-c)); + //std::cout << i << " B " << area1/c << " " << area2/c << std::endl; + if (area1 > 1.8*units::mm * c && area1 > 1.7 * area2) flag_replace = true; + } + //-1, +2 + if ((!flag_replace) && i>0 && i+2 1.8*units::mm * c && area1 > 1.7 * area2) flag_replace = true; + } + + + if (flag_replace) { + fine_tracking_path[i] = 
temp_fine_tracking_path[i]; + } + + // std::cout << i << " " << flag_replace << " " << std::endl; + } + + // Generate 2D projections + pu.clear(); + pv.clear(); + pw.clear(); + pt.clear(); + paf.clear(); + for (size_t i = 0; i < fine_tracking_path.size(); i++) { + WireCell::Point p = fine_tracking_path[i].first; + auto segment = fine_tracking_path[i].second; + auto cluster = segment->cluster(); + const auto transform = m_pcts->pc_transform(cluster->get_scope_transform(cluster->get_default_scope())); + double cluster_t0 = cluster->get_cluster_t0(); + + int apa = saved_paf.at(i).first; + int face = saved_paf.at(i).second; + + auto p_raw = transform->backward(p, cluster_t0, apa, face); + WirePlaneId wpid(kAllLayers, face, apa); + auto offset_it = wpid_offsets.find(wpid); + auto slope_it = wpid_slopes.find(wpid); + + auto offset_t = std::get<0>(offset_it->second); + auto offset_u = std::get<1>(offset_it->second); + auto offset_v = std::get<2>(offset_it->second); + auto offset_w = std::get<3>(offset_it->second); + auto slope_x = std::get<0>(slope_it->second); + auto slope_yu = std::get<1>(slope_it->second).first; + auto slope_zu = std::get<1>(slope_it->second).second; + auto slope_yv = std::get<2>(slope_it->second).first; + auto slope_zv = std::get<2>(slope_it->second).second; + auto slope_yw = std::get<3>(slope_it->second).first; + auto slope_zw = std::get<3>(slope_it->second).second; + + pu.push_back(offset_u + (slope_yu * p_raw.y() + slope_zu * p_raw.z())); + pv.push_back(offset_v + (slope_yv * p_raw.y() + slope_zv * p_raw.z())); + pw.push_back(offset_w + (slope_yw * p_raw.y() + slope_zw * p_raw.z())); + pt.push_back(offset_t + slope_x * p_raw.x()); + paf.push_back(std::make_pair(apa, face)); + + } + + // std::cout << m_params.DL << std::endl; + + + // Update the input vector with the fitted results + pss_vec = fine_tracking_path; +} + +std::vector TrackFitting::examine_segment_trajectory(std::shared_ptr segment, std::vector& final_ps_vec, std::vector& init_ps_vec){ 
+ // Create local trajectory data structures + std::vector>> pss_vec; + std::vector>> fine_tracking_path; + std::vector>> temp_fine_tracking_path; + std::vector> saved_paf; + + // Initialize pss_vec from input vectors + if (final_ps_vec.size() != init_ps_vec.size()) { + return std::vector(); // Return empty if sizes don't match + } + + for (size_t i = 0; i < final_ps_vec.size(); i++) { + pss_vec.push_back(std::make_pair(final_ps_vec[i], segment)); + } + + // First pass: apply skip_trajectory_point logic + int skip_count = 0; + for (size_t i = 0; i < pss_vec.size(); i++) { + WireCell::Point p = final_ps_vec[i]; + + // Get APA and face information + auto test_wpid = m_dv->contained_by(p); + auto apa_face = std::make_pair(test_wpid.apa(), test_wpid.face()); + + // Apply skip trajectory point check + bool flag_skip = skip_trajectory_point(p, apa_face, i, pss_vec, fine_tracking_path); + + // Vertex points (first and last) should not be skipped + if (i == 0 || i + 1 == final_ps_vec.size()) { + flag_skip = false; + } + + // Protection against too many consecutive skips + if (flag_skip) { + skip_count++; + if (skip_count <= 3) { + continue; + } else { + skip_count = 0; + } + } + + // Store points for trajectory smoothing + temp_fine_tracking_path.push_back(std::make_pair(init_ps_vec[i], segment)); + fine_tracking_path.push_back(std::make_pair(p, segment)); + saved_paf.push_back(std::make_pair(test_wpid.apa(), test_wpid.face())); + } + + // Second pass: Apply trajectory smoothing (area-based correction) + for (size_t i = 0; i < fine_tracking_path.size(); i++) { + bool flag_replace = false; + + // Check triangle area for smoothness (-1, +1 neighbors) + if (i != 0 && i + 1 != fine_tracking_path.size()) { + double a = sqrt(pow(fine_tracking_path[i-1].first.x() - fine_tracking_path[i].first.x(), 2) + + pow(fine_tracking_path[i-1].first.y() - fine_tracking_path[i].first.y(), 2) + + pow(fine_tracking_path[i-1].first.z() - fine_tracking_path[i].first.z(), 2)); + double b = 
sqrt(pow(fine_tracking_path[i+1].first.x() - fine_tracking_path[i].first.x(), 2) + + pow(fine_tracking_path[i+1].first.y() - fine_tracking_path[i].first.y(), 2) + + pow(fine_tracking_path[i+1].first.z() - fine_tracking_path[i].first.z(), 2)); + double c = sqrt(pow(fine_tracking_path[i-1].first.x() - fine_tracking_path[i+1].first.x(), 2) + + pow(fine_tracking_path[i-1].first.y() - fine_tracking_path[i+1].first.y(), 2) + + pow(fine_tracking_path[i-1].first.z() - fine_tracking_path[i+1].first.z(), 2)); + + double s = (a + b + c) / 2.0; + double area1 = sqrt(s * (s - a) * (s - b) * (s - c)); + + // Compare with original point + a = sqrt(pow(fine_tracking_path[i-1].first.x() - temp_fine_tracking_path[i].first.x(), 2) + + pow(fine_tracking_path[i-1].first.y() - temp_fine_tracking_path[i].first.y(), 2) + + pow(fine_tracking_path[i-1].first.z() - temp_fine_tracking_path[i].first.z(), 2)); + b = sqrt(pow(fine_tracking_path[i+1].first.x() - temp_fine_tracking_path[i].first.x(), 2) + + pow(fine_tracking_path[i+1].first.y() - temp_fine_tracking_path[i].first.y(), 2) + + pow(fine_tracking_path[i+1].first.z() - temp_fine_tracking_path[i].first.z(), 2)); + + s = (a + b + c) / 2.0; + double area2 = sqrt(s * (s - a) * (s - b) * (s - c)); + + if (area1 > 1.8*units::mm * c && area1 > 1.7 * area2) { + flag_replace = true; + } + } + + // -2, +1 neighbor check + if ((!flag_replace) && i >= 2 && i + 1 != fine_tracking_path.size()) { + double a = sqrt(pow(fine_tracking_path[i-2].first.x() - fine_tracking_path[i].first.x(), 2) + + pow(fine_tracking_path[i-2].first.y() - fine_tracking_path[i].first.y(), 2) + + pow(fine_tracking_path[i-2].first.z() - fine_tracking_path[i].first.z(), 2)); + double b = sqrt(pow(fine_tracking_path[i+1].first.x() - fine_tracking_path[i].first.x(), 2) + + pow(fine_tracking_path[i+1].first.y() - fine_tracking_path[i].first.y(), 2) + + pow(fine_tracking_path[i+1].first.z() - fine_tracking_path[i].first.z(), 2)); + double c = 
sqrt(pow(fine_tracking_path[i-2].first.x() - fine_tracking_path[i+1].first.x(), 2) + + pow(fine_tracking_path[i-2].first.y() - fine_tracking_path[i+1].first.y(), 2) + + pow(fine_tracking_path[i-2].first.z() - fine_tracking_path[i+1].first.z(), 2)); + double s = (a + b + c) / 2.0; + double area1 = sqrt(s * (s - a) * (s - b) * (s - c)); + + a = sqrt(pow(fine_tracking_path[i-2].first.x() - temp_fine_tracking_path[i].first.x(), 2) + + pow(fine_tracking_path[i-2].first.y() - temp_fine_tracking_path[i].first.y(), 2) + + pow(fine_tracking_path[i-2].first.z() - temp_fine_tracking_path[i].first.z(), 2)); + b = sqrt(pow(fine_tracking_path[i+1].first.x() - temp_fine_tracking_path[i].first.x(), 2) + + pow(fine_tracking_path[i+1].first.y() - temp_fine_tracking_path[i].first.y(), 2) + + pow(fine_tracking_path[i+1].first.z() - temp_fine_tracking_path[i].first.z(), 2)); + s = (a + b + c) / 2.0; + double area2 = sqrt(s * (s - a) * (s - b) * (s - c)); + + if (area1 > 1.8*units::mm * c && area1 > 1.7 * area2) { + flag_replace = true; + } + } + + // -1, +2 neighbor check + if ((!flag_replace) && i > 0 && i + 2 < fine_tracking_path.size()) { + double a = sqrt(pow(fine_tracking_path[i-1].first.x() - fine_tracking_path[i].first.x(), 2) + + pow(fine_tracking_path[i-1].first.y() - fine_tracking_path[i].first.y(), 2) + + pow(fine_tracking_path[i-1].first.z() - fine_tracking_path[i].first.z(), 2)); + double b = sqrt(pow(fine_tracking_path[i+2].first.x() - fine_tracking_path[i].first.x(), 2) + + pow(fine_tracking_path[i+2].first.y() - fine_tracking_path[i].first.y(), 2) + + pow(fine_tracking_path[i+2].first.z() - fine_tracking_path[i].first.z(), 2)); + double c = sqrt(pow(fine_tracking_path[i-1].first.x() - fine_tracking_path[i+2].first.x(), 2) + + pow(fine_tracking_path[i-1].first.y() - fine_tracking_path[i+2].first.y(), 2) + + pow(fine_tracking_path[i-1].first.z() - fine_tracking_path[i+2].first.z(), 2)); + double s = (a + b + c) / 2.0; + double area1 = sqrt(s * (s - a) * (s - b) * (s - 
c)); + + a = sqrt(pow(fine_tracking_path[i-1].first.x() - temp_fine_tracking_path[i].first.x(), 2) + + pow(fine_tracking_path[i-1].first.y() - temp_fine_tracking_path[i].first.y(), 2) + + pow(fine_tracking_path[i-1].first.z() - temp_fine_tracking_path[i].first.z(), 2)); + b = sqrt(pow(fine_tracking_path[i+2].first.x() - temp_fine_tracking_path[i].first.x(), 2) + + pow(fine_tracking_path[i+2].first.y() - temp_fine_tracking_path[i].first.y(), 2) + + pow(fine_tracking_path[i+2].first.z() - temp_fine_tracking_path[i].first.z(), 2)); + s = (a + b + c) / 2.0; + double area2 = sqrt(s * (s - a) * (s - b) * (s - c)); + + if (area1 > 1.8*units::mm * c && area1 > 1.7 * area2) { + flag_replace = true; + } + } + + // Replace with original point if flagged + if (flag_replace) { + fine_tracking_path[i] = temp_fine_tracking_path[i]; + } + } + + // Extract the final trajectory points + std::vector result_ps; + for (const auto& point_pair : fine_tracking_path) { + result_ps.push_back(point_pair.first); + } + + return result_ps; +} + + +bool TrackFitting::skip_trajectory_point(WireCell::Point& p, std::pair& apa_face, int i, std::vector>>& pss_vec, std::vector>>& fine_tracking_path){ + // Extract APA and face information + int apa = apa_face.first; + int face = apa_face.second; + + // Get geometry parameters for this APA/face + WirePlaneId wpid(kAllLayers, face, apa); + + auto offset_it = wpid_offsets.find(wpid); + auto slope_it = wpid_slopes.find(wpid); + + if (offset_it == wpid_offsets.end() || slope_it == wpid_slopes.end()) { + return false; // Can't process without geometry info + } + + // Extract offsets and slopes + double offset_t = std::get<0>(offset_it->second); + double offset_u = std::get<1>(offset_it->second); + double offset_v = std::get<2>(offset_it->second); + double offset_w = std::get<3>(offset_it->second); + + double slope_x = std::get<0>(slope_it->second); + double slope_yu = std::get<1>(slope_it->second).first; + double slope_zu = 
std::get<1>(slope_it->second).second; + double slope_yv = std::get<2>(slope_it->second).first; + double slope_zv = std::get<2>(slope_it->second).second; + double slope_yw = std::get<3>(slope_it->second).first; + double slope_zw = std::get<3>(slope_it->second).second; + + auto segment = pss_vec.at(i).second; + auto cluster = segment->cluster(); + const auto transform = m_pcts->pc_transform(cluster->get_scope_transform(cluster->get_default_scope())); + double cluster_t0 = cluster->get_cluster_t0(); + + auto first_blob = cluster->children()[0]; + int cur_ntime_ticks = first_blob->slice_index_max() - first_blob->slice_index_min(); + + // Initialization with its raw position + auto p_raw = transform->backward(p, cluster_t0, face, apa); + // Calculate 2D projections for current point p + int t1 = std::round((offset_t + slope_x * p_raw.x())/cur_ntime_ticks) * cur_ntime_ticks; // this needs some fix ... + int u1 = std::round(offset_u + (slope_yu * p_raw.y() + slope_zu * p_raw.z())); + int v1 = std::round(offset_v + (slope_yv * p_raw.y() + slope_zv * p_raw.z())); + int w1 = std::round(offset_w + (slope_yw * p_raw.y() + slope_zw * p_raw.z())); + + // // test ... + // auto cur_u = m_grouping->convert_3Dpoint_time_ch(p_raw, 0, 0, 0); + // auto cur_v = m_grouping->convert_3Dpoint_time_ch(p_raw, 0, 0, 1); + // auto cur_w = m_grouping->convert_3Dpoint_time_ch(p_raw, 0, 0, 2); + // // std::cout << t1 << " " << u1 << " " << v1 << " " << w1 << " " << std::get<0>(cur_u) << " " << std::get<1>(cur_u) << " " << std::get<1>(cur_v) << " " << std::get<1>(cur_w) << std::endl; + + + + // Calculate 2D projections for comparison point pss_vec[i] + WireCell::Point ps_point = pss_vec.at(i).first; + auto ps_point_raw = transform->backward(ps_point, cluster_t0, face, apa); + int t2 = std::round((offset_t + slope_x * ps_point_raw.x())/cur_ntime_ticks)*cur_ntime_ticks; // this needs some fix ... 
+ int u2 = std::round(offset_u + (slope_yu * ps_point_raw.y() + slope_zu * ps_point_raw.z())); + int v2 = std::round(offset_v + (slope_yv * ps_point_raw.y() + slope_zv * ps_point_raw.z())); + int w2 = std::round(offset_w + (slope_yw * ps_point_raw.y() + slope_zw * ps_point_raw.z())); + + // Helper lambda to get charge from nearby coordinates + auto get_charge_sum = [&](int wire, int time, WirePlaneLayer_t plane) -> double { + double charge_sum = 0.0; + + // std::cout << m_charge_data.size() << std::endl; + // for (const auto& [coord_key, charge_measurement] : m_charge_data) { + // int apa = coord_key.apa; + // int time = coord_key.time; + // int channel = coord_key.channel; + // // Get wires for this channel + // std::cout << "apa: " << apa << ", time: " << time << ", channel: " << channel << std::endl; + // } + + // Convert WirePlaneLayer_t to plane number: kUlayer(1)->0, kVlayer(2)->1, kWlayer(4)->2 + int plane_num = -1; + if (plane == kUlayer) plane_num = 0; + else if (plane == kVlayer) plane_num = 1; + else if (plane == kWlayer) plane_num = 2; + + // Search in a 3x3 neighborhood (±1 in wire and time) + // Only consider the center, wire ±1, and time ±1 (total 5 combinations) + for (int dw = -1; dw <= 1; dw++) { + int channel = get_channel_for_wire(apa, face, plane_num, wire + dw); + if (channel < 0) continue; + // Center (dt = 0) + { + CoordReadout charge_key(apa, time, channel); + auto charge_it = m_charge_data.find(charge_key); + if (charge_it != m_charge_data.end() && charge_it->second.flag != 0) { + charge_sum += charge_it->second.charge; + } + } + } + // Time -1 + { + int channel = get_channel_for_wire(apa, face, plane_num, wire); + if (channel >= 0) { + CoordReadout charge_key(apa, time - cur_ntime_ticks, channel); + auto charge_it = m_charge_data.find(charge_key); + if (charge_it != m_charge_data.end() && charge_it->second.flag != 0) { + charge_sum += charge_it->second.charge; + } + } + } + // Time +1 + { + int channel = get_channel_for_wire(apa, face, 
plane_num, wire); + if (channel >= 0) { + CoordReadout charge_key(apa, time + cur_ntime_ticks, channel); + auto charge_it = m_charge_data.find(charge_key); + if (charge_it != m_charge_data.end() && charge_it->second.flag != 0) { + charge_sum += charge_it->second.charge; + } + } + } + + return charge_sum; + }; + + // Get charges for point p (c1) + double c1_u = get_charge_sum(u1, t1, kUlayer); + double c1_v = get_charge_sum(v1, t1, kVlayer); + double c1_w = get_charge_sum(w1, t1, kWlayer); + + // Get charges for comparison point (c2) + double c2_u = get_charge_sum(u2, t2, kUlayer); + double c2_v = get_charge_sum(v2, t2, kVlayer); + double c2_w = get_charge_sum(w2, t2, kWlayer); + + // std::cout << "Skip inside: " << t1 << " " << u1 << " " << v1 << " " << w1 << " | " << t2 << " " << u2 << " " << v2 << " " << w2 << " | " << c1_u << " " << c1_v << " " << c1_w << " " << c2_u << " " << c2_v << " " << c2_w << std::endl; + + // Calculate charge ratios + double ratio = 0; + double ratio_1 = 1; + + // U plane ratio + if (c2_u != 0) { + ratio += c1_u / c2_u; + if (c1_u != 0) + ratio_1 *= c1_u / c2_u; + else + ratio_1 *= m_params.skip_default_ratio_1; + } else { + ratio += 1; + } + + // V plane ratio + if (c2_v != 0) { + ratio += c1_v / c2_v; + if (c1_v != 0) + ratio_1 *= c1_v / c2_v; + else + ratio_1 *= m_params.skip_default_ratio_1; + } else { + ratio += 1; + } + + // W plane ratio + if (c2_w != 0) { + ratio += c1_w / c2_w; + if (c1_w != 0) + ratio_1 *= c1_w / c2_w; + else + ratio_1 *= m_params.skip_default_ratio_1; + } else { + ratio += 1; + } + + // std::cout << "Inside: " << ratio << " " << ratio_1 << std::endl; + + // Apply charge-based correction + if (ratio / 3.0 < m_params.skip_ratio_cut || ratio_1 < m_params.skip_ratio_1_cut) { + p = ps_point; + } + + // Angle constraint checking + if (fine_tracking_path.size() >= 2) { + // Get direction vectors for angle calculation + auto& last_point = fine_tracking_path[fine_tracking_path.size()-1].first; + auto& second_last_point 
= fine_tracking_path[fine_tracking_path.size()-2].first; + + // Vector from second-to-last to last point in fine tracking path + WireCell::Vector v1(last_point.x() - second_last_point.x(), + last_point.y() - second_last_point.y(), + last_point.z() - second_last_point.z()); + + // Vector from last point to current point p + WireCell::Vector v2(p.x() - last_point.x(), + p.y() - last_point.y(), + p.z() - last_point.z()); + + // std::cout << ratio << " " << ratio_1 << " (" << p.x() << " " << p.y() << " " < 0 && mag2 > 0) { + double cos_angle = dot_product / (mag1 * mag2); + // Clamp to [-1, 1] to handle numerical errors + cos_angle = std::max(-1.0, std::min(1.0, cos_angle)); + angle = acos(cos_angle) * 180.0 / M_PI; + } + + // Calculate angle between consecutive segments in original path for comparison + double angle1 = 180.0; + if (i >= 2) { + auto& p_i = pss_vec[i].first; + auto& p_i1 = pss_vec[i-1].first; + auto& p_i2 = pss_vec[i-2].first; + + WireCell::Vector v3(p_i1.x() - p_i2.x(), p_i1.y() - p_i2.y(), p_i1.z() - p_i2.z()); + WireCell::Vector v4(p_i.x() - p_i1.x(), p_i.y() - p_i1.y(), p_i.z() - p_i1.z()); + + double dot_34 = v3.dot(v4); + double mag3 = sqrt(v3.x()*v3.x() + v3.y()*v3.y() + v3.z()*v3.z()); + double mag4 = sqrt(v4.x()*v4.x() + v4.y()*v4.y() + v4.z()*v4.z()); + + if (mag3 > 0 && mag4 > 0) { + double cos_angle1 = dot_34 / (mag3 * mag4); + cos_angle1 = std::max(-1.0, std::min(1.0, cos_angle1)); + angle1 = acos(cos_angle1) * 180.0 / M_PI; + } + } + + // Get hit information for dead channel detection + // Check if we have valid 3D to 2D mapping for current point index + bool has_u_hits = false, has_v_hits = false, has_w_hits = false; + if (m_3d_to_2d.find(i) != m_3d_to_2d.end()) { + const auto& point_info = m_3d_to_2d.at(i); + has_u_hits = point_info.get_plane_data(kUlayer).quantity > 0; + has_v_hits = point_info.get_plane_data(kVlayer).quantity > 0; + has_w_hits = point_info.get_plane_data(kWlayer).quantity > 0; + } + + // Check for dead channel 
conditions + int dead_plane_count = 0; + if (!has_u_hits) dead_plane_count++; + if (!has_v_hits) dead_plane_count++; + if (!has_w_hits) dead_plane_count++; + + // std::cout << "Inside: " << angle << " " << angle1 << " " << dead_plane_count << " " << mag2 << std::endl; + + if (angle > m_params.skip_angle_cut_3 && dead_plane_count >= 2) { + return true; + } + + // Check for fold-back or extreme angles + if (angle > m_params.skip_angle_cut_1 || angle > angle1 + m_params.skip_angle_cut_2) { + return true; + } + + // Check last point protection + if (i + 1 == pss_vec.size() && angle > m_params.skip_angle_cut_3 && mag2 < m_params.skip_dis_cut) { + return true; + } + } + + + + return false; + + } + + + double TrackFitting::cal_gaus_integral(int tbin, int wbin, double t_center, double t_sigma, + double w_center, double w_sigma, int flag, double nsigma, int cur_ntime_ticks) { + // flag = 0: no boundary effect, pure Gaussian, time or collection plane + // flag = 1: taking into account boundary effect for induction plane + // flag = 2: more complex induction plane response + + double result = 0; + + // *** COORDINATE SYSTEM CLARIFICATION *** + // In this toolkit convention: + // - w_center = offset_u + (slope_yu * p.y + slope_zu * p.z) [continuous coordinate] + // - wbin = std::round(w_center) [bin index - nearest integer] + // - t_center = offset_t + slope_t * p.x [continuous coordinate] + // - tbin = std::round(t_center) [bin index - nearest integer] + // Therefore: compare tbin vs t_center and wbin vs w_center DIRECTLY + + // Check if we're within nsigma range of both time and wire centers + if (fabs(tbin - t_center) <= nsigma * t_sigma && // Direct comparison: bin index vs continuous center + fabs(wbin - w_center) <= nsigma * w_sigma) { // Direct comparison: bin index vs continuous center + + // Time dimension integration + // If tbin = std::round(t_center), then bin spans [tbin-0.5, tbin+0.5] + result = 0.5 * (std::erf((tbin + 0.5*cur_ntime_ticks - t_center) / sqrt(2.) 
/ t_sigma) - + std::erf((tbin - 0.5*cur_ntime_ticks - t_center) / sqrt(2.) / t_sigma)); + + if (flag == 0) { + // Pure Gaussian case - simple wire dimension integration + // If wbin = std::round(w_center), then bin spans [wbin-0.5, wbin+0.5] + result *= 0.5 * (std::erf((wbin + 0.5 - w_center) / sqrt(2.) / w_sigma) - + std::erf((wbin - 0.5 - w_center) / sqrt(2.) / w_sigma)); + + // std::cout << tbin << " " << t_center << " " << t_sigma << " " << 0.5 * (std::erf((tbin + 0.5*cur_ntime_ticks - t_center) / sqrt(2.) / t_sigma) - + // std::erf((tbin - 0.5*cur_ntime_ticks - t_center) / sqrt(2.) / t_sigma)) << " | " << 0.5 * (std::erf((wbin + 0.5 - w_center) / sqrt(2.) / w_sigma) - + // std::erf((wbin - 0.5 - w_center) / sqrt(2.) / w_sigma)) << std::endl; + + } else if (flag == 1) { + // Induction plane with bipolar response + // All boundaries shift by -0.5 due to bin convention change + + // First part: positive lobe (was wbin+0.5 to wbin+1.5, now wbin+0.0 to wbin+1.0) + double x2 = wbin + 1.0; // was wbin + 1.5, shifted by -0.5 + double x1 = wbin + 0.0; // was wbin + 0.5, shifted by -0.5 (bin center) + double x0 = w_center; + + double content1 = 0.5 * (std::erf((x2 - x0) / sqrt(2.) / w_sigma) - + std::erf((x1 - x0) / sqrt(2.) / w_sigma)); + + // Weight calculation for positive lobe + double w1 = -pow(w_sigma, 2) / (-1) / sqrt(2. * 3.1415926) / w_sigma * + (exp(-pow(x0 - x2, 2) / 2. / pow(w_sigma, 2)) - + exp(-pow(x0 - x1, 2) / 2. / pow(w_sigma, 2))) / + (0.5 * std::erf((x2 - x0) / sqrt(2.) / w_sigma) - + 0.5 * std::erf((x1 - x0) / sqrt(2.) / w_sigma)) + + (x0 - x2) / (-1); + + // Second part: negative lobe (was wbin-0.5 to wbin+0.5, now wbin-1.0 to wbin+0.0) + x2 = wbin + 0.0; // was wbin + 0.5, shifted by -0.5 (bin center) + x1 = wbin - 1.0; // was wbin - 0.5, shifted by -0.5 + + double content2 = 0.5 * (std::erf((x2 - x0) / sqrt(2.) / w_sigma) - + std::erf((x1 - x0) / sqrt(2.) 
/ w_sigma)); + + // Weight calculation for negative lobe + double w2 = -pow(w_sigma, 2) / (-1) / sqrt(2. * 3.1415926) / w_sigma * + (exp(-pow(x0 - x2, 2) / 2. / pow(w_sigma, 2)) - + exp(-pow(x0 - x1, 2) / 2. / pow(w_sigma, 2))) / + (0.5 * std::erf((x2 - x0) / sqrt(2.) / w_sigma) - + 0.5 * std::erf((x1 - x0) / sqrt(2.) / w_sigma)) + + (x0 - x2) / (-1); + + // Combine positive and negative contributions + result *= (content1 * w1 + content2 * (1 - w2)); + + } else if (flag == 2) { + // More complex induction response with multiple components + // All boundaries shift by -0.5 due to bin convention change + double sum = 0; + + // Component 1: (was wbin+0.5 to wbin+1.0, now wbin+0.0 to wbin+0.5) + double x2 = wbin + 0.5; // was wbin + 1.0, shifted by -0.5 + double x1 = wbin + 0.0; // was wbin + 0.5, shifted by -0.5 (bin center) + double x0 = w_center; + + double content1 = 0.5 * (std::erf((x2 - x0) / sqrt(2.) / w_sigma) - + std::erf((x1 - x0) / sqrt(2.) / w_sigma)); + double w1 = -pow(w_sigma, 2) / (-1) / sqrt(2. * 3.1415926) / w_sigma * + (exp(-pow(x0 - x2, 2) / 2. / pow(w_sigma, 2)) - + exp(-pow(x0 - x1, 2) / 2. / pow(w_sigma, 2))) / + (0.5 * std::erf((x2 - x0) / sqrt(2.) / w_sigma) - + 0.5 * std::erf((x1 - x0) / sqrt(2.) / w_sigma)) + + (x0 - x2) / (-1); + + sum += content1 * (0.545 + 0.697 * w1); + + // Component 2: (was wbin+1.0 to wbin+1.5, now wbin+0.5 to wbin+1.0) + x2 = wbin + 1.0; // was wbin + 1.5, shifted by -0.5 + x1 = wbin + 0.5; // was wbin + 1.0, shifted by -0.5 + + content1 = 0.5 * (std::erf((x2 - x0) / sqrt(2.) / w_sigma) - + std::erf((x1 - x0) / sqrt(2.) / w_sigma)); + w1 = -pow(w_sigma, 2) / (-1) / sqrt(2. * 3.1415926) / w_sigma * + (exp(-pow(x0 - x2, 2) / 2. / pow(w_sigma, 2)) - + exp(-pow(x0 - x1, 2) / 2. / pow(w_sigma, 2))) / + (0.5 * std::erf((x2 - x0) / sqrt(2.) / w_sigma) - + 0.5 * std::erf((x1 - x0) / sqrt(2.) 
/ w_sigma)) + + (x0 - x2) / (-1); + + sum += content1 * (0.11364 + 0.1 * w1); + + // Component 3: (was wbin+0.0 to wbin+0.5, now wbin-0.5 to wbin+0.0) + x2 = wbin + 0.0; // was wbin + 0.5, shifted by -0.5 (bin center) + x1 = wbin - 0.5; // was wbin + 0.0, shifted by -0.5 + + content1 = 0.5 * (std::erf((x2 - x0) / sqrt(2.) / w_sigma) - + std::erf((x1 - x0) / sqrt(2.) / w_sigma)); + w1 = -pow(w_sigma, 2) / (-1) / sqrt(2. * 3.1415926) / w_sigma * + (exp(-pow(x0 - x2, 2) / 2. / pow(w_sigma, 2)) - + exp(-pow(x0 - x1, 2) / 2. / pow(w_sigma, 2))) / + (0.5 * std::erf((x2 - x0) / sqrt(2.) / w_sigma) - + 0.5 * std::erf((x1 - x0) / sqrt(2.) / w_sigma)) + + (x0 - x2) / (-1); + + sum += content1 * (0.545 + 0.697 * (1 - w1)); + + // Component 4: (was wbin-0.5 to wbin+0.0, now wbin-1.0 to wbin-0.5) + x2 = wbin - 0.5; // was wbin + 0.0, shifted by -0.5 + x1 = wbin - 1.0; // was wbin - 0.5, shifted by -0.5 + + content1 = 0.5 * (std::erf((x2 - x0) / sqrt(2.) / w_sigma) - + std::erf((x1 - x0) / sqrt(2.) / w_sigma)); + w1 = -pow(w_sigma, 2) / (-1) / sqrt(2. * 3.1415926) / w_sigma * + (exp(-pow(x0 - x2, 2) / 2. / pow(w_sigma, 2)) - + exp(-pow(x0 - x1, 2) / 2. / pow(w_sigma, 2))) / + (0.5 * std::erf((x2 - x0) / sqrt(2.) / w_sigma) - + 0.5 * std::erf((x1 - x0) / sqrt(2.) 
/ w_sigma)) + + (x0 - x2) / (-1); + + sum += content1 * (0.11364 + 0.1 * (1 - w1)); + + result *= sum; + } + } + + return result; +} + + +double TrackFitting::cal_gaus_integral_seg(int tbin, int wbin, std::vector& t_centers, std::vector& t_sigmas, std::vector& w_centers, std::vector& w_sigmas, std::vector& weights, int flag, double nsigma, int cur_ntime_ticks){ + double result = 0; + double result1 = 0; + + for (size_t i=0;i!=t_centers.size();i++){ + result += cal_gaus_integral(tbin,wbin,t_centers.at(i), t_sigmas.at(i), w_centers.at(i), w_sigmas.at(i),flag,nsigma,cur_ntime_ticks) * weights.at(i); + result1 += weights.at(i); + + // std::cout << cal_gaus_integral(tbin,wbin,t_centers.at(i), t_sigmas.at(i), w_centers.at(i), w_sigmas.at(i),flag,nsigma,cur_ntime_ticks) << " " << weights.at(i) << std::endl; + } + + result /= result1; + + return result; +} + + +void TrackFitting::update_dQ_dx_data() { + // Step 1: Loop over m_clusters to collect all track blobs + std::set track_blobs_set; + for (auto cluster : m_clusters) { + // Collect blobs from each cluster using toolkit convention + for (auto blob : cluster->children()) { + track_blobs_set.insert(blob); + } + } + + // Step 2: Check each measurement in global_rb_map + for (const auto& [coord_key, blob_set] : global_rb_map) { + // coord_key is of type CoordReadout + // blob_set is of type std::set + + bool is_shared = false; + + for (auto blob : blob_set) { + if (track_blobs_set.find(blob) == track_blobs_set.end()) { + // Found a blob not belonging to our track clusters + is_shared = true; + break; + } + } + + if (is_shared) { + // Find and modify the measurement if it exists + auto charge_it = m_charge_data.find(coord_key); + if (charge_it != m_charge_data.end()) { + // Get current measurement and increase charge error for shared measurements + ChargeMeasurement& measurement = charge_it->second; + m_orig_charge_data[coord_key] = measurement; + measurement.charge_err = m_params.share_charge_err; // High penalty for 
shared wires + } + } + } +} + +void TrackFitting::recover_original_charge_data(){ + for (const auto& [coord_key, measurement] : m_orig_charge_data) { + m_charge_data[coord_key] = measurement; + } + m_orig_charge_data.clear(); +} + +std::vector> TrackFitting::calculate_compact_matrix_multi(std::vector >& connected_vec,Eigen::SparseMatrix& weight_matrix, const Eigen::SparseMatrix& response_matrix_transpose, int n_2d_measurements, int n_3d_positions, double cut_position){ + // Initialize results vector - returns sharing ratios for each 3D position + std::vector> results(n_3d_positions); + + // Initialize count vector for 2D measurements + std::vector count_2d(n_2d_measurements, 1); + + // Maps for storing relationships between 2D and 3D indices + std::map> map_2d_to_3d; + std::map> map_3d_to_2d; + std::map, double> map_pair_values; + + // Build mapping structures by iterating through sparse matrix + for (int k = 0; k < response_matrix_transpose.outerSize(); ++k) { + int count = 0; + + for (Eigen::SparseMatrix::InnerIterator it(response_matrix_transpose, k); it; ++it) { + int row = it.row(); + int col = it.col(); + double value = it.value(); + + // Build 2D to 3D mapping + if (map_2d_to_3d.find(col) != map_2d_to_3d.end()) { + map_2d_to_3d[col].insert(row); + } else { + std::set temp_set; + temp_set.insert(row); + map_2d_to_3d[col] = temp_set; + } + + // Build 3D to 2D mapping + if (map_3d_to_2d.find(row) != map_3d_to_2d.end()) { + map_3d_to_2d[row].insert(col); + } else { + std::set temp_set; + temp_set.insert(col); + map_3d_to_2d[row] = temp_set; + } + + // Store pair values for later lookup + map_pair_values[std::make_pair(row, col)] = value; + count++; + } + + count_2d.at(k) = count; + } + + // Calculate average count for 3D positions + std::vector> average_count(n_3d_positions); + for (auto it = map_3d_to_2d.begin(); it != map_3d_to_2d.end(); ++it) { + int row = it->first; + double sum1 = 0.0; + double sum2 = 0.0; + int flag = 0; + + for (auto it1 = 
it->second.begin(); it1 != it->second.end(); ++it1) { + int col = *it1; + double val = map_pair_values[std::make_pair(row, col)]; + sum1 += count_2d[col] * val; + sum2 += val; + if (count_2d[col] > 2) { + flag = 1; + } + } + average_count.at(row) = std::make_pair(sum1 / sum2, flag); + } + + // Update 2D measurement weights based on 3D position sharing + for (auto it = map_2d_to_3d.begin(); it != map_2d_to_3d.end(); ++it) { + int col = it->first; + double sum1 = 0.0; + double sum2 = 0.0; + int flag = 0; + + for (auto it1 = it->second.begin(); it1 != it->second.end(); ++it1) { + int row = *it1; + double val = map_pair_values[std::make_pair(row, col)]; + if (average_count.at(row).second == 1) { + flag = 1; + } + sum1 += average_count.at(row).first * val; + sum2 += val; + } + + // Adjust weight matrix coefficients based on sharing criteria + if (flag == 1 && weight_matrix.coeffRef(col, col) == 1 && sum1 > cut_position * sum2) { + weight_matrix.coeffRef(col, col) = std::pow(1.0 / (sum1 / sum2 - cut_position + 1), 2); + } + } + + // Calculate sharing ratios between connected 3D positions (key difference from regular version) + for (auto it = map_3d_to_2d.begin(); it != map_3d_to_2d.end(); ++it) { + int row = it->first; + + // Skip if row is out of bounds for connected_vec + if (row >= static_cast(connected_vec.size())) continue; + + // For each connected neighbor defined in connected_vec + for (size_t i = 0; i < connected_vec.at(row).size(); i++) { + double sum[2] = {0.0, 0.0}; + + // Find the connected neighbor + auto it1 = map_3d_to_2d.find(connected_vec.at(row).at(i)); + + // Count total connections for current 3D position + for (auto it3 = it->second.begin(); it3 != it->second.end(); ++it3) { + sum[0] += 1.0; // Total count (using 1 instead of val as in WCP) + } + + // Count shared connections with this connected neighbor + if (it1 != map_3d_to_2d.end()) { + std::vector common_results(it->second.size()); + auto it3 = std::set_intersection( + it->second.begin(), 
it->second.end(), + it1->second.begin(), it1->second.end(), + common_results.begin() + ); + common_results.resize(it3 - common_results.begin()); + + for (auto it4 = common_results.begin(); it4 != common_results.end(); ++it4) { + sum[1] += 1.0; // Shared count (using 1 instead of val as in WCP) + } + } + + // Calculate sharing ratio for this connected neighbor + results.at(row).push_back(sum[1] / (sum[0] + 1e-9)); + } + } + + // Ensure all result vectors have the correct size + for (size_t i = 0; i < results.size(); i++) { + if (i < connected_vec.size() && results.at(i).size() != connected_vec.at(i).size()) { + results.at(i).resize(connected_vec.at(i).size(), 0.0); + } + } + + // Convert to the expected return type (pair format for compatibility) + // Note: The WCP version returns vector>, but our signature expects vector> + // We'll return the first two sharing ratios as a pair, or (0,0) if less than 2 connections + std::vector> pair_results(results.size()); + for (size_t i = 0; i < results.size(); i++) { + double first = (results[i].size() > 0) ? results[i][0] : 0.0; + double second = (results[i].size() > 1) ? 
results[i][1] : 0.0; + pair_results[i] = std::make_pair(first, second); + } + + return pair_results; +} + + +std::vector> TrackFitting::calculate_compact_matrix( + Eigen::SparseMatrix& weight_matrix, + const Eigen::SparseMatrix& response_matrix_transpose, + int n_2d_measurements, + int n_3d_positions, + double cut_position){ + std::vector > results(n_3d_positions, std::make_pair(0,0)); + + // Initialize count vector for 2D measurements + std::vector count_2d(n_2d_measurements, 1); + + // Maps for storing relationships between 2D and 3D indices + std::map> map_2d_to_3d; + std::map> map_3d_to_2d; + std::map, double> map_pair_values; + + // Build mapping structures by iterating through sparse matrix + for (int k = 0; k < response_matrix_transpose.outerSize(); ++k) { + int count = 0; + + for (Eigen::SparseMatrix::InnerIterator it(response_matrix_transpose, k); it; ++it) { + int row = it.row(); + int col = it.col(); + double value = it.value(); + + // std::cout << "Row: " << row << ", Col: " << col << ", Value: " << value << std::endl; + + // Build 2D to 3D mapping + if (map_2d_to_3d.find(col) != map_2d_to_3d.end()) { + map_2d_to_3d[col].insert(row); + } else { + std::set temp_set; + temp_set.insert(row); + map_2d_to_3d[col] = temp_set; + } + + // Build 3D to 2D mapping + if (map_3d_to_2d.find(row) != map_3d_to_2d.end()) { + map_3d_to_2d[row].insert(col); + } else { + std::set temp_set; + temp_set.insert(col); + map_3d_to_2d[row] = temp_set; + } + + // Store pair values for later lookup + map_pair_values[std::make_pair(row, col)] = value; + count++; + } + + count_2d.at(k) = count; + } + + // Calculate average count for 3D positions + std::vector> average_count(n_3d_positions); + for (auto it = map_3d_to_2d.begin(); it != map_3d_to_2d.end(); ++it) { + int row = it->first; + double sum1 = 0.0; + double sum2 = 0.0; + int flag = 0; + + for (auto it1 = it->second.begin(); it1 != it->second.end(); ++it1) { + int col = *it1; + double val = map_pair_values[std::make_pair(row, 
col)]; + sum1 += count_2d[col] * val; + sum2 += val; + if (count_2d[col] > 2) { + flag = 1; + } + } + // std::cout << row << " " << sum1 << " " << sum2 << " " << flag << std::endl; + average_count.at(row) = std::make_pair(sum1 / sum2, flag); + } + + // Update 2D measurement weights based on 3D position sharing + for (auto it = map_2d_to_3d.begin(); it != map_2d_to_3d.end(); ++it) { + int col = it->first; + double sum1 = 0.0; + double sum2 = 0.0; + int flag = 0; + + for (auto it1 = it->second.begin(); it1 != it->second.end(); ++it1) { + int row = *it1; + double val = map_pair_values[std::make_pair(row, col)]; + if (average_count.at(row).second == 1) { + flag = 1; + } + sum1 += average_count.at(row).first * val; + sum2 += val; + } + + // Adjust weight matrix coefficients based on sharing criteria + if (flag == 1 && weight_matrix.coeffRef(col, col) == 1 && sum1 > cut_position * sum2) { + weight_matrix.coeffRef(col, col) = std::pow(1.0 / (sum1 / sum2 - cut_position + 1), 2); + } + } + + // Calculate sharing ratios between neighboring 3D positions + + for (auto it = map_3d_to_2d.begin(); it != map_3d_to_2d.end(); ++it) { + int row = it->first; + auto it_prev = map_3d_to_2d.find(row - 1); + auto it_next = map_3d_to_2d.find(row + 1); + + double sum[3] = {0.0, 0.0, 0.0}; + + // Count total connections for current 3D position + for (auto it3 = it->second.begin(); it3 != it->second.end(); ++it3) { + sum[0] += 1.0; // Total count + } + + // Count shared connections with previous neighbor + if (it_prev != map_3d_to_2d.end()) { + std::vector common_results(it->second.size()); + auto it3 = std::set_intersection( + it->second.begin(), it->second.end(), + it_prev->second.begin(), it_prev->second.end(), + common_results.begin() + ); + common_results.resize(it3 - common_results.begin()); + + for (auto it4 = common_results.begin(); it4 != common_results.end(); ++it4) { + sum[1] += 1.0; // Shared with previous + } + } + + // Count shared connections with next neighbor + if (it_next != 
map_3d_to_2d.end()) { + std::vector common_results(it->second.size()); + auto it3 = std::set_intersection( + it->second.begin(), it->second.end(), + it_next->second.begin(), it_next->second.end(), + common_results.begin() + ); + common_results.resize(it3 - common_results.begin()); + + for (auto it4 = common_results.begin(); it4 != common_results.end(); ++it4) { + sum[2] += 1.0; // Shared with next + } + } + + // std::cout << row << " " << sum[0] << " " << sum[1] << " " << sum[2] << std::endl; + + // Calculate overlap ratios + if (sum[0] > 0) { + results.at(row).first = sum[1] / sum[0]; // Previous neighbor ratio + results.at(row).second = sum[2] / sum[0]; // Next neighbor ratio + } + } + + return results; +} + +void TrackFitting::dQ_dx_fill(double dis_end_point_ext) { + if (fine_tracking_path.size() <= 1) return; + + // Resize vectors to match fine_tracking_path size + dQ.resize(fine_tracking_path.size(), 0); + dx.resize(fine_tracking_path.size(), 0); + reduced_chi2.resize(fine_tracking_path.size(), 0); + + // Loop through each point in the fine tracking path + for (size_t i = 0; i != fine_tracking_path.size(); i++) { + WireCell::Point curr_rec_pos = fine_tracking_path.at(i).first; + WireCell::Point prev_rec_pos, next_rec_pos; + + if (i == 0) { + // First point: extrapolate backward from the direction to next point + next_rec_pos = WireCell::Point( + (fine_tracking_path.at(i).first.x() + fine_tracking_path.at(i+1).first.x()) / 2.0, + (fine_tracking_path.at(i).first.y() + fine_tracking_path.at(i+1).first.y()) / 2.0, + (fine_tracking_path.at(i).first.z() + fine_tracking_path.at(i+1).first.z()) / 2.0 + ); + + double length = sqrt( + pow(fine_tracking_path.at(i+1).first.x() - fine_tracking_path.at(i).first.x(), 2) + + pow(fine_tracking_path.at(i+1).first.y() - fine_tracking_path.at(i).first.y(), 2) + + pow(fine_tracking_path.at(i+1).first.z() - fine_tracking_path.at(i).first.z(), 2) + ); + + if (length == 0) { + prev_rec_pos = fine_tracking_path.at(i).first; + } else { 
+ prev_rec_pos = WireCell::Point( + fine_tracking_path.at(i).first.x() - (fine_tracking_path.at(i+1).first.x() - fine_tracking_path.at(i).first.x()) / length * dis_end_point_ext, + fine_tracking_path.at(i).first.y() - (fine_tracking_path.at(i+1).first.y() - fine_tracking_path.at(i).first.y()) / length * dis_end_point_ext, + fine_tracking_path.at(i).first.z() - (fine_tracking_path.at(i+1).first.z() - fine_tracking_path.at(i).first.z()) / length * dis_end_point_ext + ); + } + } else if (i + 1 == fine_tracking_path.size()) { + // Last point: extrapolate forward from the direction from previous point + prev_rec_pos = WireCell::Point( + (fine_tracking_path.at(i).first.x() + fine_tracking_path.at(i-1).first.x()) / 2.0, + (fine_tracking_path.at(i).first.y() + fine_tracking_path.at(i-1).first.y()) / 2.0, + (fine_tracking_path.at(i).first.z() + fine_tracking_path.at(i-1).first.z()) / 2.0 + ); + + double length = sqrt( + pow(fine_tracking_path.at(i-1).first.x() - fine_tracking_path.at(i).first.x(), 2) + + pow(fine_tracking_path.at(i-1).first.y() - fine_tracking_path.at(i).first.y(), 2) + + pow(fine_tracking_path.at(i-1).first.z() - fine_tracking_path.at(i).first.z(), 2) + ); + + if (length == 0) { + next_rec_pos = fine_tracking_path.at(i).first; + } else { + next_rec_pos = WireCell::Point( + fine_tracking_path.at(i).first.x() - (fine_tracking_path.at(i-1).first.x() - fine_tracking_path.at(i).first.x()) / length * dis_end_point_ext, + fine_tracking_path.at(i).first.y() - (fine_tracking_path.at(i-1).first.y() - fine_tracking_path.at(i).first.y()) / length * dis_end_point_ext, + fine_tracking_path.at(i).first.z() - (fine_tracking_path.at(i-1).first.z() - fine_tracking_path.at(i).first.z()) / length * dis_end_point_ext + ); + } + } else { + // Middle points: use midpoints to neighboring points + prev_rec_pos = WireCell::Point( + (fine_tracking_path.at(i).first.x() + fine_tracking_path.at(i-1).first.x()) / 2.0, + (fine_tracking_path.at(i).first.y() + 
fine_tracking_path.at(i-1).first.y()) / 2.0, + (fine_tracking_path.at(i).first.z() + fine_tracking_path.at(i-1).first.z()) / 2.0 + ); + + next_rec_pos = WireCell::Point( + (fine_tracking_path.at(i).first.x() + fine_tracking_path.at(i+1).first.x()) / 2.0, + (fine_tracking_path.at(i).first.y() + fine_tracking_path.at(i+1).first.y()) / 2.0, + (fine_tracking_path.at(i).first.z() + fine_tracking_path.at(i+1).first.z()) / 2.0 + ); + } + + // Calculate dx as sum of distances to previous and next positions + dx.at(i) = sqrt( + pow(curr_rec_pos.x() - prev_rec_pos.x(), 2) + + pow(curr_rec_pos.y() - prev_rec_pos.y(), 2) + + pow(curr_rec_pos.z() - prev_rec_pos.z(), 2) + ) + sqrt( + pow(curr_rec_pos.x() - next_rec_pos.x(), 2) + + pow(curr_rec_pos.y() - next_rec_pos.y(), 2) + + pow(curr_rec_pos.z() - next_rec_pos.z(), 2) + ); + + // Set placeholder dQ value (5000 * dx as in original) + dQ.at(i) = m_params.default_dQ_dx * dx.at(i); + + // Initialize reduced_chi2 to 0 + reduced_chi2.at(i) = 0; + } + +} + +void TrackFitting::dQ_dx_multi_fit(double dis_end_point_ext, bool flag_dQ_dx_fit_reg){ + if (!m_graph) return; + + // Update charge data for shared wires + update_dQ_dx_data(); + + // Use parameters from member variable + const double DL = m_params.DL; + const double DT = m_params.DT; + const double col_sigma_w_T = m_params.col_sigma_w_T; + const double ind_sigma_u_T = m_params.ind_sigma_u_T; + const double ind_sigma_v_T = m_params.ind_sigma_v_T; + const double rel_uncer_ind = m_params.rel_uncer_ind; + const double rel_uncer_col = m_params.rel_uncer_col; + const double add_uncer_ind = m_params.add_uncer_ind; + const double add_uncer_col = m_params.add_uncer_col; + const double add_sigma_L = m_params.add_sigma_L; + + // Prepare charge data maps similar to dQ_dx_fit + std::map>> map_U_charge_2D, map_V_charge_2D, map_W_charge_2D; + + // Fill the maps from m_charge_data + for (const auto& [coord_readout, charge_measurement] : m_charge_data) { + int apa = coord_readout.apa; + int time 
= coord_readout.time; + int channel = coord_readout.channel; + + auto wires_info = get_wires_for_channel(apa, channel); + if (wires_info.empty()) continue; + + std::set associated_coords; + int plane = -1; + + for (const auto& wire_info : wires_info) { + int face = std::get<0>(wire_info); + plane = std::get<1>(wire_info); + int wire = std::get<2>(wire_info); + + WirePlaneLayer_t plane_layer = (plane == 0) ? kUlayer : + (plane == 1) ? kVlayer : kWlayer; + + TrackFitting::Coord2D coord_2d(apa, face, time, wire, channel, plane_layer); + associated_coords.insert(coord_2d); + } + + std::pair> charge_coord_pair = + std::make_pair(charge_measurement, associated_coords); + + switch (plane) { + case 0: map_U_charge_2D[coord_readout] = charge_coord_pair; break; + case 1: map_V_charge_2D[coord_readout] = charge_coord_pair; break; + case 2: map_W_charge_2D[coord_readout] = charge_coord_pair; break; + } + } + + // Count total 3D positions from all segments and vertices + int n_3D_pos = 0; + std::map, int> vertex_index_map; + std::map, int>, int> segment_point_index_map; + + // First pass: assign indices to vertices and segments + auto edge_range = boost::edges(*m_graph); + for (auto e_it = edge_range.first; e_it != edge_range.second; ++e_it) { + auto& edge_bundle = (*m_graph)[*e_it]; + if (!edge_bundle.segment) continue; + + auto segment = edge_bundle.segment; + auto& fits = segment->fits(); + if (fits.empty()) continue; + + // Get start and end vertices + auto vd1 = boost::source(*e_it, *m_graph); + auto vd2 = boost::target(*e_it, *m_graph); + auto& v_bundle1 = (*m_graph)[vd1]; + auto& v_bundle2 = (*m_graph)[vd2]; + + std::shared_ptr start_v = nullptr, end_v = nullptr; + if (v_bundle1.vertex && v_bundle2.vertex) { + if (v_bundle1.vertex->fit().index <= v_bundle2.vertex->fit().index) { + start_v = v_bundle1.vertex; + end_v = v_bundle2.vertex; + } else { + start_v = v_bundle2.vertex; + end_v = v_bundle1.vertex; + } + } + + // Assign indices to points + for (size_t i = 0; i < 
fits.size(); i++) { + if (i == 0) { + // Start vertex + if (start_v && vertex_index_map.find(start_v) == vertex_index_map.end()) { + vertex_index_map[start_v] = start_v->fit().index; + } + if (start_v) { + segment_point_index_map[std::make_pair(segment, i)] = vertex_index_map[start_v]; + } else { + segment_point_index_map[std::make_pair(segment, i)] = segment->fits()[i].index; + } + } else if (i + 1 == fits.size()) { + // End vertex + if (end_v && vertex_index_map.find(end_v) == vertex_index_map.end()) { + vertex_index_map[end_v] = end_v->fit().index; + } + if (end_v) { + segment_point_index_map[std::make_pair(segment, i)] = vertex_index_map[end_v]; + } else { + segment_point_index_map[std::make_pair(segment, i)] = segment->fits()[i].index; + } + } else { + // Middle points + segment_point_index_map[std::make_pair(segment, i)] = segment->fits()[i].index; + } + } + } + + if (n_3D_pos == 0) return; + + int n_2D_u = map_U_charge_2D.size(); + int n_2D_v = map_V_charge_2D.size(); + int n_2D_w = map_W_charge_2D.size(); + + if (n_2D_u == 0 && n_2D_v == 0 && n_2D_w == 0) return; + + // Initialize Eigen matrices and vectors + Eigen::VectorXd pos_3D(n_3D_pos), data_u_2D(n_2D_u), data_v_2D(n_2D_v), data_w_2D(n_2D_w); + Eigen::VectorXd pred_data_u_2D(n_2D_u), pred_data_v_2D(n_2D_v), pred_data_w_2D(n_2D_w); + Eigen::SparseMatrix RU(n_2D_u, n_3D_pos); + Eigen::SparseMatrix RV(n_2D_v, n_3D_pos); + Eigen::SparseMatrix RW(n_2D_w, n_3D_pos); + + std::vector traj_pts(n_3D_pos); + std::vector local_dx(n_3D_pos, 0); + std::vector traj_reduced_chi2(n_3D_pos, 0); + std::vector reg_flag_u(n_3D_pos, 0), reg_flag_v(n_3D_pos, 0), reg_flag_w(n_3D_pos, 0); + + // Initialize solution vector + Eigen::VectorXd pos_3D_init(n_3D_pos); + for (int i = 0; i < n_3D_pos; i++) { + pos_3D_init(i) = 50000.0; // Initial guess for single MIP + } + + // Fill data vectors with charge/uncertainty ratios + { + int n_u = 0; + for (const auto& [coord_key, result] : map_U_charge_2D) { + const auto& measurement = 
result.first; + if (measurement.charge > 0) { + double charge = measurement.charge; + double charge_err = measurement.charge_err; + double total_err = sqrt(pow(charge_err, 2) + pow(charge * rel_uncer_ind, 2) + pow(add_uncer_ind, 2)); + data_u_2D(n_u) = charge / total_err; + } else { + data_u_2D(n_u) = 0; + } + n_u++; + } + + int n_v = 0; + for (const auto& [coord_key, result] : map_V_charge_2D) { + const auto& measurement = result.first; + if (measurement.charge > 0) { + double charge = measurement.charge; + double charge_err = measurement.charge_err; + double total_err = sqrt(pow(charge_err, 2) + pow(charge * rel_uncer_ind, 2) + pow(add_uncer_ind, 2)); + data_v_2D(n_v) = charge / total_err; + } else { + data_v_2D(n_v) = 0; + } + n_v++; + } + + int n_w = 0; + for (const auto& [coord_key, result] : map_W_charge_2D) { + const auto& measurement = result.first; + if (measurement.charge > 0) { + double charge = measurement.charge; + double charge_err = measurement.charge_err; + double total_err = sqrt(pow(charge_err, 2) + pow(charge * rel_uncer_col, 2) + pow(add_uncer_col, 2)); + data_w_2D(n_w) = charge / total_err; + } else { + data_w_2D(n_w) = 0; + } + n_w++; + } + } + + // Fill trajectory points and calculate dx values + for (auto e_it = edge_range.first; e_it != edge_range.second; ++e_it) { + auto& edge_bundle = (*m_graph)[*e_it]; + if (!edge_bundle.segment) continue; + + auto segment = edge_bundle.segment; + auto& fits = segment->fits(); + if (fits.empty()) continue; + + // Fill trajectory points + for (size_t i = 0; i < fits.size(); i++) { + int idx = segment_point_index_map[std::make_pair(segment, i)]; + traj_pts[idx] = fits[i].point; + } + + // Calculate dx values for middle points + for (size_t i = 1; i + 1 < fits.size(); i++) { + int idx = segment_point_index_map[std::make_pair(segment, i)]; + + WireCell::Point prev_pos = fits[i-1].point; + WireCell::Point curr_pos = fits[i].point; + WireCell::Point next_pos = fits[i+1].point; + + WireCell::Point prev_mid = 
0.5 * (prev_pos + curr_pos); + WireCell::Point next_mid = 0.5 * (next_pos + curr_pos); + + double dx = (curr_pos - prev_mid).magnitude() + (curr_pos - next_mid).magnitude(); + local_dx[idx] = dx; + } + } + + // Calculate dx for vertices (endpoints) + for (const auto& [vertex, vertex_idx] : vertex_index_map) { + std::vector connected_pts; + + // Find connected segments + auto vertex_desc = vertex->get_descriptor(); + if (vertex_desc != PR::Graph::null_vertex()) { + auto adj_edges = boost::adjacent_vertices(vertex_desc, *m_graph); + for (auto v_it = adj_edges.first; v_it != adj_edges.second; ++v_it) { + auto edge_desc = boost::edge(vertex_desc, *v_it, *m_graph); + if (edge_desc.second) { + auto& edge_bundle = (*m_graph)[edge_desc.first]; + if (edge_bundle.segment && !edge_bundle.segment->fits().empty()) { + auto& fits = edge_bundle.segment->fits(); + if (fits.size() > 1) { + // Add second point from segment + connected_pts.push_back(fits[1].point); + } + } + } + } + } + + // If only one connection, extend endpoint + if (connected_pts.size() == 1) { + WireCell::Point curr_pos = vertex->fit().point; + WireCell::Vector dir = (connected_pts[0] - curr_pos).norm(); + WireCell::Point extended = curr_pos - dir * dis_end_point_ext; + connected_pts.push_back(extended); + } + + // Calculate total dx + double total_dx = 0; + for (const auto& pt : connected_pts) { + total_dx += (pt - vertex->fit().point).magnitude(); + } + local_dx[vertex_idx] = total_dx; + } + + // Get time ticks from cluster + int cur_ntime_ticks = 10; // Default value, should be calculated from cluster + auto edge_range_temp = boost::edges(*m_graph); + for (auto e_it = edge_range_temp.first; e_it != edge_range_temp.second; ++e_it) { + auto& edge_bundle = (*m_graph)[*e_it]; + if (edge_bundle.segment && edge_bundle.segment->cluster()) { + auto cluster = edge_bundle.segment->cluster(); + auto first_blob = cluster->children()[0]; + cur_ntime_ticks = first_blob->slice_index_max() - first_blob->slice_index_min(); + 
break; + } + } + + // Build response matrices using cal_gaus_integral_seg + for (auto e_it = edge_range.first; e_it != edge_range.second; ++e_it) { + auto& edge_bundle = (*m_graph)[*e_it]; + if (!edge_bundle.segment) continue; + + auto segment = edge_bundle.segment; + auto& fits = segment->fits(); + if (fits.empty()) continue; + + // Process middle points + for (size_t i = 1; i + 1 < fits.size(); i++) { + int idx = segment_point_index_map[std::make_pair(segment, i)]; + + WireCell::Point prev_pos = fits[i-1].point; + WireCell::Point curr_pos = fits[i].point; + WireCell::Point next_pos = fits[i+1].point; + + // Create sampling points for Gaussian integration + std::vector centers_U, centers_V, centers_W, centers_T; + std::vector sigmas_T, sigmas_U, sigmas_V, sigmas_W; + std::vector weights; + + // Sample 5 points each from prev->curr and curr->next + for (int j = 0; j < 5; j++) { + // First half: prev -> curr + WireCell::Point reco_pos = prev_pos + (curr_pos - prev_pos) * (j + 0.5) / 5.0; + + // Get geometry parameters + auto test_wpid = m_dv->contained_by(reco_pos); + if (test_wpid.apa() == -1 || test_wpid.face() == -1) continue; + + WirePlaneId wpid(kAllLayers, test_wpid.face(), test_wpid.apa()); + auto offset_it = wpid_offsets.find(wpid); + auto slope_it = wpid_slopes.find(wpid); + auto geom_it = wpid_geoms.find(wpid); + + if (offset_it == wpid_offsets.end() || slope_it == wpid_slopes.end() || geom_it == wpid_geoms.end()) continue; + + auto cluster = segment->cluster(); + const auto transform = m_pcts->pc_transform(cluster->get_scope_transform(cluster->get_default_scope())); + double cluster_t0 = cluster->get_cluster_t0(); + auto reco_pos_raw = transform->backward(reco_pos, cluster_t0, test_wpid.face(), test_wpid.apa()); + + auto offset_t = std::get<0>(offset_it->second); + auto offset_u = std::get<1>(offset_it->second); + auto offset_v = std::get<2>(offset_it->second); + auto offset_w = std::get<3>(offset_it->second); + auto slope_x = 
std::get<0>(slope_it->second); + auto slope_yu = std::get<1>(slope_it->second).first; + auto slope_zu = std::get<1>(slope_it->second).second; + auto slope_yv = std::get<2>(slope_it->second).first; + auto slope_zv = std::get<2>(slope_it->second).second; + auto slope_yw = std::get<3>(slope_it->second).first; + auto slope_zw = std::get<3>(slope_it->second).second; + + double central_T = offset_t + slope_x * reco_pos_raw.x(); + double central_U = offset_u + (slope_yu * reco_pos_raw.y() + slope_zu * reco_pos_raw.z()); + double central_V = offset_v + (slope_yv * reco_pos_raw.y() + slope_zv * reco_pos_raw.z()); + double central_W = offset_w + (slope_yw * reco_pos_raw.y() + slope_zw * reco_pos_raw.z()); + + double weight = (prev_pos - curr_pos).magnitude(); + + // Calculate diffusion sigmas (simplified - would need flash time in full implementation) + auto time_tick_width = std::get<0>(geom_it->second); + double drift_time = std::max(50.0 * units::microsecond, + reco_pos_raw.x() / time_tick_width * 0.5 * units::microsecond); + + double diff_sigma_L = sqrt(2 * DL * drift_time); + double diff_sigma_T = sqrt(2 * DT * drift_time); + + auto pitch_u = std::get<1>(geom_it->second); + auto pitch_v = std::get<2>(geom_it->second); + auto pitch_w = std::get<3>(geom_it->second); + + double sigma_L = sqrt(pow(diff_sigma_L, 2) + pow(add_sigma_L, 2)) / time_tick_width; + double sigma_T_u = sqrt(pow(diff_sigma_T, 2) + pow(ind_sigma_u_T, 2)) / pitch_u; + double sigma_T_v = sqrt(pow(diff_sigma_T, 2) + pow(ind_sigma_v_T, 2)) / pitch_v; + double sigma_T_w = sqrt(pow(diff_sigma_T, 2) + pow(col_sigma_w_T, 2)) / pitch_w; + + centers_U.push_back(central_U); + centers_V.push_back(central_V); + centers_W.push_back(central_W); + centers_T.push_back(central_T); + weights.push_back(weight); + sigmas_U.push_back(sigma_T_u); + sigmas_V.push_back(sigma_T_v); + sigmas_W.push_back(sigma_T_w); + sigmas_T.push_back(sigma_L); + + // Second half: curr -> next + reco_pos = next_pos + (curr_pos - next_pos) * (j 
+ 0.5) / 5.0; + // ... (repeat similar calculations for second half) + } + + // Fill response matrices using Gaussian integrals + int n_u = 0; + for (const auto& [coord_key, result] : map_U_charge_2D) { + const auto& coord_2d_set = result.second; + for (const auto& coord_2d : coord_2d_set) { + if (abs(coord_2d.wire - centers_U.front()) <= 10 && + abs(coord_2d.time - centers_T.front()) <= 10) { + + double value = cal_gaus_integral_seg(coord_2d.time, coord_2d.wire, centers_T, sigmas_T, + centers_U, sigmas_U, weights, 0, 4, cur_ntime_ticks); + + if (result.first.flag == 0 && value > 0) reg_flag_u[idx] = 1; + + if (value > 0 && result.first.charge > 0 && result.first.flag != 0) { + double total_err = sqrt(pow(result.first.charge_err, 2) + + pow(result.first.charge * rel_uncer_ind, 2) + + pow(add_uncer_ind, 2)); + RU.insert(n_u, idx) = value / total_err; + } + } + break; // Only process first coord_2d for now + } + n_u++; + } + + // Similar processing for V and W planes... + } + } + + // Build connected_vec for regularization + std::vector> connected_vec(n_3D_pos); + for (auto e_it = edge_range.first; e_it != edge_range.second; ++e_it) { + auto& edge_bundle = (*m_graph)[*e_it]; + if (!edge_bundle.segment) continue; + + auto segment = edge_bundle.segment; + auto& fits = segment->fits(); + if (fits.empty()) continue; + + for (size_t i = 1; i + 1 < fits.size(); i++) { + int idx = segment_point_index_map[std::make_pair(segment, i)]; + int prev_idx = segment_point_index_map[std::make_pair(segment, i-1)]; + int next_idx = segment_point_index_map[std::make_pair(segment, i+1)]; + + connected_vec[idx].push_back(prev_idx); + connected_vec[idx].push_back(next_idx); + } + } + + // Add vertex connections + for (const auto& [vertex, vertex_idx] : vertex_index_map) { + // Find connected segments + auto vertex_desc = vertex->get_descriptor(); + if (vertex_desc != PR::Graph::null_vertex()) { + auto adj_edges = boost::adjacent_vertices(vertex_desc, *m_graph); + for (auto v_it = 
adj_edges.first; v_it != adj_edges.second; ++v_it) { + auto edge_desc = boost::edge(vertex_desc, *v_it, *m_graph); + if (edge_desc.second) { + auto& edge_bundle = (*m_graph)[edge_desc.first]; + if (edge_bundle.segment && !edge_bundle.segment->fits().empty()) { + auto& fits = edge_bundle.segment->fits(); + // Find connected segment points + if (vertex_idx == fits.front().index) { + connected_vec[vertex_idx].push_back(fits[1].index); + } else if (vertex_idx == fits.back().index) { + connected_vec[vertex_idx].push_back(fits[fits.size() - 2].index); + } + } + } + } + } + } + + // Build weight matrices and apply compact matrix analysis + Eigen::SparseMatrix MU(n_2D_u, n_2D_u), MV(n_2D_v, n_2D_v), MW(n_2D_w, n_2D_w); + for (int k = 0; k < n_2D_u; k++) MU.insert(k, k) = 1; + for (int k = 0; k < n_2D_v; k++) MV.insert(k, k) = 1; + for (int k = 0; k < n_2D_w; k++) MW.insert(k, k) = 1; + + Eigen::SparseMatrix RUT = RU.transpose(); + Eigen::SparseMatrix RVT = RV.transpose(); + Eigen::SparseMatrix RWT = RW.transpose(); + + // Apply compact matrix regularization + auto overlap_u = calculate_compact_matrix_multi(connected_vec, MU, RUT, n_2D_u, n_3D_pos, 3.0); + auto overlap_v = calculate_compact_matrix_multi(connected_vec, MV, RVT, n_2D_v, n_3D_pos, 3.0); + auto overlap_w = calculate_compact_matrix_multi(connected_vec, MW, RWT, n_2D_w, n_3D_pos, 2.0); + + // Build regularization matrix + Eigen::SparseMatrix FMatrix(n_3D_pos, n_3D_pos); + + double dead_ind_weight = 0.3; + double dead_col_weight = 0.9; + double close_ind_weight = 0.25; + double close_col_weight = 0.75; + + for (size_t i = 0; i < n_3D_pos; i++) { + if (i >= connected_vec.size()) continue; + + bool flag_u = reg_flag_u[i]; + bool flag_v = reg_flag_v[i]; + bool flag_w = reg_flag_w[i]; + + double weight = 0; + if (flag_u) weight += dead_ind_weight; + if (flag_v) weight += dead_ind_weight; + if (flag_w) weight += dead_col_weight; + + double scaling = (connected_vec[i].size() > 2) ? 
2.0 / connected_vec[i].size() : 1.0; + + for (size_t j = 0; j < connected_vec[i].size(); j++) { + if (j >= overlap_u.size() || i >= overlap_u.size()) continue; + + double weight1 = weight; + int row = i; + int col = connected_vec[i][j]; + + if (overlap_u[i].first > 0.5) weight1 += close_ind_weight * pow(overlap_u[i].first - 0.5, 2); + if (overlap_v[i].first > 0.5) weight1 += close_ind_weight * pow(overlap_v[i].first - 0.5, 2); + if (overlap_w[i].first > 0.5) weight1 += close_col_weight * pow(overlap_w[i].first - 0.5, 2); + + double dx_norm = (local_dx[row] + 0.001 * units::cm) / (0.6 * units::cm); + FMatrix.coeffRef(row, row) += -weight1 * scaling / dx_norm; + FMatrix.coeffRef(row, col) += weight1 * scaling / dx_norm; + } + } + + // Apply regularization strength + double lambda = 0.0008; + if (!flag_dQ_dx_fit_reg) lambda *= 0.01; + FMatrix *= lambda; + + Eigen::SparseMatrix FMatrixT = FMatrix.transpose(); + + // Solve the system + Eigen::BiCGSTAB> solver; + Eigen::VectorXd b = RUT * MU * data_u_2D + RVT * MV * data_v_2D + RWT * MW * data_w_2D; + Eigen::SparseMatrix A = RUT * MU * RU + RVT * MV * RV + RWT * MW * RW + FMatrixT * FMatrix; + + solver.compute(A); + pos_3D = solver.solveWithGuess(b, pos_3D_init); + + if (std::isnan(solver.error())) { + pos_3D = solver.solve(b); + } + + // Calculate predictions + pred_data_u_2D = RU * pos_3D; + pred_data_v_2D = RV * pos_3D; + pred_data_w_2D = RW * pos_3D; + + // Calculate reduced chi2 + traj_reduced_chi2.clear(); + traj_reduced_chi2.resize(n_3D_pos, 0.0); + + // Update vertex and segment fit results + for (const auto& [vertex, vertex_idx] : vertex_index_map) { + if (vertex_idx >= n_3D_pos) continue; + + double dQ = pos_3D(vertex_idx); + double dx = local_dx[vertex_idx]; + double reduced_chi2 = traj_reduced_chi2[vertex_idx]; + + // Update vertex fit information + auto& vertex_fit = vertex->fit(); + vertex_fit.dQ = dQ; + vertex_fit.dx = dx; + vertex_fit.reduced_chi2 = reduced_chi2; + } + + for (auto e_it = edge_range.first; 
e_it != edge_range.second; ++e_it) { + auto& edge_bundle = (*m_graph)[*e_it]; + if (!edge_bundle.segment) continue; + + auto segment = edge_bundle.segment; + auto& fits = segment->fits(); + if (fits.empty()) continue; + + // Update segment fit information + for (size_t i = 0; i < fits.size(); i++) { + int idx = segment_point_index_map[std::make_pair(segment, i)]; + if (idx >= n_3D_pos) continue; + + fits[i].dQ = pos_3D(idx); + fits[i].dx = local_dx[idx]; + fits[i].reduced_chi2 = traj_reduced_chi2[idx]; + } + } +} + + +void WireCell::Clus::TrackFitting::dQ_dx_fit(double dis_end_point_ext, bool flag_dQ_dx_fit_reg) { + if (fine_tracking_path.size() <= 1) return; + + // Clear output vectors + dQ.clear(); + dx.clear(); + reduced_chi2.clear(); + + // Update charge data for shared wires (uses existing toolkit function) + update_dQ_dx_data(); + + const double DL = m_params.DL; // WAS: const double DL = 6.4e-7; + const double DT = m_params.DT; // WAS: const double DT = 9.8e-7; + const double col_sigma_w_T = m_params.col_sigma_w_T; // WAS: const double col_sigma_w_T = 0.188060 * 0.2; + const double ind_sigma_u_T = m_params.ind_sigma_u_T; // WAS: const double ind_sigma_u_T = 0.402993 * 0.3; + const double ind_sigma_v_T = m_params.ind_sigma_v_T; // WAS: const double ind_sigma_v_T = 0.402993 * 0.5; + const double rel_uncer_ind = m_params.rel_uncer_ind; // WAS: const double rel_uncer_ind = 0.075; + const double rel_uncer_col = m_params.rel_uncer_col; // WAS: const double rel_uncer_col = 0.05; + const double add_uncer_ind = m_params.add_uncer_ind; // WAS: const double add_uncer_ind = 0.0; + const double add_uncer_col = m_params.add_uncer_col; // WAS: const double add_uncer_col = 300.0; + const double add_sigma_L = m_params.add_sigma_L; // WAS: const double add_sigma_L = 1.428249 * 0.5; + + std::map>> map_U_charge_2D, map_V_charge_2D, map_W_charge_2D; + // Fill the maps from m_charge_data + // Fill the maps from m_charge_data + for (const auto& [coord_readout, charge_measurement] 
: m_charge_data) { + int apa = coord_readout.apa; + int time = coord_readout.time; + int channel = coord_readout.channel; + + // Get wires for this channel using the dedicated function + auto wires_info = get_wires_for_channel(apa, channel); + if (wires_info.empty()) continue; // Skip if no wire mapping found + + std::set associated_coords; + int plane = -1; // asssuming all wires are from the same plane name ... + // Process each wire associated with this channel + for (const auto& wire_info : wires_info) { + int face = std::get<0>(wire_info); + plane = std::get<1>(wire_info); + int wire = std::get<2>(wire_info); + + // Convert plane int to WirePlaneLayer_t + WirePlaneLayer_t plane_layer = (plane == 0) ? kUlayer : + (plane == 1) ? kVlayer : kWlayer; + + // Create TrackFitting::Coord2D with all fields filled + TrackFitting::Coord2D coord_2d(apa, face, time, wire, channel, plane_layer); + associated_coords.insert(coord_2d); + } + + // Create the pair for storage + std::pair> charge_coord_pair = std::make_pair(charge_measurement, associated_coords); + + // Store in appropriate plane map + switch (plane) { + case 0: // U plane + map_U_charge_2D[coord_readout] = charge_coord_pair; + break; + case 1: // V plane + map_V_charge_2D[coord_readout] = charge_coord_pair; + break; + case 2: // W plane + map_W_charge_2D[coord_readout] = charge_coord_pair; + break; + } + } + + + std::cout << "dQ/dx: " << map_U_charge_2D.size() << " " << map_V_charge_2D.size() << " " << map_W_charge_2D.size() << std::endl; + // for (const auto& [coord_key, result] : map_U_charge_2D) { + // std::cout << "CoordReadout: APA=" << coord_key.apa + // << ", Time=" << coord_key.time + // << ", Channel=" << coord_key.channel << std::endl; + // const auto& measurement = result.first; + // std::cout << " Charge: " << measurement.charge + // << ", ChargeErr: " << measurement.charge_err + // << ", Flag: " << measurement.flag << std::endl; + // std::cout << " Associated Coord2D set size: " << 
result.second.size() << std::endl; + // for (const auto& coord2d : result.second) { + // std::cout << " Coord2D: APA=" << coord2d.apa + // << ", Face=" << coord2d.face + // << ", Time=" << coord2d.time + // << ", Wire=" << coord2d.wire + // << ", Channel=" << coord2d.channel + // << ", Plane=" << coord2d.plane << std::endl; + // } + // } + + + int n_3D_pos = fine_tracking_path.size(); + // need to separate measurements into U, V, W and form separate matrices ... + // need to store measurement --> U, V, W --> measurements + int n_2D_u = map_U_charge_2D.size(); + int n_2D_v = map_V_charge_2D.size(); + int n_2D_w = map_W_charge_2D.size(); + + if (n_2D_u == 0 && n_2D_v == 0 && n_2D_w == 0) return; + + // // Initialize Eigen matrices and vectors + Eigen::VectorXd pos_3D(n_3D_pos), data_u_2D(n_2D_u), data_v_2D(n_2D_v), data_w_2D(n_2D_w); + Eigen::VectorXd pred_data_u_2D(n_2D_u), pred_data_v_2D(n_2D_v), pred_data_w_2D(n_2D_w); + + Eigen::SparseMatrix RU(n_2D_u, n_3D_pos); + Eigen::SparseMatrix RV(n_2D_v, n_3D_pos); + Eigen::SparseMatrix RW(n_2D_w, n_3D_pos); + + Eigen::VectorXd pos_3D_init(n_3D_pos); + std::vector reg_flag_u(n_3D_pos, 0), reg_flag_v(n_3D_pos, 0), reg_flag_w(n_3D_pos, 0); + + + // Initialize solution vector + for (int i = 0; i < n_3D_pos; i++) { + pos_3D_init(i) = 50000.0; // Initial guess + } + + // Fill data vectors with charge/uncertainty ratios + { + int n_u = 0; + for (const auto& [coord_key, result] : map_U_charge_2D) { + const auto& measurement = result.first; + if (measurement.charge >0) { + double charge = measurement.charge; + double charge_err = measurement.charge_err; + double total_err = sqrt(pow(charge_err, 2) + pow(charge * rel_uncer_ind, 2) + pow(add_uncer_ind, 2)); + data_u_2D(n_u) = charge / total_err; + } else { + data_u_2D(n_u) = 0; + } + // std::cout << coord_key.time << " " << coord_key.channel << " " << measurement.charge << " " << measurement.charge_err << " " << data_u_2D(n_u) << std::endl; + n_u++; + } + int n_v = 0; + for (const 
auto& [coord_key, result] : map_V_charge_2D) { + const auto& measurement = result.first; + if (measurement.charge >0) { + double charge = measurement.charge; + double charge_err = measurement.charge_err; + double total_err = sqrt(pow(charge_err, 2) + pow(charge * rel_uncer_ind, 2) + pow(add_uncer_ind, 2)); + data_v_2D(n_v) = charge / total_err; + } else { + data_v_2D(n_v) = 0; + } + n_v++; + } + int n_w = 0; + for (const auto& [coord_key, result] : map_W_charge_2D) { + const auto& measurement = result.first; + if (measurement.charge >0) { + double charge = measurement.charge; + double charge_err = measurement.charge_err; + double total_err = sqrt(pow(charge_err, 2) + pow(charge * rel_uncer_col, 2) + pow(add_uncer_col, 2)); + data_w_2D(n_w) = charge / total_err; + } else { + data_w_2D(n_w) = 0; + } + n_w++; + } + } + + // Calculate dx values (path segment lengths) + dx.resize(n_3D_pos); + for (int i = 0; i < n_3D_pos; i++) { + WireCell::Point prev_rec_pos, next_rec_pos; + WireCell::Point curr_rec_pos = fine_tracking_path.at(i).first; + + if (i == 0) { + // First point: extrapolate backward + if (n_3D_pos > 1) { + WireCell::Point next_point = fine_tracking_path.at(i+1).first; + WireCell::Vector dir = next_point - curr_rec_pos; + double length = dir.magnitude(); + if (length > 0) { + prev_rec_pos = curr_rec_pos - (dir / length) * dis_end_point_ext; + } else { + prev_rec_pos = curr_rec_pos; + } + next_rec_pos = (curr_rec_pos + next_point) * 0.5; + } else { + prev_rec_pos = curr_rec_pos; + next_rec_pos = curr_rec_pos; + } + } else if (i == n_3D_pos - 1) { + // Last point: extrapolate forward + WireCell::Point prev_point = fine_tracking_path.at(i-1).first; + WireCell::Vector dir = curr_rec_pos - prev_point; + double length = dir.magnitude(); + if (length > 0) { + next_rec_pos = curr_rec_pos + (dir / length) * dis_end_point_ext; + } else { + next_rec_pos = curr_rec_pos; + } + prev_rec_pos = (curr_rec_pos + prev_point) * 0.5; + } else { + // Middle point + prev_rec_pos = 
(curr_rec_pos + fine_tracking_path.at(i-1).first) * 0.5; + next_rec_pos = (curr_rec_pos + fine_tracking_path.at(i+1).first) * 0.5; + } + + dx[i] = (curr_rec_pos - prev_rec_pos).magnitude() + (curr_rec_pos - next_rec_pos).magnitude(); + + // std::cout << i << " " << dx[i] << std::endl; + } + + // Build response matrices using geometry information + for (int i = 0; i < n_3D_pos; i++) { + WireCell::Point curr_rec_pos = fine_tracking_path.at(i).first; + auto segment = fine_tracking_path.at(i).second; + auto cluster = segment->cluster(); + const auto transform = m_pcts->pc_transform(cluster->get_scope_transform(cluster->get_default_scope())); + double cluster_t0 = cluster->get_cluster_t0(); + auto first_blob = cluster->children()[0]; + int cur_ntime_ticks = first_blob->slice_index_max() - first_blob->slice_index_min(); + + + int apa = paf.at(i).first; + int face = paf.at(i).second; + + WirePlaneId wpid_key(kAllLayers, face, apa); + + // Get geometry parameters from wpid_offsets and wpid_slopes + auto offset_it = wpid_offsets.find(wpid_key); + auto slope_it = wpid_slopes.find(wpid_key); + auto geom_it = wpid_geoms.find(wpid_key); + + if (offset_it == wpid_offsets.end() || slope_it == wpid_slopes.end() || geom_it == wpid_geoms.end()) continue; + + double offset_t = std::get<0>(offset_it->second); + double offset_u = std::get<1>(offset_it->second); + double offset_v = std::get<2>(offset_it->second); + double offset_w = std::get<3>(offset_it->second); + + double slope_x = std::get<0>(slope_it->second); + auto slope_yu = std::get<1>(slope_it->second).first; + auto slope_zu = std::get<1>(slope_it->second).second; + auto slope_yv = std::get<2>(slope_it->second).first; + auto slope_zv = std::get<2>(slope_it->second).second; + auto slope_yw = std::get<3>(slope_it->second).first; + auto slope_zw = std::get<3>(slope_it->second).second; + + double time_tick_width = std::get<0>(geom_it->second); + double pitch_u = std::get<1>(geom_it->second); + double pitch_v = 
std::get<2>(geom_it->second); + double pitch_w = std::get<3>(geom_it->second); + + + // Calculate previous and next positions for Gaussian integration + WireCell::Point prev_rec_pos, next_rec_pos; + if (i == 0) { + if (n_3D_pos > 1) { + WireCell::Point next_point = fine_tracking_path.at(i+1).first; + next_rec_pos = (curr_rec_pos + next_point) * 0.5; + WireCell::Vector dir = next_point - curr_rec_pos; + double length = dir.magnitude(); + if (length > 0) { + prev_rec_pos = curr_rec_pos - (dir / length) * dis_end_point_ext; + } else { + prev_rec_pos = curr_rec_pos; + } + } else { + prev_rec_pos = next_rec_pos = curr_rec_pos; + } + } else if (i == n_3D_pos - 1) { + WireCell::Point prev_point = fine_tracking_path.at(i-1).first; + prev_rec_pos = (curr_rec_pos + prev_point) * 0.5; + WireCell::Vector dir = curr_rec_pos - prev_point; + double length = dir.magnitude(); + if (length > 0) { + next_rec_pos = curr_rec_pos + (dir / length) * dis_end_point_ext; + } else { + next_rec_pos = curr_rec_pos; + } + } else { + prev_rec_pos = (curr_rec_pos + fine_tracking_path.at(i-1).first) * 0.5; + next_rec_pos = (curr_rec_pos + fine_tracking_path.at(i+1).first) * 0.5; + } + + // Create Gaussian integration points and weights + std::vector centers_U, centers_V, centers_W, centers_T; + std::vector sigmas_U, sigmas_V, sigmas_W, sigmas_T; + std::vector weights; + + // Sample 5 points along each half-segment + for (int j = 0; j < 5; j++) { + // First half (prev to curr) + WireCell::Point reco_pos = prev_rec_pos + (curr_rec_pos - prev_rec_pos) * (j + 0.5) / 5.0; + // find out the raw position ... 
+ auto reco_pos_raw = transform->backward(reco_pos, cluster_t0, apa, face); + + double central_T = offset_t + slope_x * reco_pos_raw.x(); + double central_U = offset_u + (slope_yu * reco_pos_raw.y() + slope_zu * reco_pos_raw.z()); + double central_V = offset_v + (slope_yv * reco_pos_raw.y() + slope_zv * reco_pos_raw.z()); + double central_W = offset_w + (slope_yw * reco_pos_raw.y() + slope_zw * reco_pos_raw.z()); + double weight = (curr_rec_pos - prev_rec_pos).magnitude(); + + // Calculate drift time and diffusion + double drift_time = std::max(m_params.min_drift_time, reco_pos.x() / time_tick_width * 0.5*units::us ); + double diff_sigma_L = sqrt(2 * DL * drift_time); + double diff_sigma_T = sqrt(2 * DT * drift_time); + + double sigma_L = sqrt(pow(diff_sigma_L, 2) + pow(add_sigma_L, 2)) / time_tick_width; + double sigma_T_u = sqrt(pow(diff_sigma_T, 2) + pow(ind_sigma_u_T, 2)) / pitch_u; + double sigma_T_v = sqrt(pow(diff_sigma_T, 2) + pow(ind_sigma_v_T, 2)) / pitch_v; + double sigma_T_w = sqrt(pow(diff_sigma_T, 2) + pow(col_sigma_w_T, 2)) / pitch_w; + + centers_U.push_back(central_U); + centers_V.push_back(central_V); + centers_W.push_back(central_W); + centers_T.push_back(central_T); + weights.push_back(weight); + sigmas_U.push_back(sigma_T_u); + sigmas_V.push_back(sigma_T_v); + sigmas_W.push_back(sigma_T_w); + sigmas_T.push_back(sigma_L); + + // Second half (curr to next) + reco_pos = next_rec_pos + (curr_rec_pos - next_rec_pos) * (j + 0.5) / 5.0; + reco_pos_raw = transform->backward(reco_pos, cluster_t0, apa, face); + + central_T = offset_t + slope_x * reco_pos_raw.x(); + central_U = offset_u + (slope_yu * reco_pos_raw.y() + slope_zu * reco_pos_raw.z()); + central_V = offset_v + (slope_yv * reco_pos_raw.y() + slope_zv * reco_pos_raw.z()); + central_W = offset_w + (slope_yw * reco_pos_raw.y() + slope_zw * reco_pos_raw.z()); + weight = (curr_rec_pos - next_rec_pos).magnitude(); + + drift_time = std::max(m_params.min_drift_time, reco_pos.x() / time_tick_width * 
0.5*units::us ); + diff_sigma_L = sqrt(2 * DL * drift_time); + diff_sigma_T = sqrt(2 * DT * drift_time); + + // std::cout << drift_time << " " << DL << " " << DT << " " << diff_sigma_L << " " << diff_sigma_T << std::endl; + + + sigma_L = sqrt(pow(diff_sigma_L, 2) + pow(add_sigma_L, 2)) / time_tick_width; + sigma_T_u = sqrt(pow(diff_sigma_T, 2) + pow(ind_sigma_u_T, 2)) / pitch_u; + sigma_T_v = sqrt(pow(diff_sigma_T, 2) + pow(ind_sigma_v_T, 2)) / pitch_v; + sigma_T_w = sqrt(pow(diff_sigma_T, 2) + pow(col_sigma_w_T, 2)) / pitch_w; + + centers_U.push_back(central_U); + centers_V.push_back(central_V); + centers_W.push_back(central_W); + centers_T.push_back(central_T); + weights.push_back(weight); + sigmas_U.push_back(sigma_T_u); + sigmas_V.push_back(sigma_T_v); + sigmas_W.push_back(sigma_T_w); + sigmas_T.push_back(sigma_L); + } + + // std::cout << i << " U "; + // for (size_t idx = 0; idx < centers_U.size(); ++idx) { + // std::cout << centers_U[idx] << " "; + // } + // std::cout << std::endl; + + // std::cout << i << " V "; + // for (size_t idx = 0; idx < centers_V.size(); ++idx) { + // std::cout << centers_V[idx] << " "; + // } + // std::cout << std::endl; + + // std::cout << i << " W "; + // for (size_t idx = 0; idx < centers_W.size(); ++idx) { + // std::cout << centers_W[idx] << " "; + // } + // std::cout << std::endl; + + // std::cout << i << " T "; + // for (size_t idx = 0; idx < centers_T.size(); ++idx) { + // std::cout << centers_T[idx] << " "; + // } + // std::cout << std::endl; + + // std::cout << i << " Weights "; + // for (size_t idx = 0; idx < weights.size(); ++idx) { + // std::cout << weights[idx] << " "; + // } + // std::cout << std::endl; + + // std::cout <> set_UT; + for (const auto& [coord_key, result] : map_U_charge_2D) { + const auto& measurement = result.first; + const auto& Coord2D_set = result.second; + + for (const auto& coord2d : Coord2D_set) { + // coord2d: TrackFitting::Coord2D + // Only process if plane matches U + if (coord2d.plane != kUlayer 
|| coord2d.apa != apa || coord2d.face != face) continue; + int wire = coord2d.wire; + int time = coord2d.time; + + set_UT.insert(std::make_pair(wire, time)); + + // if (wire !=938 || time != 7176) continue; + + if (abs(wire - centers_U.front()) <= m_params.search_range && abs(time - centers_T.front()) <= m_params.search_range * cur_ntime_ticks) { + double value = cal_gaus_integral_seg(time, wire, centers_T, sigmas_T, centers_U, sigmas_U, weights, 0, 4, cur_ntime_ticks); + + + if (measurement.flag == 0 && value > 0) reg_flag_u[i] = 1; // Dead channel + + if (value > 0 && measurement.charge > 0 && measurement.flag != 0) { + double charge = measurement.charge; + double charge_err = measurement.charge_err; + double total_err = sqrt(pow(charge_err, 2) + pow(charge * rel_uncer_ind, 2) + pow(add_uncer_ind, 2)); + RU.insert(n_u, i) = value / total_err; + + // std::cout << n_u << " " << i << " " << time << " " << wire << " " << i << " " << value / total_err << std::endl; + } + } + } + n_u++; + } + int n_v = 0; + std::set> set_VT; + for (const auto& [coord_key, result] : map_V_charge_2D) { + const auto& measurement = result.first; + const auto& Coord2D_set = result.second; + + for (const auto& coord2d : Coord2D_set) { + // coord2d: TrackFitting::Coord2D + // Only process if plane matches V + if (coord2d.plane != kVlayer || coord2d.apa != apa || coord2d.face != face) continue; + int wire = coord2d.wire; + int time = coord2d.time; + set_VT.insert(std::make_pair(wire, time)); + + if (abs(wire - centers_V.front()) <= m_params.search_range && abs(time - centers_T.front()) <= m_params.search_range * cur_ntime_ticks) { + double value = cal_gaus_integral_seg(time, wire, centers_T, sigmas_T, centers_V, sigmas_V, weights, 0, 4, cur_ntime_ticks); + + if (measurement.flag == 0 && value > 0) reg_flag_v[i] = 1; // Dead channel + + if (value > 0 && measurement.charge > 0 && measurement.flag != 0) { + double charge = measurement.charge; + double charge_err = measurement.charge_err; + double 
total_err = sqrt(pow(charge_err, 2) + pow(charge * rel_uncer_ind, 2) + pow(add_uncer_ind, 2)); + RV.insert(n_v, i) = value / total_err; + } + + } + } + n_v++; + } + int n_w = 0; + std::set> set_WT; + for (const auto& [coord_key, result] : map_W_charge_2D) { + const auto& measurement = result.first; + const auto& Coord2D_set = result.second; + + for (const auto& coord2d : Coord2D_set) { + // coord2d: TrackFitting::Coord2D + // Only process if plane matches W + if (coord2d.plane != kWlayer || coord2d.apa != apa || coord2d.face != face) continue; + int wire = coord2d.wire; + int time = coord2d.time; + set_WT.insert(std::make_pair(wire, time)); + if (abs(wire - centers_W.front()) <= m_params.search_range && abs(time - centers_T.front()) <= m_params.search_range * cur_ntime_ticks) { + double value = cal_gaus_integral_seg(time, wire, centers_T, sigmas_T, centers_W, sigmas_W, weights, 0, 4, cur_ntime_ticks); + + if (measurement.flag == 0 && value > 0) reg_flag_w[i] = 1; // Dead channel + + if (value > 0 && measurement.charge > 0 && measurement.flag != 0) { + double charge = measurement.charge; + double charge_err = measurement.charge_err; + double total_err = sqrt(pow(charge_err, 2) + pow(charge * rel_uncer_col, 2) + pow(add_uncer_col, 2)); + RW.insert(n_w, i) = value / total_err; + + // std::cout << n_w << " " << i << " " << time << " " << wire << " " << i << " " << value / total_err << std::endl; + + } + } + } + n_w++; + } + + + // Additional dead channel checks + if (reg_flag_u[i] == 0) { // apa, face + for (size_t kk = 0; kk < centers_U.size(); kk++) { + if (set_UT.find(std::make_pair(std::round(centers_U[kk]), std::round(centers_T[kk]/cur_ntime_ticks)*cur_ntime_ticks)) == set_UT.end()) { + reg_flag_u[i] = 1; + break; + } + } + } + if (reg_flag_v[i] == 0) { // apa, face + for (size_t kk = 0; kk < centers_V.size(); kk++) { + if (set_VT.find(std::make_pair(std::round(centers_V[kk]), std::round(centers_T[kk]/cur_ntime_ticks)*cur_ntime_ticks)) == set_VT.end()) { + 
reg_flag_v[i] = 1; + break; + } + } + } + if (reg_flag_w[i] == 0) { // apa, face + for (size_t kk = 0; kk < centers_W.size(); kk++) { + if (set_WT.find(std::make_pair(std::round(centers_W[kk]), std::round(centers_T[kk]/cur_ntime_ticks)*cur_ntime_ticks)) == set_WT.end()) { + reg_flag_w[i] = 1; + break; + } + } + } + // std::cout << i << " " << reg_flag_u[i] << " " << reg_flag_v[i] << " " << reg_flag_w[i] << std::endl; + + } + + // Calculate compact matrices for overlap analysis + Eigen::SparseMatrix RUT = RU.transpose(); + Eigen::SparseMatrix RVT = RV.transpose(); + Eigen::SparseMatrix RWT = RW.transpose(); + + Eigen::SparseMatrix MU(n_2D_u, n_2D_u), MV(n_2D_v, n_2D_v), MW(n_2D_w, n_2D_w); + for (int k = 0; k < n_2D_u; k++) MU.insert(k, k) = 1; + for (int k = 0; k < n_2D_v; k++) MV.insert(k, k) = 1; + for (int k = 0; k < n_2D_w; k++) MW.insert(k, k) = 1; + + // std::cout << "U: " << std::endl; + std::vector> overlap_u = calculate_compact_matrix(MU, RUT, n_2D_u, n_3D_pos, 3); + // std::cout << "V: " <> overlap_v = calculate_compact_matrix(MV, RVT, n_2D_v, n_3D_pos, 3); + // std::cout << "W: " << std::endl; + std::vector> overlap_w = calculate_compact_matrix(MW, RWT, n_2D_w, n_3D_pos, 2); + +// for (size_t i=0;i!=n_3D_pos;i++){ +// // std::cout << i << " " << reg_flag_u.at(i) << " " << reg_flag_v.at(i) << " " << reg_flag_w.at(i) << std::endl; +// std::cout << i << " " << (overlap_u.at(i).first + overlap_u.at(i).second)/2. << " " +// << (overlap_v.at(i).first + overlap_v.at(i).second)/2. << " " +// << (overlap_w.at(i).first + overlap_w.at(i).second)/2. 
<< " " << std::endl; +// // << MU.coeffRef(i,i) << " " << MV.coeffRef(i,i) << " " << MW.coeffRef(i,i) << std::endl; +// } + + // int n_w = 0; + // for (const auto& [coord_key, result] : map_W_charge_2D) { + // const auto& measurement = result.first; + // const auto& Coord2D_set = result.second; + // int wire, time; + // for (const auto& coord2d : Coord2D_set) { + // wire = coord2d.wire; + // time = coord2d.time; + // } + // std::cout << n_w << " " << wire << " " << time << " " << MW.coeffRef(n_w,n_w) << " " << measurement.charge << " " << measurement.charge_err << std::endl; + // n_w++; + // } + + + // Add regularization based on dead channels and overlaps + Eigen::SparseMatrix FMatrix(n_3D_pos, n_3D_pos); + + const double dead_ind_weight = m_params.dead_ind_weight; + const double dead_col_weight = m_params.dead_col_weight; + const double close_ind_weight = m_params.close_ind_weight; + const double close_col_weight = m_params.close_col_weight; + + for (int i = 0; i < n_3D_pos; i++) { + bool flag_u = reg_flag_u[i]; + bool flag_v = reg_flag_v[i]; + bool flag_w = reg_flag_w[i]; + + if (n_3D_pos != 1) { + double weight = 0; + if (flag_u) weight += dead_ind_weight; + if (flag_v) weight += dead_ind_weight; + if (flag_w) weight += dead_col_weight; + + if (i==0){ + if (overlap_u[i].second > m_params.overlap_th) weight += close_ind_weight * pow(2 * overlap_u[i].second - 1, 2); + if (overlap_v[i].second > m_params.overlap_th) weight += close_ind_weight * pow(2 * overlap_v[i].second - 1, 2); + if (overlap_w[i].second > m_params.overlap_th) weight += close_col_weight * pow(2 * overlap_w[i].second - 1, 2); + }else if (i==n_3D_pos-1){ + if (overlap_u[i].first > m_params.overlap_th) weight += close_ind_weight * pow(2 * overlap_u[i].first - 1, 2); + if (overlap_v[i].first > m_params.overlap_th) weight += close_ind_weight * pow(2 * overlap_v[i].first - 1, 2); + if (overlap_w[i].first > m_params.overlap_th) weight += close_col_weight * pow(2 * overlap_w[i].first - 1, 2); + }else{ + 
if (overlap_u.at(i).first + overlap_u.at(i).second > 2*m_params.overlap_th) weight += close_ind_weight * pow(overlap_u.at(i).first + overlap_u.at(i).second - 1,2); + if (overlap_v.at(i).first + overlap_v.at(i).second > 2*m_params.overlap_th) weight += close_ind_weight * pow(overlap_v.at(i).first + overlap_v.at(i).second - 1,2); + if (overlap_w.at(i).first + overlap_w.at(i).second > 2*m_params.overlap_th) weight += close_col_weight * pow(overlap_w.at(i).first + overlap_w.at(i).second - 1,2); + + } + + double dx_norm = (dx[i] + 0.001*units::cm) / m_params.dx_norm_length; // Normalize by 0.6 mm + + if (i == 0) { + FMatrix.insert(0, 0) = -weight / dx_norm; + if (n_3D_pos > 1) FMatrix.insert(0, 1) = weight / ((dx[1] + 0.001*units::cm) / m_params.dx_norm_length) ; + } else if (i == n_3D_pos - 1) { + FMatrix.insert(i, i) = -weight / dx_norm; + FMatrix.insert(i, i-1) = weight / ((dx[i-1] + 0.001*units::cm) / m_params.dx_norm_length); + } else { + FMatrix.insert(i, i) = -2.0 * weight / dx_norm; + FMatrix.insert(i, i+1) = weight / ((dx[i+1] + 0.001*units::cm) / m_params.dx_norm_length); + FMatrix.insert(i, i-1) = weight / ((dx[i-1] + 0.001*units::cm) / m_params.dx_norm_length); + } + // std::cout << i << " " << flag_u << " " << flag_v << " " << flag_w << " " << overlap_u.at(i).first << " | " << overlap_u.at(i).second << " " << overlap_v.at(i).first << " | " << overlap_v.at(i).second << " " << overlap_w.at(i).first << " | " << overlap_w.at(i).second << " " << weight << " " << dx_norm << std::endl; + // std::cout << i << " " << FMatrix.coeff(i, i) << std::endl; + } + } + + // Apply regularization scaling + double lambda = m_params.lambda; + FMatrix *= lambda; + if (!flag_dQ_dx_fit_reg) FMatrix *= 0.01; + + // Solve the linear system + Eigen::SparseMatrix FMatrixT = FMatrix.transpose(); + Eigen::BiCGSTAB> solver; + + Eigen::VectorXd b = RUT * MU * data_u_2D + RVT * MV * data_v_2D + RWT * MW * data_w_2D; + Eigen::SparseMatrix A = RUT * MU * RU + RVT * MV * RV + RWT * MW * RW + 
FMatrixT * FMatrix; + + // for (int i = 0; i < b.size(); ++i) { + // // Example: print or process each element of b + // std::cout << "b[" << i << "] = " << b[i] << " " << A.coeff(i, i) << " " << lambda << " " << flag_dQ_dx_fit_reg << std::endl; + // // You can add your processing logic here + // } + + solver.compute(A); + pos_3D = solver.solveWithGuess(b, pos_3D_init); + + if (std::isnan(solver.error())) { + pos_3D = solver.solve(b); + } + + // Extract dQ values and apply corrections + dQ.resize(n_3D_pos); + for (int i=0;i!=n_3D_pos;i++){ + dQ[i] = pos_3D(i); + } + + // Calculate predictions and reduced chi-squared + pred_data_u_2D = RU * pos_3D; + pred_data_v_2D = RV * pos_3D; + pred_data_w_2D = RW * pos_3D; + + // Calculate reduced chi-squared for each 3D point + reduced_chi2.resize(n_3D_pos); + for (int k = 0; k < n_3D_pos; k++) { + double sum[3] = {0, 0, 0}; + double sum1[3] = {0, 0, 0}; + + for (Eigen::SparseMatrix::InnerIterator it(RU, k); it; ++it) { + if (pred_data_u_2D(it.row()) > 0) { + sum[0] += pow(data_u_2D(it.row()) - pred_data_u_2D(it.row()), 2) * + (it.value() * pos_3D(k)) / pred_data_u_2D(it.row()); + sum1[0] += (it.value() * pos_3D(k)) / pred_data_u_2D(it.row()); + } + } + + for (Eigen::SparseMatrix::InnerIterator it(RV, k); it; ++it) { + if (pred_data_v_2D(it.row()) > 0) { + sum[1] += pow(data_v_2D(it.row()) - pred_data_v_2D(it.row()), 2) * + (it.value() * pos_3D(k)) / pred_data_v_2D(it.row()); + sum1[1] += (it.value() * pos_3D(k)) / pred_data_v_2D(it.row()); + } + } + + for (Eigen::SparseMatrix::InnerIterator it(RW, k); it; ++it) { + if (pred_data_w_2D(it.row()) > 0) { + sum[2] += pow(data_w_2D(it.row()) - pred_data_w_2D(it.row()), 2) * + (it.value() * pos_3D(k)) / pred_data_w_2D(it.row()); + sum1[2] += (it.value() * pos_3D(k)) / pred_data_w_2D(it.row()); + } + } + + double total_chi2 = sum[0] + sum[1] + sum[2] / 4.0; // Weight collection plane differently + double total_weight = sum1[0] + sum1[1] + sum1[2]; + + reduced_chi2[k] = (total_weight 
> 0) ? sqrt(total_chi2 / total_weight) : 0; + } + + // Restore original charge data + recover_original_charge_data(); +} + +void TrackFitting::do_multi_tracking(bool flag_dQ_dx_fit_reg, bool flag_dQ_dx_fit, bool flag_force_load_data, bool flag_exclusion, bool flag_hack){ + // Reset fit properties for all vertices first + for (auto vp = boost::vertices(*m_graph); vp.first != vp.second; ++vp.first) { + auto vd = *vp.first; + auto& v_bundle = (*m_graph)[vd]; + if (v_bundle.vertex) { + bool flag_fix = v_bundle.vertex->flag_fix(); + if (!flag_fix){ + WireCell::Point p = v_bundle.vertex->wcpt().point; + auto& vertex_fit = v_bundle.vertex->fit(); + vertex_fit.point = p; + } + // v_bundle.vertex->reset_fit_prop(); + // v_bundle.vertex->flag_fix(flag_fix); + } + } + + bool flag_1st_tracking = true; + bool flag_2nd_tracking = true; + bool flag_dQ_dx = flag_dQ_dx_fit; + + // Prepare the data for the fit - collect charge information from 2D projections + if (flag_force_load_data || global_rb_map.size() == 0){ + prepare_data(); + fill_global_rb_map(); + } + + // First round of organizing the path from the path_wcps (shortest path) + double low_dis_limit = m_params.low_dis_limit; + double end_point_limit = m_params.end_point_limit; + organize_segments_path(low_dis_limit, end_point_limit); + + + + if (flag_1st_tracking){ + form_map_graph(flag_exclusion, m_params.end_point_factor, m_params.mid_point_factor, m_params.nlevel, m_params.time_tick_cut, m_params.charge_cut); + multi_trajectory_fit(1, m_params.div_sigma); + } + + + if (flag_2nd_tracking){ + // second round trajectory fit ... 
+ low_dis_limit = m_params.low_dis_limit/2.; + end_point_limit = m_params.end_point_limit/2.; + + // organize path + organize_segments_path_2nd(low_dis_limit, end_point_limit); + + form_map_graph(flag_exclusion, m_params.end_point_factor, m_params.mid_point_factor, m_params.nlevel, m_params.time_tick_cut, m_params.charge_cut); + + multi_trajectory_fit(1, m_params.div_sigma); + + // organize path + low_dis_limit = 0.6*units::cm; + organize_segments_path_3rd(low_dis_limit); + } + + + if (flag_dQ_dx){ + for (auto vp = boost::vertices(*m_graph); vp.first != vp.second; ++vp.first) { + auto vd = *vp.first; + auto& v_bundle = (*m_graph)[vd]; + if (v_bundle.vertex) { + bool flag_fix = v_bundle.vertex->flag_fix(); + v_bundle.vertex->reset_fit_prop(); + v_bundle.vertex->flag_fix(flag_fix); + } + } + + auto edge_range = boost::edges(*m_graph); + for (auto e_it = edge_range.first; e_it != edge_range.second; ++e_it) { + auto& edge_bundle = (*m_graph)[*e_it]; + if (edge_bundle.segment) { + edge_bundle.segment->reset_fit_prop(); + } + } + dQ_dx_multi_fit(end_point_limit, flag_dQ_dx_fit_reg); + } +} + + +void TrackFitting::do_single_tracking(std::shared_ptr segment, bool flag_dQ_dx_fit_reg, bool flag_dQ_dx_fit, bool flag_force_load_data, bool flag_hack) { + // Clear all internal tracking vectors + fine_tracking_path.clear(); + dQ.clear(); + dx.clear(); + pu.clear(); + pv.clear(); + pw.clear(); + pt.clear(); + paf.clear(); + reduced_chi2.clear(); + + bool flag_1st_tracking = true; + bool flag_2nd_tracking = true; + bool flag_dQ_dx = flag_dQ_dx_fit; + + // Prepare the data for the fit - collect charge information from 2D projections + if (flag_force_load_data || global_rb_map.size() == 0){ + prepare_data(); + fill_global_rb_map(); + } + + // std::cout << "Global Blob Map: " << global_rb_map.size() << std::endl; + + // First round of organizing the path from the path_wcps (shortest path) + double low_dis_limit = m_params.low_dis_limit; + double end_point_limit = 
m_params.end_point_limit; + + if (m_segments.find(segment) == m_segments.end()) { + // Handle empty segments case - could log warning or return + return; + } + // auto segment = *m_segments.begin(); + + auto pts = organize_orig_path(segment, low_dis_limit, end_point_limit); + if (pts.size() == 0) return; + else if (pts.size() == 1) { + const auto& segment_wcpts = segment->wcpts(); + if (!segment_wcpts.empty()) { + const auto& last_segment_point = segment_wcpts.back().point; + if (sqrt(pow(last_segment_point.x() - pts.back().x(), 2) + + pow(last_segment_point.y() - pts.back().y(), 2) + + pow(last_segment_point.z() - pts.back().z(), 2)) < 0.01*units::cm) { + return; + } else { + WireCell::Point p2(last_segment_point.x(), last_segment_point.y(), last_segment_point.z()); + pts.push_back(p2); + } + } + } + + std::cout << "After organization " << pts.size() << std::endl; + + std::vector>> ptss; + for (const auto& pt : pts) { + ptss.emplace_back(pt, segment); + } + + if (flag_1st_tracking) { + form_map(ptss, m_params.end_point_factor, m_params.mid_point_factor, m_params.nlevel, m_params.time_tick_cut, m_params.charge_cut); + trajectory_fit(ptss, 1, m_params.div_sigma); + } + // Check for very close start/end points and reset if needed + if (ptss.size() == 2) { + if (sqrt(pow(ptss.front().first.x() - ptss.back().first.x(), 2) + + pow(ptss.front().first.y() - ptss.back().first.y(), 2) + + pow(ptss.front().first.z() - ptss.back().first.z(), 2)) < 0.1*units::cm) { + ptss.clear(); + const auto& segment_wcpts = segment->wcpts(); + WireCell::Point p1(segment_wcpts.front().point.x(), segment_wcpts.front().point.y(), segment_wcpts.front().point.z()); + ptss.push_back(std::make_pair(p1,segment)); + WireCell::Point p2(segment_wcpts.back().point.x(), segment_wcpts.back().point.y(), segment_wcpts.back().point.z()); + ptss.push_back(std::make_pair(p2,segment)); + } + } + + if (ptss.size() <= 1) return; + + if (flag_2nd_tracking) { + // Second round trajectory fit with tighter 
parameters + low_dis_limit = m_params.low_dis_limit/2.; + end_point_limit = m_params.end_point_limit/2.; + + pts.clear(); + for (const auto& pt_pair : ptss) { + pts.push_back(pt_pair.first); + } + + + // // hack pts + // pts.resize(18); + // pts[0] = WireCell::Point(2192.13, -873.682, 2094.73); + // pts[1] = WireCell::Point(2190.05, -877.433, 2095.44); + // pts[2] = WireCell::Point(2187.79, -882.693, 2096.37); + // pts[3] = WireCell::Point(2181.57, -896.034, 2099.36); + // pts[4] = WireCell::Point(2178.83, -904.709, 2100.54); + // pts[5] = WireCell::Point(2173.46, -917.087, 2103.09); + // pts[6] = WireCell::Point(2168.32, -927.231, 2105.47); + // pts[7] = WireCell::Point(2162.27, -938.16, 2108.49); + // pts[8] = WireCell::Point(2158.9, -946.832, 2110.14); + // pts[9] = WireCell::Point(2153.28, -958.607, 2113.05); + // pts[10] = WireCell::Point(2147.69, -966.479, 2115.99); + // pts[11] = WireCell::Point(2143.06, -977.06, 2118.02); + // pts[12] = WireCell::Point(2139.68, -984.335, 2119.6); + // pts[13] = WireCell::Point(2134.63, -997.786, 2121.84); + // pts[14] = WireCell::Point(2127.28, -1006.52, 2125.44); + // pts[15] = WireCell::Point(2123.03, -1016.8, 2127.48); + // pts[16] = WireCell::Point(2121.65, -1024.26, 2128.3); + // pts[17] = WireCell::Point(2122.02, -1026.47, 2128.18); + // // + + organize_ps_path(segment, pts, low_dis_limit, end_point_limit); + + // std::cout << pts.size() << std::endl; + // for (size_t i = 0; i < pts.size(); ++i) { + // std::cout << "pts[" << i << "] = (" << pts[i].x() << ", " << pts[i].y() << ", " << pts[i].z() << ")" << std::endl; + // } + + ptss.clear(); + for (const auto& pt : pts) { + ptss.emplace_back(pt, segment); + } + form_map(ptss, m_params.end_point_factor, m_params.mid_point_factor, m_params.nlevel, m_params.time_tick_cut, m_params.charge_cut); + trajectory_fit(ptss, 2, m_params.div_sigma); + + pts.clear(); + for (const auto& pt_pair : ptss) { + pts.push_back(pt_pair.first); + } + + // Final path organization + 
organize_ps_path(segment, pts, low_dis_limit, 0); + + // Check for very close start/end points and reset if needed + if (pts.size() == 2) { + if (sqrt(pow(pts.front().x() - pts.back().x(), 2) + + pow(pts.front().y() - pts.back().y(), 2) + + pow(pts.front().z() - pts.back().z(), 2)) < 0.1*units::cm) { + pts.clear(); + const auto& segment_wcpts = segment->wcpts(); + WireCell::Point p1(segment_wcpts.front().point.x(), segment_wcpts.front().point.y(), segment_wcpts.front().point.z()); + pts.push_back(p1); + WireCell::Point p2(segment_wcpts.back().point.x(), segment_wcpts.back().point.y(), segment_wcpts.back().point.z()); + pts.push_back(p2); + } + } + + // std::cout << pts.size() << std::endl; + // for (size_t i = 0; i < pts.size(); ++i) { + // std::cout << "pts[" << i << "] = (" << pts[i].x() << ", " << pts[i].y() << ", " << pts[i].z() << ")" << std::endl; + // } + + // if (flag_hack){ + // // hack pts ... + // pts.clear(); + // pts.push_back(WireCell::Point(219.209*units::cm, -87.2848*units::cm, 209.453*units::cm)); + // pts.push_back(WireCell::Point(219.011*units::cm, -87.8189*units::cm, 209.55*units::cm)); + // pts.push_back(WireCell::Point(218.613*units::cm, -88.663*units::cm, 209.722*units::cm)); + // pts.push_back(WireCell::Point(218.329*units::cm, -89.169*units::cm, 209.853*units::cm)); + // pts.push_back(WireCell::Point(218.09*units::cm, -89.8885*units::cm, 209.969*units::cm)); + // pts.push_back(WireCell::Point(217.858*units::cm, -90.3128*units::cm, 210.076*units::cm)); + // pts.push_back(WireCell::Point(217.627*units::cm, -90.7371*units::cm, 210.184*units::cm)); + // pts.push_back(WireCell::Point(217.423*units::cm, -91.4211*units::cm, 210.266*units::cm)); + // pts.push_back(WireCell::Point(217.111*units::cm, -91.8551*units::cm, 210.423*units::cm)); + // pts.push_back(WireCell::Point(216.84*units::cm, -92.3369*units::cm, 210.55*units::cm)); + // pts.push_back(WireCell::Point(216.248*units::cm, -92.7898*units::cm, 210.798*units::cm)); + + // // pts.clear(); + 
// // pts.push_back(WireCell::Point(216.165*units::cm, -93.8529*units::cm, 210.894*units::cm)); + // // pts.push_back(WireCell::Point(215.996*units::cm, -94.3005*units::cm, 210.942*units::cm)); + // // pts.push_back(WireCell::Point(215.888*units::cm, -94.8903*units::cm, 211.002*units::cm)); + // // pts.push_back(WireCell::Point(215.38*units::cm, -95.4503*units::cm, 211.276*units::cm)); + // // pts.push_back(WireCell::Point(215.128*units::cm, -96.2677*units::cm, 211.414*units::cm)); + // // pts.push_back(WireCell::Point(214.801*units::cm, -96.7483*units::cm, 211.583*units::cm)); + // // pts.push_back(WireCell::Point(214.429*units::cm, -97.4961*units::cm, 211.745*units::cm)); + // // pts.push_back(WireCell::Point(214.225*units::cm, -98.0668*units::cm, 211.862*units::cm)); + // // pts.push_back(WireCell::Point(213.766*units::cm, -98.833*units::cm, 212.054*units::cm)); + // // pts.push_back(WireCell::Point(213.401*units::cm, -99.5325*units::cm, 212.199*units::cm)); + // // pts.push_back(WireCell::Point(213.182*units::cm, -100.042*units::cm, 212.303*units::cm)); + // // pts.push_back(WireCell::Point(212.763*units::cm, -100.63*units::cm, 212.518*units::cm)); + // // pts.push_back(WireCell::Point(212.414*units::cm, -101.107*units::cm, 212.697*units::cm)); + // // pts.push_back(WireCell::Point(212.25*units::cm, -101.563*units::cm, 212.785*units::cm)); + // // pts.push_back(WireCell::Point(212.086*units::cm, -102.018*units::cm, 212.872*units::cm)); + // // pts.push_back(WireCell::Point(212.224*units::cm, -102.854*units::cm, 212.839*units::cm)); + // // + + // // pts.clear(); + // // pts.push_back(WireCell::Point(219.209 * units::cm, -87.2848 * units::cm, 209.453 * units::cm)); + // // pts.push_back(WireCell::Point(218.997 * units::cm, -87.8182 * units::cm, 209.557 * units::cm)); + // // pts.push_back(WireCell::Point(218.806 * units::cm, -88.253 * units::cm, 209.641 * units::cm)); + // // pts.push_back(WireCell::Point(218.615 * units::cm, -88.6879 * units::cm, 209.725 * 
units::cm)); + // // pts.push_back(WireCell::Point(218.337 * units::cm, -89.1079 * units::cm, 209.836 * units::cm)); + // // pts.push_back(WireCell::Point(218.051 * units::cm, -89.8565 * units::cm, 209.978 * units::cm)); + // // pts.push_back(WireCell::Point(217.77 * units::cm, -90.5317 * units::cm, 210.12 * units::cm)); + // // pts.push_back(WireCell::Point(217.472 * units::cm, -91.2331 * units::cm, 210.25 * units::cm)); + // // pts.push_back(WireCell::Point(217.058 * units::cm, -91.8449 * units::cm, 210.441 * units::cm)); + // // pts.push_back(WireCell::Point(216.822 * units::cm, -92.5268 * units::cm, 210.548 * units::cm)); + // // pts.push_back(WireCell::Point(216.61 * units::cm, -92.9509 * units::cm, 210.65 * units::cm)); + // // pts.push_back(WireCell::Point(216.308 * units::cm, -93.6788 * units::cm, 210.81 * units::cm)); + // // pts.push_back(WireCell::Point(215.992 * units::cm, -94.2301 * units::cm, 210.939 * units::cm)); + // // pts.push_back(WireCell::Point(215.791 * units::cm, -94.7826 * units::cm, 211.062 * units::cm)); + // // pts.push_back(WireCell::Point(215.532 * units::cm, -95.1674 * units::cm, 211.193 * units::cm)); + // // pts.push_back(WireCell::Point(215.274 * units::cm, -95.8492 * units::cm, 211.346 * units::cm)); + // // pts.push_back(WireCell::Point(215.038 * units::cm, -96.3231 * units::cm, 211.467 * units::cm)); + // // pts.push_back(WireCell::Point(214.741 * units::cm, -96.7608 * units::cm, 211.604 * units::cm)); + // // pts.push_back(WireCell::Point(214.444 * units::cm, -97.1985 * units::cm, 211.741 * units::cm)); + // // pts.push_back(WireCell::Point(214.282 * units::cm, -97.8976 * units::cm, 211.824 * units::cm)); + // // pts.push_back(WireCell::Point(214.026 * units::cm, -98.3316 * units::cm, 211.943 * units::cm)); + // // pts.push_back(WireCell::Point(213.682 * units::cm, -99.0357 * units::cm, 212.085 * units::cm)); + // // pts.push_back(WireCell::Point(213.368 * units::cm, -99.7199 * units::cm, 212.216 * units::cm)); + // // 
pts.push_back(WireCell::Point(213.101 * units::cm, -100.164 * units::cm, 212.341 * units::cm)); + // // pts.push_back(WireCell::Point(212.742 * units::cm, -100.639 * units::cm, 212.527 * units::cm)); + // // pts.push_back(WireCell::Point(212.514 * units::cm, -101.179 * units::cm, 212.647 * units::cm)); + // // pts.push_back(WireCell::Point(212.117 * units::cm, -101.817 * units::cm, 212.831 * units::cm)); + // // pts.push_back(WireCell::Point(211.977 * units::cm, -102.455 * units::cm, 212.891 * units::cm)); + // } + + // Generate 2D projections + pu.clear(); + pv.clear(); + pw.clear(); + pt.clear(); + ptss.clear(); + paf.clear(); + for (const auto& p : pts) { + auto cluster = segment->cluster(); + const auto transform = m_pcts->pc_transform(cluster->get_scope_transform(cluster->get_default_scope())); + double cluster_t0 = cluster->get_cluster_t0(); + + auto test_wpid = m_dv->contained_by(p); + + if (test_wpid.apa()==-1) continue; + int apa = test_wpid.apa(); + int face = test_wpid.face(); + + auto p_raw = transform->backward(p, cluster_t0, apa, face); + WirePlaneId wpid(kAllLayers, face, apa); + auto offset_it = wpid_offsets.find(wpid); + auto slope_it = wpid_slopes.find(wpid); + + auto offset_t = std::get<0>(offset_it->second); + auto offset_u = std::get<1>(offset_it->second); + auto offset_v = std::get<2>(offset_it->second); + auto offset_w = std::get<3>(offset_it->second); + auto slope_x = std::get<0>(slope_it->second); + auto slope_yu = std::get<1>(slope_it->second).first; + auto slope_zu = std::get<1>(slope_it->second).second; + auto slope_yv = std::get<2>(slope_it->second).first; + auto slope_zv = std::get<2>(slope_it->second).second; + auto slope_yw = std::get<3>(slope_it->second).first; + auto slope_zw = std::get<3>(slope_it->second).second; + + ptss.emplace_back(p, segment); + pu.push_back(offset_u + (slope_yu * p_raw.y() + slope_zu * p_raw.z())); + pv.push_back(offset_v + (slope_yv * p_raw.y() + slope_zv * p_raw.z())); + pw.push_back(offset_w + (slope_yw * 
p_raw.y() + slope_zw * p_raw.z())); + pt.push_back(offset_t + slope_x * p_raw.x()); + paf.push_back(std::make_pair(apa, face)); + + } + } + + + fine_tracking_path = ptss; + + if (flag_dQ_dx) { + // Store the fine tracking path as pairs of (Point, Segment) + // Perform dQ/dx fit using the prepared charge data + dQ_dx_fit(end_point_limit, flag_dQ_dx_fit_reg); + } else { + dQ_dx_fill(end_point_limit); + } + + // Now put the results back into the + // Create vector of Fit objects from the internal tracking results + std::vector segment_fits; + segment_fits.reserve(fine_tracking_path.size()); + + // Check that all vectors have consistent sizes + size_t npoints = fine_tracking_path.size(); + if (dQ.size() != npoints || dx.size() != npoints || + pu.size() != npoints || pv.size() != npoints || + pw.size() != npoints || pt.size() != npoints || + reduced_chi2.size() != npoints) { + throw std::runtime_error("TrackFitting::do_single_tracking: inconsistent vector sizes for fit output!"); + } + + // Calculate cumulative range (distance along track) + std::vector cumulative_range(npoints, 0.0); + if (npoints > 1) { + for (size_t i = 1; i < npoints; ++i) { + const auto& p1 = fine_tracking_path[i-1].first; + const auto& p2 = fine_tracking_path[i].first; + double step_distance = sqrt(pow(p2.x() - p1.x(), 2) + + pow(p2.y() - p1.y(), 2) + + pow(p2.z() - p1.z(), 2)); + cumulative_range[i] = cumulative_range[i-1] + step_distance; + } + } + + // Convert internal results to PR::Fit objects + std::vector path_points; + path_points.reserve(fine_tracking_path.size()); + for (size_t i = 0; i < npoints; ++i) { + PR::Fit fit; + + // Set the fitted 3D point + fit.point = fine_tracking_path[i].first; + path_points.push_back(fit.point); + + // Set physics quantities + fit.dQ = dQ[i]; + fit.dx = dx[i]; + fit.pu = pu[i]; + fit.pv = pv[i]; + fit.pw = pw[i]; + fit.pt = pt[i]; + fit.paf = paf[i]; + fit.reduced_chi2 = reduced_chi2[i]; + + // Set trajectory information + fit.index = static_cast(i); + 
fit.range = cumulative_range[i]; + + // Set fix flags (typically fix endpoints for track fitting) + fit.flag_fix = false; + segment_fits.push_back(fit); + } + + // Assign the fits to the segment + segment->fits(segment_fits); + + // replace point cloud after track fitting ... + PR::create_segment_point_cloud(segment, path_points, m_dv, "main"); + PR::create_segment_fit_point_cloud(segment, m_dv, "fit"); + +} diff --git a/clus/src/TrackFitting_Util.cxx b/clus/src/TrackFitting_Util.cxx new file mode 100644 index 000000000..e47c67188 --- /dev/null +++ b/clus/src/TrackFitting_Util.cxx @@ -0,0 +1,41 @@ +#include "WireCellClus/TrackFitting_Util.h" +#include +#include + +using namespace WireCell::Clus::TrackFittingUtil; + +void WireCell::Clus::TrackFittingUtil::calculate_ranges_simplified( + double angle_u, double angle_v, double angle_w, + double rem_dis_sq_cut_u, double rem_dis_sq_cut_v, double rem_dis_sq_cut_w, + double min_u_dis, double min_v_dis, double min_w_dis, + double pitch_u, double pitch_v, double pitch_w, + float& range_sq_u, float& range_sq_v, float& range_sq_w) { + + + // Geometric coupling coefficients + double coupling_uv = fabs(cos(angle_u - angle_v)); + double coupling_uw = fabs(cos(angle_u - angle_w)); + double coupling_vw = fabs(cos(angle_v - angle_w)); + + // std::cout << "Angles: " << rem_dis_cut_u << " " << rem_dis_cut_v << " " << rem_dis_cut_w << " " << coupling_uv << " " << coupling_uw << " " << coupling_vw << " | " << angle_u << " " << angle_v << " " << angle_w << " " << min_u_dis << " " << min_v_dis << " " << min_w_dis << std::endl; + + + // Cost from other planes (weighted by coupling) + double cost_u_from_v = coupling_uv * pow(min_v_dis * pitch_v, 2)/coupling_vw; + double cost_u_from_w = coupling_uw * pow(min_w_dis * pitch_w, 2)/coupling_vw; + + double cost_v_from_u = coupling_uv * pow(min_u_dis * pitch_u, 2)/coupling_uw; + double cost_v_from_w = coupling_vw * pow(min_w_dis * pitch_w, 2)/coupling_uw; + + double cost_w_from_u = coupling_uw * 
pow(min_u_dis * pitch_u, 2)/coupling_uv; + double cost_w_from_v = coupling_vw * pow(min_v_dis * pitch_v, 2)/coupling_uv; + + // Calculate available ranges + double available_u = rem_dis_sq_cut_u*(coupling_uv + coupling_uw + coupling_vw) - cost_u_from_v - cost_u_from_w; + double available_v = rem_dis_sq_cut_v*(coupling_uv + coupling_uw + coupling_vw) - cost_v_from_u - cost_v_from_w; + double available_w = rem_dis_sq_cut_w*(coupling_uv + coupling_uw + coupling_vw) - cost_w_from_u - cost_w_from_v; + + range_sq_u = (available_u > 0) ? available_u : 0; + range_sq_v = (available_v > 0) ? available_v : 0; + range_sq_w = (available_w > 0) ? available_w : 0; +} \ No newline at end of file diff --git a/clus/src/clustering_close.cxx b/clus/src/clustering_close.cxx index d1ea8eb08..74149a93d 100644 --- a/clus/src/clustering_close.cxx +++ b/clus/src/clustering_close.cxx @@ -1,78 +1,56 @@ -#include +#include "WireCellClus/IEnsembleVisitor.h" +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellClus/ClusteringFuncsMixins.h" + +#include "WireCellIface/IConfigurable.h" + +#include "WireCellUtil/NamedFactory.h" #include "WireCellUtil/ExecMon.h" -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wparentheses" +class ClusteringClose; +WIRECELL_FACTORY(ClusteringClose, ClusteringClose, + WireCell::IConfigurable, WireCell::Clus::IEnsembleVisitor) using namespace WireCell; using namespace WireCell::Clus; -using namespace WireCell::Aux; -using namespace WireCell::Aux::TensorDM; -using namespace WireCell::PointCloud::Facade; -using namespace WireCell::PointCloud::Tree; -void WireCell::PointCloud::Facade::clustering_close( - Grouping& live_grouping, - cluster_set_t& cluster_connected_dead, // in/out - const double length_cut) -{ - // bool flag_print = false; - // ExecMon em("starting"); +using namespace WireCell::Clus::Facade; - cluster_set_t used_clusters; - - - // prepare graph ... 
- typedef cluster_connectivity_graph_t Graph; - Graph g; - std::unordered_map ilive2desc; // added live index to graph descriptor - std::map map_cluster_index; - const auto& live_clusters = live_grouping.children(); - - for (size_t ilive = 0; ilive < live_clusters.size(); ++ilive) { - const auto& live = live_clusters.at(ilive); - map_cluster_index[live] = ilive; - ilive2desc[ilive] = boost::add_vertex(ilive, g); - } +static void clustering_close(Grouping& live_clusters, // - for (size_t i=0;i!=live_clusters.size();i++){ - auto cluster_1 = live_clusters.at(i); - if (cluster_1->get_length() < 1.5*units::cm) continue; - if (used_clusters.find(cluster_1)!=used_clusters.end()) continue; - for (size_t j=i+1;jget_length() < 1.5*units::cm) continue; - if (Clustering_3rd_round(*cluster_1,*cluster_2, - cluster_1->get_length(), cluster_2->get_length(), length_cut)){ - //to_be_merged_pairs.insert(std::make_pair(cluster_1,cluster_2)); - boost::add_edge(ilive2desc[map_cluster_index[cluster_1]], - ilive2desc[map_cluster_index[cluster_2]], g); + const Tree::Scope& scope, + const double length_cut = 1*units::cm // + ); +class ClusteringClose : public IConfigurable, public Clus::IEnsembleVisitor, private NeedScope { +public: + ClusteringClose() {} + virtual ~ClusteringClose() {} - - if (cluster_1->get_length() < 5*units::cm){ - used_clusters.insert(cluster_1); - break; - } - if (cluster_2->get_length() < 5*units::cm){ - used_clusters.insert(cluster_2); - } - } - } + void configure(const WireCell::Configuration& config) { + NeedScope::configure(config); + + length_cut_ = get(config, "length_cut", 1*units::cm); + } + virtual Configuration default_configuration() const { + Configuration cfg; + return cfg; } - // if (flag_print) std::cout << em("core alg") << std::endl; - - // new function to merge clusters ... 
- merge_clusters(g, live_grouping, cluster_connected_dead); - - // if (flag_print) std::cout << em("merge clusters") << std::endl; -} + void visit(Ensemble& ensemble) const { + auto& live = *ensemble.with_name("live").at(0); + clustering_close(live, m_scope, length_cut_); + } + +private: + double length_cut_{1*units::cm}; +}; +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wparentheses" -bool WireCell::PointCloud::Facade::Clustering_3rd_round( +static bool Clustering_3rd_round( const Cluster& cluster1, const Cluster& cluster2, double length_1, @@ -85,7 +63,7 @@ bool WireCell::PointCloud::Facade::Clustering_3rd_round( bool flag_print = false; ExecMon em("starting"); - double dis = WireCell::PointCloud::Facade::Find_Closest_Points(cluster1, cluster2, + double dis = WireCell::Clus::Facade::Find_Closest_Points(cluster1, cluster2, length_1, length_2, length_cut, p1,p2); @@ -144,10 +122,6 @@ bool WireCell::PointCloud::Facade::Clustering_3rd_round( geo_point_t tempV1(p2.x() - p1.x(), p2.y() - p1.y(), p2.z() - p1.z()); geo_point_t tempV2(cluster2_ave_pos.x() - cluster1_ave_pos.x(), cluster2_ave_pos.y() - cluster1_ave_pos.y(), cluster2_ave_pos.z() - cluster1_ave_pos.z()); - /* if (length_1 > 150*units::cm || length_2 > 150*units::cm) */ - /* std::cout << cluster1.get_cluster_id() << " " << cluster2.get_cluster_id() << " " << length_1/units::cm << " " << length_2/units::cm << " " << num_p1 << " " << num_p2 << " " << num_tp1 << " " << num_tp2 << std::endl; */ - /* return false; */ - // one small the other one is big if (length_1 < 12 *units::cm && num_p1 > 0.5*num_tp1 && (num_p2> 50 || num_p2 > 0.25*num_tp2) || length_2 < 12*units::cm && num_p2 > 0.5*num_tp2 && (num_p1>50 || num_p1 > 0.25*num_tp1) ) @@ -160,42 +134,116 @@ bool WireCell::PointCloud::Facade::Clustering_3rd_round( double angle5 = tempV1.angle(tempV2); if (length_1 < 60*units::cm || length_2 < 60*units::cm){ - if (angle5 < 30/180.*3.1415926) - return true; - if (angle5 < 90/180.*3.1415926 && 
(num_p1 > 50 && num_p2 > 50) && (num_p1>75 || num_p2>75)) - return true; + if (angle5 < 30/180.*3.1415926) + return true; + if (angle5 < 90/180.*3.1415926 && (num_p1 > 50 && num_p2 > 50) && (num_p1>75 || num_p2>75)) + return true; } if ((length_1 < 60*units::cm || num_p1 >40) && (length_2 < 60*units::cm || num_p2 > 40)){ - if ((3.1415926 - dir1.angle(dir2))/3.1415926*180 < 30 && - (3.1415926 - dir1.angle(tempV1))/3.1415926*180. < 60 && - dir2.angle(tempV1)/3.1415926*180.<60 || - (3.1415926 - dir1.angle(dir2))/3.1415926*180 < 15) - return true; - - geo_point_t dir3 = cluster1.vhough_transform(cluster1_ave_pos,50*units::cm); // cluster 1 direction based on hough - geo_point_t dir4 = cluster2.vhough_transform(cluster2_ave_pos,50*units::cm); // cluster 1 direction based on hough - - if ((3.1415926 - dir3.angle(dir4))/3.1415926*180 < 25 && - (3.1415926 - dir3.angle(tempV2))/3.1415926*180. < 15 && - dir4.angle(tempV2)/3.1415926*180.<15 || - (3.1415926 - dir3.angle(dir4))/3.1415926*180 < 15) - return true; - - if (dis<0.6*units::cm && ((3.1415926 - dir3.angle(tempV2))/3.1415926*180. < 45 && dir4.angle(tempV2)/3.1415926*180. < 90 || (3.1415926 - dir3.angle(tempV2))/3.1415926*180. < 90 && dir4.angle(tempV2)/3.1415926*180. < 45)) - return true; + if ((3.1415926 - dir1.angle(dir2))/3.1415926*180 < 30 && + (3.1415926 - dir1.angle(tempV1))/3.1415926*180. < 60 && + dir2.angle(tempV1)/3.1415926*180.<60 || + (3.1415926 - dir1.angle(dir2))/3.1415926*180 < 15) + return true; + + geo_point_t dir3 = cluster1.vhough_transform(cluster1_ave_pos,50*units::cm); // cluster 1 direction based on hough + geo_point_t dir4 = cluster2.vhough_transform(cluster2_ave_pos,50*units::cm); // cluster 1 direction based on hough + + if ((3.1415926 - dir3.angle(dir4))/3.1415926*180 < 25 && + (3.1415926 - dir3.angle(tempV2))/3.1415926*180. 
< 15 && + dir4.angle(tempV2)/3.1415926*180.<15 || + (3.1415926 - dir3.angle(dir4))/3.1415926*180 < 15) + return true; + + if (dis<0.6*units::cm && ((3.1415926 - dir3.angle(tempV2))/3.1415926*180. < 45 && dir4.angle(tempV2)/3.1415926*180. < 90 || (3.1415926 - dir3.angle(tempV2))/3.1415926*180. < 90 && dir4.angle(tempV2)/3.1415926*180. < 45)) + return true; } } // if (flag_print) std::cout << em("additional running") << std::endl; } - - return false; +} + + +// This function can handle multiple APA/Faces +static void clustering_close( + Grouping& live_grouping, + + const Tree::Scope& scope, + const double length_cut) +{ + + cluster_set_t used_clusters; - } + // prepare graph ... + typedef cluster_connectivity_graph_t Graph; + Graph g; + std::unordered_map ilive2desc; // added live index to graph descriptor + std::map map_cluster_index; + const auto& live_clusters = live_grouping.children(); + + for (size_t ilive = 0; ilive < live_clusters.size(); ++ilive) { + const auto& live = live_clusters.at(ilive); + if (live->get_default_scope().hash() != scope.hash()) { + live->set_default_scope(scope); + } + map_cluster_index[live] = ilive; + ilive2desc[ilive] = boost::add_vertex(ilive, g); + } + + for (size_t i=0;i!=live_clusters.size();i++){ + auto cluster_1 = live_clusters.at(i); + // nor process this cluster if it is not in the filter ... 
+ if (!cluster_1->get_scope_filter(scope)) continue; + if (cluster_1->get_length() < 1.5*units::cm) continue; + if (used_clusters.find(cluster_1)!=used_clusters.end()) continue; + for (size_t j=i+1;jget_scope_filter(scope)) continue; + if (used_clusters.find(cluster_2)!=used_clusters.end()) continue; + if (cluster_2->get_length() < 1.5*units::cm) continue; + if (Clustering_3rd_round(*cluster_1,*cluster_2, + cluster_1->get_length(), cluster_2->get_length(), length_cut)){ + //to_be_merged_pairs.insert(std::make_pair(cluster_1,cluster_2)); + boost::add_edge(ilive2desc[map_cluster_index[cluster_1]], + ilive2desc[map_cluster_index[cluster_2]], g); + + + + if (cluster_1->get_length() < 5*units::cm){ + used_clusters.insert(cluster_1); + break; + } + if (cluster_2->get_length() < 5*units::cm){ + used_clusters.insert(cluster_2); + } + } + } + } + + // if (flag_print) std::cout << em("core alg") << std::endl; + + // new function to merge clusters ... + merge_clusters(g, live_grouping); + + // if (flag_print) std::cout << em("merge clusters") << std::endl; + + +} + + + + + + + + + #pragma GCC diagnostic pop // Local Variables: diff --git a/clus/src/clustering_connect.cxx b/clus/src/clustering_connect.cxx index 3eb8945f9..10cc283fb 100644 --- a/clus/src/clustering_connect.cxx +++ b/clus/src/clustering_connect.cxx @@ -1,17 +1,48 @@ -#include +#include "WireCellClus/IEnsembleVisitor.h" +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellClus/ClusteringFuncsMixins.h" -// The original developers do not care. 
-#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wparentheses" +#include "WireCellIface/IConfigurable.h" +#include "WireCellUtil/NamedFactory.h" + +class ClusteringConnect1; +WIRECELL_FACTORY(ClusteringConnect1, ClusteringConnect1, + WireCell::IConfigurable, WireCell::Clus::IEnsembleVisitor) using namespace WireCell; using namespace WireCell::Clus; -using namespace WireCell::Aux; -using namespace WireCell::Aux::TensorDM; -using namespace WireCell::PointCloud::Facade; +using namespace WireCell::Clus::Facade; using namespace WireCell::PointCloud::Tree; +static void clustering_connect1(Grouping& live_grouping, + IDetectorVolumes::pointer dv, + const Tree::Scope& scope); + +class ClusteringConnect1 : public IConfigurable, public Clus::IEnsembleVisitor, private NeedDV, private NeedScope { +public: + ClusteringConnect1() {} + virtual ~ClusteringConnect1() {} + + void configure(const WireCell::Configuration& config) { + NeedDV::configure(config); + NeedScope::configure(config); + } + + void visit(Ensemble& ensemble) const { + auto& live = *ensemble.with_name("live").at(0); + clustering_connect1(live, m_dv, m_scope); + } + virtual Configuration default_configuration() const { + Configuration cfg; + return cfg; + } +}; + + +// The original developers do not care. 
+#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wparentheses" // #define __DEBUG__ #ifdef __DEBUG__ @@ -20,28 +51,100 @@ using namespace WireCell::PointCloud::Tree; #define LogDebug(x) #endif -void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) +// This is for only one APA/face +void clustering_connect1( + Grouping& live_grouping, + const IDetectorVolumes::pointer dv, + const Tree::Scope& scope) { - const auto &tp = live_grouping.get_params(); - auto global_point_cloud = std::make_shared(tp.angle_u, tp.angle_v, tp.angle_w); - for (const Cluster *cluster : live_grouping.children()) { - global_point_cloud->add_points(cluster, 0); + // Check that live_grouping has less than one wpid + if (live_grouping.wpids().size() > 1) { + for (const auto& wpid : live_grouping.wpids()) { + std::cout << "Live grouping wpid: " << wpid.name() << std::endl; + } + raise("Live %d > 1", live_grouping.wpids().size()); + } + // Example usage in clustering_parallel_prolong() + auto [drift_dir, angle_u, angle_v, angle_w] = extract_geometry_params(live_grouping, dv); + geo_point_t drift_dir_abs(1,0,0); + + int apa = (*live_grouping.wpids().begin()).apa(); + int face = (*live_grouping.wpids().begin()).face(); + + std::map>& dead_u_index = live_grouping.get_dead_winds(apa, face, 0); + std::map>& dead_v_index = live_grouping.get_dead_winds(apa, face, 1); + std::map>& dead_w_index = live_grouping.get_dead_winds(apa, face, 2); + + // Get all the wire plane IDs from the grouping + const auto& wpids = live_grouping.wpids(); + // Key: pair, Value: drift_dir, angle_u, angle_v, angle_w + std::map> wpid_params; + std::set apas; + + std::map>>> af_dead_u_index; + std::map>>> af_dead_v_index; + std::map>>> af_dead_w_index; + + for (const auto& wpid : wpids) { + int apa = wpid.apa(); + int face = wpid.face(); + apas.insert(apa); + + // Create wpids for all three planes with this APA and face + WirePlaneId wpid_u(kUlayer, face, apa); + WirePlaneId wpid_v(kVlayer, 
face, apa); + WirePlaneId wpid_w(kWlayer, face, apa); + + // Get drift direction based on face orientation + int face_dirx = dv->face_dirx(wpid_u); + geo_point_t drift_dir(face_dirx, 0, 0); + + // Get wire directions for all planes + Vector wire_dir_u = dv->wire_direction(wpid_u); + Vector wire_dir_v = dv->wire_direction(wpid_v); + Vector wire_dir_w = dv->wire_direction(wpid_w); + + // Calculate angles + double angle_u = std::atan2(wire_dir_u.z(), wire_dir_u.y()); + double angle_v = std::atan2(wire_dir_v.z(), wire_dir_v.y()); + double angle_w = std::atan2(wire_dir_w.z(), wire_dir_w.y()); + + wpid_params[wpid] = std::make_tuple(drift_dir, angle_u, angle_v, angle_w); + + + af_dead_u_index[apa][face] = live_grouping.get_dead_winds(apa, face, 0); + af_dead_v_index[apa][face] = live_grouping.get_dead_winds(apa, face, 1); + af_dead_w_index[apa][face] = live_grouping.get_dead_winds(apa, face, 2); } - std::map>& dead_u_index = live_grouping.get_dead_winds(0, 0); - std::map>& dead_v_index = live_grouping.get_dead_winds(0, 1); - std::map>& dead_w_index = live_grouping.get_dead_winds(0, 2); - LogDebug("global_point_cloud.get_num_points() " << global_point_cloud->get_num_points()); - LogDebug("dead_u_index.size() " << dead_u_index.size() << " dead_v_index.size() " << dead_v_index.size() << " dead_w_index.size() " << dead_w_index.size()); + + // auto global_point_cloud = std::make_shared(angle_u, angle_v, angle_w); + auto global_point_cloud = std::make_shared(wpid_params); + + for (Cluster *cluster : live_grouping.children()) { + if(!cluster->get_scope_filter(scope)) continue; + // global_point_cloud->add_points(cluster, 0); + if (cluster->get_default_scope().hash() != scope.hash()) { + cluster->set_default_scope(scope); + // std::cout << "Test: Set default scope: " << pc_name << " " << coords[0] << " " << coords[1] << " " << coords[2] << " " << cluster->get_default_scope().hash() << " " << scope.hash() << std::endl; + } + // debug ... 
+ // { + // const size_t num_points = cluster->npoints(); + // const size_t kd_num_points = cluster->kd3d().npoints(); + // std::cout << "Xin: " << num_points << " " << kd_num_points << std::endl; + // } + global_point_cloud->add_points(make_points_cluster(cluster, wpid_params)); + } // sort the clusters length ... std::vector live_clusters = live_grouping.children(); // copy std::sort(live_clusters.begin(), live_clusters.end(), [](const Cluster *cluster1, const Cluster *cluster2) { return cluster1->get_length() > cluster2->get_length(); }); - // double time_slice_width = tp.nticks_live_slice * tp.tick_drift; - auto global_skeleton_cloud = std::make_shared(tp.angle_u, tp.angle_v, tp.angle_w); + // auto global_skeleton_cloud = std::make_shared(angle_u, angle_v, angle_w); + auto global_skeleton_cloud = std::make_shared(wpid_params); double extending_dis = 50 * units::cm; double angle = 7.5; @@ -65,8 +168,6 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) std::map map_cluster_dir1; std::map map_cluster_dir2; - geo_point_t drift_dir(1, 0, 0); - const auto [angle_u,angle_v,angle_w] = live_grouping.wire_angles(); geo_point_t U_dir(0,cos(angle_u),sin(angle_u)); geo_point_t V_dir(0,cos(angle_v),sin(angle_v)); geo_point_t W_dir(0,cos(angle_w),sin(angle_w)); @@ -75,22 +176,18 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) // geo_point_t W_dir(0, 1, 0); for (size_t i = 0; i != live_clusters.size(); i++) { - Cluster *cluster = live_clusters.at(i); + const Cluster *cluster = live_clusters.at(i); + if(!cluster->get_scope_filter(scope)) continue; assert (cluster->npoints() > 0); // preempt segfault in get_two_extreme_points() // if (cluster->get_length()/units::cm>5){ - // std::cout << "Connect 0: " << cluster->get_length()/units::cm << " " << cluster->get_center() << std::endl; + // std::cout << "Connect 0: " << cluster->get_length()/units::cm << " " << cluster->get_pca().center << std::endl; // } - 
LogDebug("#b " << cluster->nchildren() << " length " << cluster->get_length()); - #ifdef __DEBUG__ - if (cluster->nchildren() == 84) break; - #endif // cluster->Create_point_cloud(); std::pair extreme_points = cluster->get_two_extreme_points(); - LogDebug("#b " << cluster->nchildren() << " extreme_points " << extreme_points.first << " " << extreme_points.second); geo_point_t main_dir(extreme_points.second.x() - extreme_points.first.x(), extreme_points.second.y() - extreme_points.first.y(), extreme_points.second.z() - extreme_points.first.z()); @@ -101,7 +198,7 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) bool flag_prol_2 = false; if (main_dir.magnitude() > 10 * units::cm && - fabs(main_dir.angle(drift_dir) - 3.1415926 / 2.) < 5 * 3.1415926 / 180.) { + fabs(main_dir.angle(drift_dir_abs) - 3.1415926 / 2.) < 5 * 3.1415926 / 180.) { dir1 = main_dir; dir1 = dir1* -1; dir2 = main_dir; @@ -109,14 +206,14 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) else if (cluster->get_length() > 25 * units::cm) { dir1 = cluster->vhough_transform(extreme_points.first, 80 * units::cm); if (dir1.magnitude() != 0) dir1 = dir1.norm(); - if (fabs(dir1.angle(drift_dir) - 3.1415926 / 2.) < 5 * 3.1415926 / 180.) { + if (fabs(dir1.angle(drift_dir_abs) - 3.1415926 / 2.) < 5 * 3.1415926 / 180.) { dir1.set(dir1.x(), (extreme_points.second.y() - extreme_points.first.y()) / main_dir.magnitude(), (extreme_points.second.z() - extreme_points.first.z()) / main_dir.magnitude()); dir1 = dir1 * -1; } dir2 = cluster->vhough_transform(extreme_points.second, 80 * units::cm); if (dir2.magnitude() != 0) dir2 = dir2.norm(); - if (fabs(dir2.angle(drift_dir) - 3.1415926 / 2.) < 5 * 3.1415926 / 180.) { + if (fabs(dir2.angle(drift_dir_abs) - 3.1415926 / 2.) < 5 * 3.1415926 / 180.) 
{ dir2.set(dir2.x(), (extreme_points.second.y() - extreme_points.first.y()) / main_dir.magnitude(), (extreme_points.second.z() - extreme_points.first.z()) / main_dir.magnitude()); } @@ -129,14 +226,13 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) if (dir1.dot(main_dir) > 0) dir1 *= -1; if (dir2.dot(dir1) > 0) dir2 *= -1; } - LogDebug("#b " << cluster->nchildren() << " dir1 " << dir1 << " dir2 " << dir2); bool flag_add_dir1 = true; bool flag_add_dir2 = true; map_cluster_dir1[cluster] = dir1; map_cluster_dir2[cluster] = dir2; - if (fabs(dir1.angle(drift_dir) - 3.1415926 / 2.) < 7.5 * 3.1415926 / 180.) { + if (fabs(dir1.angle(drift_dir_abs) - 3.1415926 / 2.) < 7.5 * 3.1415926 / 180.) { flag_para_1 = true; } else { @@ -144,7 +240,7 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) geo_point_t tempV5; double angle1 = tempV1.angle(U_dir); tempV5.set(fabs(dir1.x()), sqrt(pow(dir1.y(), 2) + pow(dir1.z(), 2)) * sin(angle1), 0); - angle1 = tempV5.angle(drift_dir); + angle1 = tempV5.angle(drift_dir_abs); if (angle1 < 7.5 / 180. * 3.1415926) { flag_prol_1 = true; @@ -152,7 +248,7 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) else { angle1 = tempV1.angle(V_dir); tempV5.set(fabs(dir1.x()), sqrt(pow(dir1.y(), 2) + pow(dir1.z(), 2)) * sin(angle1), 0); - angle1 = tempV5.angle(drift_dir); + angle1 = tempV5.angle(drift_dir_abs); if (angle1 < 7.5 / 180. * 3.1415926) { flag_prol_1 = true; @@ -160,7 +256,7 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) else { angle1 = tempV1.angle(W_dir); tempV5.set(fabs(dir1.x()), sqrt(pow(dir1.y(), 2) + pow(dir1.z(), 2)) * sin(angle1), 0); - angle1 = tempV5.angle(drift_dir); + angle1 = tempV5.angle(drift_dir_abs); if (angle1 < 7.5 / 180. 
* 3.1415926) { flag_prol_1 = true; @@ -169,7 +265,7 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) } } - if (fabs(dir2.angle(drift_dir) - 3.1415926 / 2.) < 7.5 * 3.1415926 / 180.) { + if (fabs(dir2.angle(drift_dir_abs) - 3.1415926 / 2.) < 7.5 * 3.1415926 / 180.) { flag_para_2 = true; } else { @@ -177,21 +273,21 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) geo_point_t tempV6; double angle2 = tempV2.angle(U_dir); tempV6.set(fabs(dir2.x()), sqrt(pow(dir2.y(), 2) + pow(dir2.z(), 2)) * sin(angle2), 0); - angle2 = tempV6.angle(drift_dir); + angle2 = tempV6.angle(drift_dir_abs); if (angle2 < 7.5 / 180. * 3.1415926) { flag_prol_2 = true; } else { angle2 = tempV2.angle(V_dir); tempV6.set(fabs(dir2.x()), sqrt(pow(dir2.y(), 2) + pow(dir2.z(), 2)) * sin(angle2), 0); - angle2 = tempV6.angle(drift_dir); + angle2 = tempV6.angle(drift_dir_abs); if (angle2 < 7.5 / 180. * 3.1415926) { flag_prol_2 = true; } else { angle2 = tempV2.angle(W_dir); tempV6.set(fabs(dir2.x()), sqrt(pow(dir2.y(), 2) + pow(dir2.z(), 2)) * sin(angle2), 0); - angle2 = tempV6.angle(drift_dir); + angle2 = tempV6.angle(drift_dir_abs); if (angle2 < 7.5 / 180. * 3.1415926) { flag_prol_2 = true; } @@ -219,61 +315,59 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) flag_add_dir2 = false; } - if (fabs(dir1.angle(drift_dir) - 3.1415926 / 2.) < 5 * 3.1415926 / 180.) { + if (fabs(dir1.angle(drift_dir_abs) - 3.1415926 / 2.) < 5 * 3.1415926 / 180.) { flag_para_1 = true; } else { flag_para_1 = false; } - if (fabs(dir2.angle(drift_dir) - 3.1415926 / 2.) < 5 * 3.1415926 / 180.) { + if (fabs(dir2.angle(drift_dir_abs) - 3.1415926 / 2.) < 5 * 3.1415926 / 180.) 
{ flag_para_2 = true; } else { flag_para_2 = false; } - LogDebug("#b " << cluster->nchildren() << " flag_para_1 " << flag_para_1 << " flag_prol_1 " << flag_prol_1 << " flag_para_2 " << flag_para_2 << " flag_prol_2 " << flag_prol_2); if (i == 0) { if (flag_para_1 || flag_prol_1) { - global_skeleton_cloud->add_points(cluster, extreme_points.first, dir1, extending_dis * 3, 1.2 * units::cm, - angle); + // global_skeleton_cloud->add_points(cluster, extreme_points.first, dir1, extending_dis * 3, 1.2 * units::cm, angle); + global_skeleton_cloud->add_points(make_points_linear_extrapolation(cluster, extreme_points.first, dir1, extending_dis * 3, 1.2 * units::cm, angle, dv, wpid_params)); dir1 *= -1; - global_skeleton_cloud->add_points(cluster, extreme_points.first, dir1, extending_dis * 3, 1.2 * units::cm, - angle); + // global_skeleton_cloud->add_points(cluster, extreme_points.first, dir1, extending_dis * 3, 1.2 * units::cm, angle); + global_skeleton_cloud->add_points(make_points_linear_extrapolation(cluster, extreme_points.first, dir1, extending_dis * 3, 1.2 * units::cm, angle, dv, wpid_params)); } else { - global_skeleton_cloud->add_points(cluster, extreme_points.first, dir1, extending_dis, 1.2 * units::cm, - angle); + // global_skeleton_cloud->add_points(cluster, extreme_points.first, dir1, extending_dis, 1.2 * units::cm, angle); + global_skeleton_cloud->add_points(make_points_linear_extrapolation(cluster, extreme_points.first, dir1, extending_dis, 1.2 * units::cm, angle, dv, wpid_params)); dir1 *= -1; - global_skeleton_cloud->add_points(cluster, extreme_points.first, dir1, extending_dis, 1.2 * units::cm, - angle); + // global_skeleton_cloud->add_points(cluster, extreme_points.first, dir1, extending_dis, 1.2 * units::cm, angle); + global_skeleton_cloud->add_points(make_points_linear_extrapolation(cluster, extreme_points.first, dir1, extending_dis, 1.2 * units::cm, angle, dv, wpid_params)); } if (flag_para_2 || flag_prol_2) { - global_skeleton_cloud->add_points(cluster, 
extreme_points.second, dir2, extending_dis * 3.0, - 1.2 * units::cm, angle); + // global_skeleton_cloud->add_points(cluster, extreme_points.second, dir2, extending_dis * 3.0, 1.2 * units::cm, angle); + global_skeleton_cloud->add_points(make_points_linear_extrapolation(cluster, extreme_points.second, dir2, extending_dis * 3.0, 1.2 * units::cm, angle, dv, wpid_params)); dir2 *= -1; - global_skeleton_cloud->add_points(cluster, extreme_points.second, dir2, extending_dis * 3.0, - 1.2 * units::cm, angle); + // global_skeleton_cloud->add_points(cluster, extreme_points.second, dir2, extending_dis * 3.0, 1.2 * units::cm, angle); + global_skeleton_cloud->add_points(make_points_linear_extrapolation(cluster, extreme_points.second, dir2, extending_dis * 3.0, 1.2 * units::cm, angle, dv, wpid_params)); } else { - global_skeleton_cloud->add_points(cluster, extreme_points.second, dir2, extending_dis, 1.2 * units::cm, - angle); + // global_skeleton_cloud->add_points(cluster, extreme_points.second, dir2, extending_dis, 1.2 * units::cm, angle); + global_skeleton_cloud->add_points(make_points_linear_extrapolation(cluster, extreme_points.second, dir2, extending_dis, 1.2 * units::cm, angle, dv, wpid_params)); dir2 *= -1; - global_skeleton_cloud->add_points(cluster, extreme_points.second, dir2, extending_dis, 1.2 * units::cm, - angle); + // global_skeleton_cloud->add_points(cluster, extreme_points.second, dir2, extending_dis, 1.2 * units::cm, angle); + global_skeleton_cloud->add_points(make_points_linear_extrapolation(cluster, extreme_points.second, dir2, extending_dis, 1.2 * units::cm, angle, dv, wpid_params)); } } else { if (cluster->get_length() < 100 * units::cm || - fabs(dir2.angle(drift_dir) - 3.1415926 / 2.) < 5 * 3.1415926 / 180. && - fabs(dir1.angle(drift_dir) - 3.1415926 / 2.) < 5 * 3.1415926 / 180. && + fabs(dir2.angle(drift_dir_abs) - 3.1415926 / 2.) < 5 * 3.1415926 / 180. && + fabs(dir1.angle(drift_dir_abs) - 3.1415926 / 2.) < 5 * 3.1415926 / 180. 
&& cluster->get_length() < 200 * units::cm) { // WCP::WCPointCloud &cloud = cluster->get_point_cloud()->get_cloud(); - LogDebug("#b " << cluster->nchildren() << " gsc " << global_skeleton_cloud->get_num_points()); int num_total_points = cluster->npoints(); const auto& winds = cluster->wire_indices(); int num_unique[3] = {0, 0, 0}; // points that are unique (not agree with any other clusters) @@ -283,23 +377,35 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) bool flag_dead = false; if (dead_u_index.find(winds[0][j]) != dead_u_index.end()) { - if (cluster->point3d(j).x() >= dead_u_index[winds[0][j]].first && - cluster->point3d(j).x() <= dead_u_index[winds[0][j]].second) { + if (cluster->point3d_raw(j).x() >= dead_u_index[winds[0][j]].first && + cluster->point3d_raw(j).x() <= dead_u_index[winds[0][j]].second) { flag_dead = true; } } if (!flag_dead) { + // std::vector> results = + // global_skeleton_cloud->get_2d_points_info(test_point, loose_dis_cut, 0); std::vector> results = - global_skeleton_cloud->get_2d_points_info(test_point, loose_dis_cut, 0); - LogDebug("#b " << cluster->nchildren() << " test_point " << test_point << " loose_dis_cut " << loose_dis_cut << " results.size() " << results.size()); + global_skeleton_cloud->get_2d_points_info(test_point, loose_dis_cut, 0, face, apa); bool flag_unique = true; if (results.size() > 0) { std::set temp_clusters; for (size_t k = 0; k != results.size(); k++) { - // LogDebug("#b " << cluster->nchildren() << " results.at(k) " << std::get<0>(results.at(k)) << " " << global_skeleton_cloud->dist_cut(0,std::get<2>(results.at(k)))); + // if (cluster->children().size() == 215 && k == 0) { + // if (k == 0) { + // std::cout + // << " cluster->children().size() " << cluster->children().size() + // << " k " << k + // // <<" global_skeleton_cloud->get_num_points() " << global_skeleton_cloud->get_num_points() + // <<" global_skeleton_cloud->get_points().size() " << 
global_skeleton_cloud->get_points().size() + // <<" results.at(k) " << std::get<0>(results.at(k)) + // // << " " << global_skeleton_cloud->dist_cut(0,std::get<2>(results.at(k))) << std::endl; + // << " " << global_skeleton_cloud->get_points().at(std::get<2>(results.at(k))).dist_cut[0] << std::endl; + // } if (std::get<0>(results.at(k)) < - global_skeleton_cloud->dist_cut(0,std::get<2>(results.at(k)))) { + // global_skeleton_cloud->dist_cut(0,std::get<2>(results.at(k)))) { + global_skeleton_cloud->get_points().at(std::get<2>(results.at(k))).dist_cut[0]) { flag_unique = false; temp_clusters.insert(std::get<1>(results.at(k))); } @@ -316,8 +422,10 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) if (flag_unique) num_unique[0]++; } else { + // std::vector> results = + // global_skeleton_cloud->get_2d_points_info(test_point, loose_dis_cut, 0); std::vector> results = - global_skeleton_cloud->get_2d_points_info(test_point, loose_dis_cut, 0); + global_skeleton_cloud->get_2d_points_info(test_point, loose_dis_cut, 0, face, apa); bool flag_unique = true; if (results.size() > 0) { std::set temp_clusters; @@ -341,21 +449,24 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) flag_dead = false; if (dead_v_index.find(winds[1][j]) != dead_v_index.end()) { - if (cluster->point3d(j).x() >= dead_v_index[winds[1][j]].first && - cluster->point3d(j).x() <= dead_v_index[winds[1][j]].second) { + if (cluster->point3d_raw(j).x() >= dead_v_index[winds[1][j]].first && + cluster->point3d_raw(j).x() <= dead_v_index[winds[1][j]].second) { flag_dead = true; } } if (!flag_dead) { + // std::vector> results = + // global_skeleton_cloud->get_2d_points_info(test_point, loose_dis_cut, 1); std::vector> results = - global_skeleton_cloud->get_2d_points_info(test_point, loose_dis_cut, 1); + global_skeleton_cloud->get_2d_points_info(test_point, loose_dis_cut, 1, face, apa); bool flag_unique = true; if (results.size() > 0) { std::set 
temp_clusters; for (size_t k = 0; k != results.size(); k++) { if (std::get<0>(results.at(k)) < - global_skeleton_cloud->dist_cut(1,std::get<2>(results.at(k)))) { + // global_skeleton_cloud->dist_cut(1,std::get<2>(results.at(k)))) { + global_skeleton_cloud->get_points().at(std::get<2>(results.at(k))).dist_cut[1]) { flag_unique = false; temp_clusters.insert(std::get<1>(results.at(k))); } @@ -372,8 +483,10 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) if (flag_unique) num_unique[1]++; } else { + // std::vector> results = + // global_skeleton_cloud->get_2d_points_info(test_point, loose_dis_cut, 1); std::vector> results = - global_skeleton_cloud->get_2d_points_info(test_point, loose_dis_cut, 1); + global_skeleton_cloud->get_2d_points_info(test_point, loose_dis_cut, 1, face, apa); bool flag_unique = true; if (results.size() > 0) { std::set temp_clusters; @@ -397,21 +510,24 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) flag_dead = false; if (dead_w_index.find(winds[2][j]) != dead_w_index.end()) { - if (cluster->point3d(j).x() >= dead_w_index[winds[2][j]].first && - cluster->point3d(j).x() <= dead_w_index[winds[2][j]].second) { + if (cluster->point3d_raw(j).x() >= dead_w_index[winds[2][j]].first && + cluster->point3d_raw(j).x() <= dead_w_index[winds[2][j]].second) { flag_dead = true; } } if (!flag_dead) { + // std::vector> results = + // global_skeleton_cloud->get_2d_points_info(test_point, loose_dis_cut, 2); std::vector> results = - global_skeleton_cloud->get_2d_points_info(test_point, loose_dis_cut, 2); + global_skeleton_cloud->get_2d_points_info(test_point, loose_dis_cut, 2, face, apa); bool flag_unique = true; if (results.size() > 0) { std::set temp_clusters; for (size_t k = 0; k != results.size(); k++) { if (std::get<0>(results.at(k)) < - global_skeleton_cloud->dist_cut(2,std::get<2>(results.at(k)))) { + // global_skeleton_cloud->dist_cut(2,std::get<2>(results.at(k)))) { + 
global_skeleton_cloud->get_points().at(std::get<2>(results.at(k))).dist_cut[2]) { flag_unique = false; temp_clusters.insert(std::get<1>(results.at(k))); } @@ -428,8 +544,10 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) if (flag_unique) num_unique[2]++; } else { + // std::vector> results = + // global_skeleton_cloud->get_2d_points_info(test_point, loose_dis_cut, 2); std::vector> results = - global_skeleton_cloud->get_2d_points_info(test_point, loose_dis_cut, 2); + global_skeleton_cloud->get_2d_points_info(test_point, loose_dis_cut, 2, face, apa); bool flag_unique = true; if (results.size() > 0) { std::set temp_clusters; @@ -451,13 +569,7 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) if (flag_unique) num_unique[2]++; } } // loop over points - LogDebug("num_unique " << num_unique[0] << " " << num_unique[1] << " " << num_unique[2]); - LogDebug("map_cluster_num " << map_cluster_num[0].size() << " " << map_cluster_num[1].size() << " " << map_cluster_num[2].size()); - if (cluster->nchildren() == 84) { - for (auto it = map_cluster_num[0].begin(); it != map_cluster_num[0].end(); it++) { - LogDebug("map_cluster_num[0] " << it->first->nchildren() << " " << it->second); - } - } + bool flag_merge = false; @@ -467,9 +579,6 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) int max_value_v[3] = {0, 0, 0}; int max_value_w[3] = {0, 0, 0}; - int max_value[3] = {0, 0, 0}; - const Cluster *max_cluster = 0; - for (auto it = map_cluster_num[0].begin(); it != map_cluster_num[0].end(); it++) { if (it->second > max_value_u[0]) { max_value_u[0] = it->second; @@ -529,6 +638,9 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) } } + int max_value[3] = {0, 0, 0}; + const Cluster *max_cluster = 0; + if ((max_value_u[0] > 0.33 * num_total_points || max_value_u[0] > 100) && (max_value_u[1] > 0.33 * num_total_points || max_value_u[1] > 100) && (max_value_u[2] > 
0.33 * num_total_points || max_value_u[2] > 100)) { @@ -565,7 +677,7 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) // if (max_cluster != 0) // if (fabs(cluster->get_length()/units::cm - 50) < 5 ){ - // std::cout << "Check: " << cluster->get_length()/units::cm << " " << max_cluster->get_length()/units::cm << " " << cluster->get_center() << " " << max_cluster->get_center() << " " << max_value[0] << " " << max_value[1] << " " << max_value[2] << " " << num_total_points << " " << num_unique[0] << " " << num_unique[1] << " " << num_unique[2] << " " << std::endl; + // std::cout << "Check: " << cluster->get_length()/units::cm << " " << max_cluster->get_length()/units::cm << " " << cluster->get_pca().center << " " << max_cluster->get_pca().center << " " << max_value[0] << " " << max_value[1] << " " << max_value[2] << " " << num_total_points << " " << num_unique[0] << " " << num_unique[1] << " " << num_unique[2] << " " << std::endl; // } // if overlap a lot merge @@ -585,12 +697,11 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) // to_be_merged_pairs.insert(std::make_pair(cluster, max_cluster)); boost::add_edge(ilive2desc[map_cluster_index[cluster]], ilive2desc[map_cluster_index[max_cluster]], g); - // std::cout << "Connect 1 1: " << cluster->get_length()/units::cm << " " << max_cluster->get_length()/units::cm << " " << cluster->get_center() << " " << max_cluster->get_center() << std::endl; + // std::cout << "Connect 1 1: " << cluster->get_length()/units::cm << " " << max_cluster->get_length()/units::cm << " " << cluster->get_pca().center << " " << max_cluster->get_pca().center << std::endl; // curr_cluster = max_cluster; } - LogDebug("max_cluster_u #b " << max_cluster_u->nchildren() << " max_cluster_v #b " << max_cluster_v->nchildren() << " max_cluster_w #b " << max_cluster_w->nchildren()); - LogDebug("max_cluster #b " << max_cluster->nchildren() << " map_cluster_dir1 " << map_cluster_dir1[max_cluster] 
<< " map_cluster_dir2 " << map_cluster_dir2[max_cluster]); + if (fabs(dir1.angle(map_cluster_dir1[max_cluster]) - 3.1415926 / 2.) < 75 * 3.1415926 / 180. && fabs(dir1.angle(map_cluster_dir2[max_cluster]) - 3.1415926 / 2.) < 75 * 3.1415926 / 180.) { flag_add_dir1 = false; @@ -611,18 +722,20 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) if (cluster->get_length() > 25 * units::cm || max_cluster->get_length() > 25 * units::cm) { // if overlap significant, compare the PCA - // cluster->Calc_PCA(); - geo_point_t p1_c = cluster->get_center(); - geo_point_t p1_dir(cluster->get_pca_axis(0).x(), cluster->get_pca_axis(0).y(), - cluster->get_pca_axis(0).z()); - // max_cluster->Calc_PCA(); - geo_point_t p2_c = max_cluster->get_center(); - geo_point_t p2_dir(max_cluster->get_pca_axis(0).x(), max_cluster->get_pca_axis(0).y(), - max_cluster->get_pca_axis(0).z()); + + geo_point_t p1_c = cluster->get_pca().center; + geo_point_t p1_dir(cluster->get_pca().axis.at(0).x(), + cluster->get_pca().axis.at(0).y(), + cluster->get_pca().axis.at(0).z()); + + geo_point_t p2_c = max_cluster->get_pca().center; + geo_point_t p2_dir(max_cluster->get_pca().axis.at(0).x(), + max_cluster->get_pca().axis.at(0).y(), + max_cluster->get_pca().axis.at(0).z()); double angle_diff = p1_dir.angle(p2_dir) / 3.1415926 * 180.; - double angle1_drift = p1_dir.angle(drift_dir) / 3.1415926 * 180.; - double angle2_drift = p2_dir.angle(drift_dir) / 3.1415926 * 180.; + double angle1_drift = p1_dir.angle(drift_dir_abs) / 3.1415926 * 180.; + double angle2_drift = p2_dir.angle(drift_dir_abs) / 3.1415926 * 180.; Ray l1(p1_c, p1_c+p1_dir); Ray l2(p2_c, p2_c+p2_dir); // double dis = l1.closest_dis(l2); @@ -645,7 +758,7 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) boost::add_edge(ilive2desc[map_cluster_index[cluster]], ilive2desc[map_cluster_index[max_cluster]], g); - // std::cout << "Connect 1 2: " << cluster->get_length()/units::cm << " " << 
max_cluster->get_length()/units::cm << " " << cluster->get_center() << " " << max_cluster->get_center() << std::endl; + // std::cout << "Connect 1 2: " << cluster->get_length()/units::cm << " " << max_cluster->get_length()/units::cm << " " << cluster->get_pca().center << " " << max_cluster->get_pca().center << std::endl; // curr_cluster = max_cluster; flag_merge = true; } @@ -656,18 +769,18 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) boost::add_edge(ilive2desc[map_cluster_index[cluster]], ilive2desc[map_cluster_index[max_cluster]], g); - // std::cout << "Connect 1 3: " << cluster->get_length()/units::cm << " " << max_cluster->get_length()/units::cm << " " << cluster->get_center() << " " << max_cluster->get_center() << std::endl; + // std::cout << "Connect 1 3: " << cluster->get_length()/units::cm << " " << max_cluster->get_length()/units::cm << " " << cluster->get_pca().center << " " << max_cluster->get_pca().center << std::endl; flag_merge = true; } - if ((fabs(dir2.angle(drift_dir) - 3.1415926 / 2.) < 5 * 3.1415926 / 180. && - fabs(dir1.angle(drift_dir) - 3.1415926 / 2.) < 5 * 3.1415926 / 180.) && + if ((fabs(dir2.angle(drift_dir_abs) - 3.1415926 / 2.) < 5 * 3.1415926 / 180. && + fabs(dir1.angle(drift_dir_abs) - 3.1415926 / 2.) < 5 * 3.1415926 / 180.) 
&& (max_value[0] + max_value[1] + max_value[2]) > 0.7 * (num_total_points + num_total_points + num_total_points)) { // to_be_merged_pairs.insert(std::make_pair(cluster, max_cluster)); boost::add_edge(ilive2desc[map_cluster_index[cluster]], ilive2desc[map_cluster_index[max_cluster]], g); - // std::cout << "Connect 1 4: " << cluster->get_length()/units::cm << " " << max_cluster->get_length()/units::cm << " " << cluster->get_center() << " " << max_cluster->get_center() << std::endl; + // std::cout << "Connect 1 4: " << cluster->get_length()/units::cm << " " << max_cluster->get_length()/units::cm << " " << cluster->get_pca().center << " " << max_cluster->get_pca().center << std::endl; flag_merge = true; } } @@ -675,134 +788,54 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) } } // length cut ... - LogDebug("#b " << cluster->nchildren() << " flag_add_dir1 " << flag_add_dir1 << " flag_add_dir2 " << flag_add_dir2); // if (cluster->get_length()/units::cm>5){ - // std::cout << "Connect 0-1: " << cluster->get_length()/units::cm << " " << cluster->get_center() << " " << flag_add_dir1 << " " << flag_add_dir2 << " " << flag_para_1 << " " << flag_prol_1 << " " << flag_para_2 << " " << flag_prol_2 << " " << extreme_points.first << " " << extreme_points.second << " " << dir1 << " " << dir2 << " " << extending_dis << " " << angle << std::endl; + // std::cout << "Connect 0-1: " << cluster->get_length()/units::cm << " " << cluster->get_pca().center << " " << flag_add_dir1 << " " << flag_add_dir2 << " " << flag_para_1 << " " << flag_prol_1 << " " << flag_para_2 << " " << flag_prol_2 << " " << extreme_points.first << " " << extreme_points.second << " " << dir1 << " " << dir2 << " " << extending_dis << " " << angle << std::endl; // } if (flag_add_dir1) { // add extension points in ... 
if (flag_para_1 || flag_prol_1) { - global_skeleton_cloud->add_points(cluster, extreme_points.first, dir1, extending_dis * 3, - 1.2 * units::cm, angle); + // global_skeleton_cloud->add_points(cluster, extreme_points.first, dir1, extending_dis * 3, 1.2 * units::cm, angle); + global_skeleton_cloud->add_points(make_points_linear_extrapolation(cluster, extreme_points.first, dir1, extending_dis * 3, 1.2 * units::cm, angle, dv, wpid_params)); dir1 *= -1; - global_skeleton_cloud->add_points(cluster, extreme_points.first, dir1, extending_dis * 3, - 1.2 * units::cm, angle); + // global_skeleton_cloud->add_points(cluster, extreme_points.first, dir1, extending_dis * 3, 1.2 * units::cm, angle); + global_skeleton_cloud->add_points(make_points_linear_extrapolation(cluster, extreme_points.first, dir1, extending_dis * 3, 1.2 * units::cm, angle, dv, wpid_params)); } else { - global_skeleton_cloud->add_points(cluster, extreme_points.first, dir1, extending_dis, 1.2 * units::cm, - angle); + // global_skeleton_cloud->add_points(cluster, extreme_points.first, dir1, extending_dis, 1.2 * units::cm, angle); + global_skeleton_cloud->add_points(make_points_linear_extrapolation(cluster, extreme_points.first, dir1, extending_dis, 1.2 * units::cm, angle, dv, wpid_params)); dir1 *= -1; - global_skeleton_cloud->add_points(cluster, extreme_points.first, dir1, extending_dis, 1.2 * units::cm, - angle); + // global_skeleton_cloud->add_points(cluster, extreme_points.first, dir1, extending_dis, 1.2 * units::cm, angle); + global_skeleton_cloud->add_points(make_points_linear_extrapolation(cluster, extreme_points.first, dir1, extending_dis, 1.2 * units::cm, angle, dv, wpid_params)); + } } if (flag_add_dir2) { if (flag_para_2 || flag_prol_2) { - global_skeleton_cloud->add_points(cluster, extreme_points.second, dir2, extending_dis * 3.0, - 1.2 * units::cm, angle); + // global_skeleton_cloud->add_points(cluster, extreme_points.second, dir2, extending_dis * 3.0, 1.2 * units::cm, angle); + 
global_skeleton_cloud->add_points(make_points_linear_extrapolation(cluster, extreme_points.second, dir2, extending_dis * 3.0, 1.2 * units::cm, angle, dv, wpid_params)); dir2 *= -1; - global_skeleton_cloud->add_points(cluster, extreme_points.second, dir2, extending_dis * 3.0, - 1.2 * units::cm, angle); + // global_skeleton_cloud->add_points(cluster, extreme_points.second, dir2, extending_dis * 3.0, 1.2 * units::cm, angle); + global_skeleton_cloud->add_points(make_points_linear_extrapolation(cluster, extreme_points.second, dir2, extending_dis * 3.0, 1.2 * units::cm, angle, dv, wpid_params)); } else { - global_skeleton_cloud->add_points(cluster, extreme_points.second, dir2, extending_dis, - 1.2 * units::cm, angle); + // global_skeleton_cloud->add_points(cluster, extreme_points.second, dir2, extending_dis, 1.2 * units::cm, angle); + global_skeleton_cloud->add_points(make_points_linear_extrapolation(cluster, extreme_points.second, dir2, extending_dis, 1.2 * units::cm, angle, dv, wpid_params)); dir2 *= -1; - global_skeleton_cloud->add_points(cluster, extreme_points.second, dir2, extending_dis, - 1.2 * units::cm, angle); + // global_skeleton_cloud->add_points(cluster, extreme_points.second, dir2, extending_dis, 1.2 * units::cm, angle); + global_skeleton_cloud->add_points(make_points_linear_extrapolation(cluster, extreme_points.second, dir2, extending_dis, 1.2 * units::cm, angle, dv, wpid_params)); } } } // not the first cluster ... } // loop over clusters ... 
- LogDebug("#edges " << boost::num_edges(g) << " #vertices " << boost::num_vertices(g)); // merge clusters - /** - * round1 - */ - // to_be_merged_pairs -> merge_clusters - // std::vector> merge_clusters; - // for (auto it = to_be_merged_pairs.begin(); it != to_be_merged_pairs.end(); it++) { - // Cluster *cluster1 = (*it).first; - // Cluster *cluster2 = (*it).second; - // // LogDebug(cluster1 << " " << cluster2 << " " << cluster1->get_cluster_id() << " " << - // // cluster2->get_cluster_id()); - - // bool flag_new = true; - // std::vector> temp_set; - // for (auto it1 = merge_clusters.begin(); it1 != merge_clusters.end(); it1++) { - // std::set &clusters = (*it1); - // if (clusters.find(cluster1) != clusters.end() || clusters.find(cluster2) != clusters.end()) { - // clusters.insert(cluster1); - // clusters.insert(cluster2); - // flag_new = false; - // temp_set.push_back(clusters); - // // break; - // } - // } - // if (flag_new) { - // std::set clusters; - // clusters.insert(cluster1); - // clusters.insert(cluster2); - // merge_clusters.push_back(clusters); - // } - // if (temp_set.size() > 1) { - // // merge them further ... 
- // std::set clusters; - // for (size_t i = 0; i != temp_set.size(); i++) { - // for (auto it1 = temp_set.at(i).begin(); it1 != temp_set.at(i).end(); it1++) { - // clusters.insert(*it1); - // } - // merge_clusters.erase(find(merge_clusters.begin(), merge_clusters.end(), temp_set.at(i))); - // } - // merge_clusters.push_back(clusters); - // } - // } - - // // merge_clusters -> new_clusters - // WCP::ClusterSelection new_clusters; - - // // merge clusters into new clusters, delete old clusters - // for (auto it = merge_clusters.begin(); it != merge_clusters.end(); it++) { - // std::set &clusters = (*it); - // Cluster *ncluster = new Cluster((*clusters.begin())->get_cluster_id()); - // live_clusters.push_back(ncluster); - - // new_clusters.push_back(ncluster); - - // for (auto it1 = clusters.begin(); it1 != clusters.end(); it1++) { - // Cluster *ocluster = *(it1); - // // LogDebug(ocluster->get_cluster_id() << " "; - // SMGCSelection &mcells = ocluster->get_mcells(); - // for (auto it2 = mcells.begin(); it2 != mcells.end(); it2++) { - // SlimMergeGeomCell *mcell = (*it2); - // // LogDebug(ocluster->get_cluster_id() << " " << mcell); - // int time_slice = mcell->GetTimeSlice(); - // ncluster->AddCell(mcell, time_slice); - // } - // live_clusters.erase(find(live_clusters.begin(), live_clusters.end(), ocluster)); - // cluster_length_map.erase(ocluster); - // delete ocluster; - // } - // std::vector range_v1 = ncluster->get_uvwt_range(); - // double length_1 = sqrt(2. / 3. 
* - // (pow(pitch_u * range_v1.at(0), 2) + pow(pitch_v * range_v1.at(1), 2) + - // pow(pitch_w * range_v1.at(2), 2)) + - // pow(time_slice_width * range_v1.at(3), 2)); - // cluster_length_map[ncluster] = length_1; - // // LogDebug(std::endl; - // } - /** - * end of round1 - */ - - cluster_set_t new_clusters; - merge_clusters(g, live_grouping, new_clusters); + + auto new_clusters = merge_clusters(g, live_grouping); live_clusters.clear(); live_clusters = live_grouping.children(); // copy std::sort(live_clusters.begin(), live_clusters.end(), [](const Cluster *cluster1, const Cluster *cluster2) { @@ -820,21 +853,23 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) // to_be_merged_pairs.clear(); // clear it for other usage ... for (auto it = new_clusters.begin(); it != new_clusters.end(); it++) { const Cluster *cluster_1 = (*it); - // cluster_1->Calc_PCA(); - geo_point_t p1_c = cluster_1->get_center(); - geo_point_t p1_dir(cluster_1->get_pca_axis(0).x(), cluster_1->get_pca_axis(0).y(), cluster_1->get_pca_axis(0).z()); + if (!cluster_1->get_scope_filter(scope)) continue; + const auto& pca1 = cluster_1->get_pca(); + geo_point_t p1_c = pca1.center; + geo_point_t p1_dir = pca1.axis.at(0); Ray l1(p1_c, p1_c+p1_dir); for (auto it1 = live_clusters.begin(); it1 != live_clusters.end(); it1++) { Cluster *cluster_2 = (*it1); + if (!cluster_2->get_scope_filter(scope)) continue; if (cluster_2->get_length() < 3 * units::cm) continue; if (cluster_2 == cluster_1) continue; + const auto& pca2 = cluster_2->get_pca(); if (cluster_1->get_length() > 25 * units::cm || cluster_2->get_length() > 25 * units::cm || (cluster_1->get_length() + cluster_2->get_length()) > 30 * units::cm) { // cluster_2->Calc_PCA(); - geo_point_t p2_c = cluster_2->get_center(); - geo_point_t p2_dir(cluster_2->get_pca_axis(0).x(), cluster_2->get_pca_axis(0).y(), - cluster_2->get_pca_axis(0).z()); + geo_point_t p2_c = pca2.center; + geo_point_t p2_dir = pca2.axis.at(0); geo_point_t 
cc_dir(p2_c.x() - p1_c.x(), p2_c.y() - p1_c.y(), p2_c.z() - p1_c.z()); @@ -862,7 +897,7 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) // to_be_merged_pairs.insert(std::make_pair(cluster_1, cluster_2)); boost::add_edge(ilive2desc[map_cluster_index[cluster_1]], ilive2desc[map_cluster_index[cluster_2]], g2); - // std::cout << "Connect 2: " << cluster_1->get_length()/units::cm << " " << cluster_2->get_length()/units::cm << " " << cluster_1->get_center() << " " << cluster_2->get_center() << std::endl; + // std::cout << "Connect 2: " << cluster_1->get_length()/units::cm << " " << cluster_2->get_length()/units::cm << " " << pca1.center << " " << pca2.center << std::endl; // flag_merge = true; } else if ((angle_diff > 87) && (angle_diff1 > 90 - 1.5 * (90 - angle_diff)) && @@ -874,83 +909,13 @@ void WireCell::PointCloud::Facade::clustering_connect1(Grouping& live_grouping) // to_be_merged_pairs.insert(std::make_pair(cluster_1, cluster_2)); boost::add_edge(ilive2desc[map_cluster_index[cluster_1]], ilive2desc[map_cluster_index[cluster_2]], g2); - // std::cout << "Connect 2: " << cluster_1->get_length()/units::cm << " " << cluster_2->get_length()/units::cm << " " << cluster_1->get_center() << " " << cluster_2->get_center() << std::endl; + // std::cout << "Connect 2: " << cluster_1->get_length()/units::cm << " " << cluster_2->get_length()/units::cm << " " << pca1.center << " " << pca2.center << std::endl; // flag_merge = true; } } } } - new_clusters.clear(); - merge_clusters(g2, live_grouping, new_clusters); - /** - * round2 - */ - // merge_clusters.clear(); // clear it for other usage ... 
- // for (auto it = to_be_merged_pairs.begin(); it != to_be_merged_pairs.end(); it++) { - // Cluster *cluster1 = (*it).first; - // Cluster *cluster2 = (*it).second; - // // LogDebug(cluster1 << " " << cluster2 << " " << cluster1->get_cluster_id() << " " << - // // cluster2->get_cluster_id()); - - // bool flag_new = true; - // std::vector> temp_set; - // for (auto it1 = merge_clusters.begin(); it1 != merge_clusters.end(); it1++) { - // std::set &clusters = (*it1); - // if (clusters.find(cluster1) != clusters.end() || clusters.find(cluster2) != clusters.end()) { - // clusters.insert(cluster1); - // clusters.insert(cluster2); - // flag_new = false; - // temp_set.push_back(clusters); - // // break; - // } - // } - // if (flag_new) { - // std::set clusters; - // clusters.insert(cluster1); - // clusters.insert(cluster2); - // merge_clusters.push_back(clusters); - // } - // if (temp_set.size() > 1) { - // // merge them further ... - // std::set clusters; - // for (size_t i = 0; i != temp_set.size(); i++) { - // for (auto it1 = temp_set.at(i).begin(); it1 != temp_set.at(i).end(); it1++) { - // clusters.insert(*it1); - // } - // merge_clusters.erase(find(merge_clusters.begin(), merge_clusters.end(), temp_set.at(i))); - // } - // merge_clusters.push_back(clusters); - // } - // } - - // for (auto it = merge_clusters.begin(); it != merge_clusters.end(); it++) { - // std::set &clusters = (*it); - // Cluster *ncluster = new Cluster((*clusters.begin())->get_cluster_id()); - // live_clusters.push_back(ncluster); - // for (auto it1 = clusters.begin(); it1 != clusters.end(); it1++) { - // Cluster *ocluster = *(it1); - // // LogDebug(ocluster->get_cluster_id() << " "; - // SMGCSelection &mcells = ocluster->get_mcells(); - // for (auto it2 = mcells.begin(); it2 != mcells.end(); it2++) { - // SlimMergeGeomCell *mcell = (*it2); - // // LogDebug(ocluster->get_cluster_id() << " " << mcell); - // int time_slice = mcell->GetTimeSlice(); - // ncluster->AddCell(mcell, time_slice); - // } - // 
live_clusters.erase(find(live_clusters.begin(), live_clusters.end(), ocluster)); - // cluster_length_map.erase(ocluster); - // delete ocluster; - // } - // std::vector range_v1 = ncluster->get_uvwt_range(); - // double length_1 = sqrt(2. / 3. * - // (pow(pitch_u * range_v1.at(0), 2) + pow(pitch_v * range_v1.at(1), 2) + - // pow(pitch_w * range_v1.at(2), 2)) + - // pow(time_slice_width * range_v1.at(3), 2)); - // cluster_length_map[ncluster] = length_1; - // // LogDebug(std::endl; - // } - /** - * end of round2 - */ + new_clusters = merge_clusters(g2, live_grouping); + } diff --git a/clus/src/clustering_ctpointcloud.cxx b/clus/src/clustering_ctpointcloud.cxx index a8b952e2f..d13f06834 100644 --- a/clus/src/clustering_ctpointcloud.cxx +++ b/clus/src/clustering_ctpointcloud.cxx @@ -1,17 +1,59 @@ -#include +#include "WireCellClus/IEnsembleVisitor.h" +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellClus/ClusteringFuncsMixins.h" + +#include "WireCellIface/IConfigurable.h" + +#include "WireCellUtil/NamedFactory.h" + + +class ClusteringCTPointCloud; +WIRECELL_FACTORY(ClusteringCTPointCloud, ClusteringCTPointCloud, + WireCell::IConfigurable, WireCell::Clus::IEnsembleVisitor) + using namespace WireCell; using namespace WireCell::Clus; -using namespace WireCell::Aux; -using namespace WireCell::Aux::TensorDM; -using namespace WireCell::PointCloud::Facade; +using namespace WireCell::Clus::Facade; using namespace WireCell::PointCloud::Tree; + +static void clustering_ctpointcloud(Grouping& live_grouping, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts); + +class ClusteringCTPointCloud : public IConfigurable, public Clus::IEnsembleVisitor, private NeedDV, private NeedPCTS { +public: + ClusteringCTPointCloud() {} + virtual ~ClusteringCTPointCloud() {} + + void configure(const WireCell::Configuration& config) { + NeedDV::configure(config); + NeedPCTS::configure(config); + } + virtual Configuration default_configuration() const { + Configuration cfg; + 
return cfg; + } + + void visit(Ensemble& ensemble) const { + auto& live = *ensemble.with_name("live").at(0); + clustering_ctpointcloud(live, m_dv, m_pcts); + } +}; + + // The original developers do not care. #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wparentheses" -void WireCell::PointCloud::Facade::clustering_ctpointcloud(Grouping& live_grouping){ + +// This is a test function, not used in clustering +static void clustering_ctpointcloud( + Grouping& live_grouping, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts) +{ // test a few different functions and then print out ... std::cout << "Test CTPointCloud" << std::endl; @@ -43,23 +85,23 @@ void WireCell::PointCloud::Facade::clustering_ctpointcloud(Grouping& live_groupi p1.z() + (p2.z() - p1.z())/num_steps*(ii + 1) ); std::cout << "Test: " << p << std::endl; - bool flag = live_grouping.is_good_point(p,0,0.6*units::cm, 1, 1); - bool flag_wc = live_grouping.is_good_point_wc(p,0,0.6*units::cm, 1, 1); + bool flag = live_grouping.is_good_point(p,0,0,0.6*units::cm, 1, 1); + bool flag_wc = live_grouping.is_good_point_wc(p,0,0,0.6*units::cm, 1, 1); std::cout << "Test is_good_point: " << flag << " " << flag_wc << std::endl; - auto closest_points_u = live_grouping.get_closest_points(p,0.6*units::cm, 0, 0); - auto closest_points_v = live_grouping.get_closest_points(p,0.6*units::cm, 0, 1); - auto closest_points_w = live_grouping.get_closest_points(p,0.6*units::cm, 0, 2); + auto closest_points_u = live_grouping.get_closest_points(p,0.6*units::cm, 0, 0, 0); + auto closest_points_v = live_grouping.get_closest_points(p,0.6*units::cm, 0, 0, 1); + auto closest_points_w = live_grouping.get_closest_points(p,0.6*units::cm, 0, 0, 2); std::cout << "Test get_closest_points: " << closest_points_u.size() << " " << closest_points_v.size() << " " << closest_points_w.size() << std::endl; - bool flag_dead_chs_u = live_grouping.get_closest_dead_chs(p,1,0,0); - bool flag_dead_chs_v = 
live_grouping.get_closest_dead_chs(p,1,0,1); - bool flag_dead_chs_w = live_grouping.get_closest_dead_chs(p,1,0,2); + bool flag_dead_chs_u = live_grouping.get_closest_dead_chs(p,1,0, 0,0); + bool flag_dead_chs_v = live_grouping.get_closest_dead_chs(p,1,0, 0,1); + bool flag_dead_chs_w = live_grouping.get_closest_dead_chs(p,1,0, 0,2); std::cout << "Test get_closest_dead_chs: " << flag_dead_chs_u << " " << flag_dead_chs_v << " " << flag_dead_chs_w << std::endl; // Test point quality using grouping parameters std::vector scores; - scores = live_grouping.test_good_point(p, 0); + scores = live_grouping.test_good_point(p, 0, 0); std::cout << "Test test_good_point: " << scores[0] << " " << scores[1] << " " << scores[2] << " " << scores[3] << " " << scores[4] << " " << scores[5] << std::endl; // Check overall quality if (scores[0] + scores[3] + scores[1] + scores[4] + (scores[2]+scores[5])*2 < 3) { @@ -89,8 +131,8 @@ void WireCell::PointCloud::Facade::clustering_ctpointcloud(Grouping& live_groupi geo_point_t p(-1204.49*units::mm, -57.85*units::mm, 5635*units::mm); // std::cout << "Test: " << p << std::endl; - // bool flag = live_grouping.is_good_point(p,0,0.6*units::cm, 1, 1); - // bool flag_wc = live_grouping.is_good_point_wc(p,0,0.6*units::cm, 1, 1); + // bool flag = live_grouping.is_good_point(p,0,0,0.6*units::cm, 1, 1); + // bool flag_wc = live_grouping.is_good_point_wc(p,0,0,0.6*units::cm, 1, 1); // std::cout << "Test is_good_point: " << flag << " " << flag_wc << std::endl; // auto closest_points_u = live_grouping.get_closest_points(p,0.6*units::cm, 0, 0); @@ -105,34 +147,34 @@ void WireCell::PointCloud::Facade::clustering_ctpointcloud(Grouping& live_groupi - auto time_ch_u = live_grouping.convert_3Dpoint_time_ch(p,0,0); - auto time_ch_v = live_grouping.convert_3Dpoint_time_ch(p,0,1); - auto time_ch_w = live_grouping.convert_3Dpoint_time_ch(p,0,2); + auto time_ch_u = live_grouping.convert_3Dpoint_time_ch(p,0, 0,0); + auto time_ch_v = 
live_grouping.convert_3Dpoint_time_ch(p,0, 0,1); + auto time_ch_w = live_grouping.convert_3Dpoint_time_ch(p,0, 0,2); std::cout << "Test convert_3Dpoint_time_ch: " << int(std::get<0>(time_ch_u)/4) << " " << std::get<1>(time_ch_u) << " " << std::get<1>(time_ch_v)+2400 << " " << std::get<1>(time_ch_w)+4800 << std::endl; - std::cout << "Test Number of Points: " << live_grouping.get_num_points(0,0) << " " << live_grouping.get_num_points(0,1) << " " << live_grouping.get_num_points(0,2) << std::endl; + std::cout << "Test Number of Points: " << live_grouping.get_num_points(0, 0,0) << " " << live_grouping.get_num_points(0, 0,1) << " " << live_grouping.get_num_points(0, 0,2) << std::endl; - auto num_planes = live_grouping.test_good_point(p,0,0.6*units::cm, 1); + auto num_planes = live_grouping.test_good_point(p,0, 0,0.6*units::cm, 1); std::cout << "Test test_good_point: " << num_planes[0] << " " << num_planes[1] << " " << num_planes[2] << " " << num_planes[3] << " " << num_planes[4] << " " << num_planes[5] << std::endl; - std::cout << "Test Ave Charge: " << live_grouping.get_ave_3d_charge(p,1*units::cm,0) << " " << live_grouping.get_ave_charge(p,1*units::cm,0,0) << " " << live_grouping.get_ave_charge(p,1*units::cm,0,1) << " " << live_grouping.get_ave_charge(p,1*units::cm,0,2) << " " << std::endl; + std::cout << "Test Ave Charge: " << live_grouping.get_ave_3d_charge(p,0,0,1*units::cm) << " " << live_grouping.get_ave_charge(p,0,0,0,1*units::cm) << " " << live_grouping.get_ave_charge(p,0,0,1,1*units::cm) << " " << live_grouping.get_ave_charge(p,0,0,2,1*units::cm) << " " << std::endl; - auto point_u = live_grouping.convert_time_ch_2Dpoint(10*4, 10, 0, 0); - auto point_v = live_grouping.convert_time_ch_2Dpoint(10*4, 10, 0, 1); - auto point_w = live_grouping.convert_time_ch_2Dpoint(10*4, 10, 0, 2); + auto point_u = live_grouping.convert_time_ch_2Dpoint(10*4, 10, 0, 0, 0); + auto point_v = live_grouping.convert_time_ch_2Dpoint(10*4, 10, 0, 0, 1); + auto point_w = 
live_grouping.convert_time_ch_2Dpoint(10*4, 10, 0, 0, 2); std::cout << "Test 2D Conversion " << point_u.first << " " << point_u.second << " " << point_v.first << " " << point_v.second << " " << point_w.first << " " << point_w.second << std::endl; - auto dead_chs_u = live_grouping.get_overlap_dead_chs(10*4,1000*4,0,2400,0,0); - auto dead_chs_v = live_grouping.get_overlap_dead_chs(10*4,1000*4,0,2400,0,1); - auto dead_chs_w = live_grouping.get_overlap_dead_chs(10*4,1000*4,0,4800,0,2); - std::cout << "Test Overlap dead chs: " << dead_chs_u.size() << " " << dead_chs_v.size() << " " << dead_chs_w.size() << std::endl; + // auto dead_chs_u = live_grouping.get_overlap_dead_chs(10*4,1000*4,0,2400,0,0,0); + // auto dead_chs_v = live_grouping.get_overlap_dead_chs(10*4,1000*4,0,2400,0,0,1); + // auto dead_chs_w = live_grouping.get_overlap_dead_chs(10*4,1000*4,0,4800,0,0,2); + // std::cout << "Test Overlap dead chs: " << dead_chs_u.size() << " " << dead_chs_v.size() << " " << dead_chs_w.size() << std::endl; - std::cout << "Test all dead chs: " << live_grouping.get_all_dead_chs(0,0).size() + live_grouping.get_all_dead_chs(0,1).size() + live_grouping.get_all_dead_chs(0,2).size() << std::endl; + std::cout << "Test all dead chs: " << live_grouping.get_all_dead_chs(0,0,0).size() + live_grouping.get_all_dead_chs(0,0,1).size() + live_grouping.get_all_dead_chs(0,0,2).size() << std::endl; - auto good_chs_u = live_grouping.get_overlap_good_ch_charge(10*4,1000*4,0,2400,0,0); - auto good_chs_v = live_grouping.get_overlap_good_ch_charge(10*4,1000*4,0,2400,0,1); - auto good_chs_w = live_grouping.get_overlap_good_ch_charge(10*4,1000*4,0,4800,0,2); + auto good_chs_u = live_grouping.get_overlap_good_ch_charge(10*4,1000*4,0,2400,0,0,0); + auto good_chs_v = live_grouping.get_overlap_good_ch_charge(10*4,1000*4,0,2400,0,0,1); + auto good_chs_w = live_grouping.get_overlap_good_ch_charge(10*4,1000*4,0,4800,0,0,2); std::cout << "Test all good chs: " << good_chs_u.size() << " " << good_chs_v.size() << " 
" << good_chs_w.size() << std::endl; @@ -175,11 +217,12 @@ void WireCell::PointCloud::Facade::clustering_ctpointcloud(Grouping& live_groupi points1.push_back(points.first); points1.push_back(points.second); points1.push_back(p); - live_cluster->Calc_PCA(points1); - std::cout << "Test: " << live_cluster->get_center() << " " << live_cluster->get_pca_axis(0) << " " << live_cluster->get_pca_axis(1) << " " << live_cluster->get_pca_axis(2) << std::endl; + // Removed the wrong idea to set a PCA on live_cluster based on these points. + std::cout << "Warning: not calculating any PCA from three points\n"; + std::cout << "Test: " << live_cluster->get_pca().center << " " << live_cluster->get_pca().axis.at(0) << " " << live_cluster->get_pca().axis.at(1) << " " << live_cluster->get_pca().axis.at(2) << std::endl; geo_point_t p2(0,0,0); - auto dir2 = live_cluster->calc_pca_dir(p2, points1); + auto dir2 = calc_pca_dir(p2, points1); std::cout << "Test: " << dir2 << std::endl; auto p5 = live_cluster->calc_ave_pos(points.first, 10); @@ -189,376 +232,12 @@ void WireCell::PointCloud::Facade::clustering_ctpointcloud(Grouping& live_groupi auto start_wcpoint_idx = live_cluster->get_closest_point_index(points.first); auto end_wcpoint_idx = live_cluster->get_closest_point_index(points.second); - live_cluster->dijkstra_shortest_paths(start_wcpoint_idx, true); - live_cluster->cal_shortest_path(end_wcpoint_idx); + const auto& indices = live_cluster->graph_algorithms("ctpc", dv, pcts).shortest_path(start_wcpoint_idx, end_wcpoint_idx); - auto indices = live_cluster->get_path_wcps(); auto points2 = live_cluster->indices_to_points(indices); std::cout << "Test shortest path: " << points2.size() << " " << points2.at(0) << " " << points2.at(points2.size()-1) << std::endl; - std::vector points6; - {geo_point_t temp_p(592.338, 1144.19, 1897); points6.push_back(temp_p);} - {geo_point_t temp_p(592.338, 1135.53, 1900); points6.push_back(temp_p);} - {geo_point_t temp_p(587.934, 1125.14, 1900); 
points6.push_back(temp_p);} - {geo_point_t temp_p(583.53, 1119.08, 1898.5); points6.push_back(temp_p);} - {geo_point_t temp_p(581.328, 1113.88, 1898.5); points6.push_back(temp_p);} - {geo_point_t temp_p(581.328, 1106.96, 1898.5); points6.push_back(temp_p);} - {geo_point_t temp_p(576.924, 1104.36, 1900); points6.push_back(temp_p);} - {geo_point_t temp_p(576.924, 1102.63, 1903); points6.push_back(temp_p);} - {geo_point_t temp_p(576.924, 1099.16, 1903); points6.push_back(temp_p);} - {geo_point_t temp_p(576.924, 1088.77, 1903); points6.push_back(temp_p);} - {geo_point_t temp_p(576.924, 1087.04, 1906); points6.push_back(temp_p);} - {geo_point_t temp_p(574.722, 1084.44, 1907.5); points6.push_back(temp_p);} - {geo_point_t temp_p(574.722, 1074.05, 1907.5); points6.push_back(temp_p);} - {geo_point_t temp_p(570.318, 1070.58, 1907.5); points6.push_back(temp_p);} - {geo_point_t temp_p(570.318, 1060.19, 1907.5); points6.push_back(temp_p);} - {geo_point_t temp_p(570.318, 1056.73, 1907.5); points6.push_back(temp_p);} - {geo_point_t temp_p(568.116, 1054.13, 1909); points6.push_back(temp_p);} - {geo_point_t temp_p(568.116, 1043.74, 1909); points6.push_back(temp_p);} - {geo_point_t temp_p(568.116, 1042, 1912); points6.push_back(temp_p);} - {geo_point_t temp_p(563.712, 1034.21, 1910.5); points6.push_back(temp_p);} - {geo_point_t temp_p(563.712, 1023.82, 1910.5); points6.push_back(temp_p);} - {geo_point_t temp_p(563.712, 1016.89, 1910.5); points6.push_back(temp_p);} - {geo_point_t temp_p(559.308, 1011.69, 1913.5); points6.push_back(temp_p);} - {geo_point_t temp_p(559.308, 999.568, 1910.5); points6.push_back(temp_p);} - {geo_point_t temp_p(559.308, 990.042, 1912); points6.push_back(temp_p);} - {geo_point_t temp_p(554.904, 980.516, 1913.5); points6.push_back(temp_p);} - {geo_point_t temp_p(552.702, 975.319, 1913.5); points6.push_back(temp_p);} - {geo_point_t temp_p(550.5, 973.587, 1913.5); points6.push_back(temp_p);} - {geo_point_t temp_p(550.5, 963.195, 1913.5); 
points6.push_back(temp_p);} - {geo_point_t temp_p(550.5, 952.803, 1913.5); points6.push_back(temp_p);} - {geo_point_t temp_p(550.5, 946.74, 1915); points6.push_back(temp_p);} - {geo_point_t temp_p(548.298, 940.678, 1916.5); points6.push_back(temp_p);} - {geo_point_t temp_p(543.894, 933.75, 1916.5); points6.push_back(temp_p);} - {geo_point_t temp_p(543.894, 923.358, 1916.5); points6.push_back(temp_p);} - {geo_point_t temp_p(543.894, 919.894, 1916.5); points6.push_back(temp_p);} - {geo_point_t temp_p(541.692, 916.43, 1916.5); points6.push_back(temp_p);} - {geo_point_t temp_p(537.288, 906.037, 1916.5); points6.push_back(temp_p);} - {geo_point_t temp_p(537.288, 895.645, 1916.5); points6.push_back(temp_p);} - {geo_point_t temp_p(532.884, 893.913, 1919.5); points6.push_back(temp_p);} - {geo_point_t temp_p(532.884, 891.315, 1921); points6.push_back(temp_p);} - {geo_point_t temp_p(532.884, 884.387, 1921); points6.push_back(temp_p);} - {geo_point_t temp_p(532.884, 880.922, 1921); points6.push_back(temp_p);} - {geo_point_t temp_p(530.682, 877.458, 1921); points6.push_back(temp_p);} - {geo_point_t temp_p(530.682, 870.53, 1921); points6.push_back(temp_p);} - {geo_point_t temp_p(528.48, 867.932, 1919.5); points6.push_back(temp_p);} - {geo_point_t temp_p(524.076, 861.004, 1922.5); points6.push_back(temp_p);} - {geo_point_t temp_p(524.076, 850.612, 1922.5); points6.push_back(temp_p);} - {geo_point_t temp_p(524.076, 840.219, 1922.5); points6.push_back(temp_p);} - {geo_point_t temp_p(524.076, 836.755, 1922.5); points6.push_back(temp_p);} - {geo_point_t temp_p(521.874, 829.827, 1922.5); points6.push_back(temp_p);} - {geo_point_t temp_p(519.672, 822.899, 1922.5); points6.push_back(temp_p);} - {geo_point_t temp_p(517.47, 812.507, 1922.5); points6.push_back(temp_p);} - {geo_point_t temp_p(517.47, 809.043, 1922.5); points6.push_back(temp_p);} - {geo_point_t temp_p(515.268, 798.65, 1922.5); points6.push_back(temp_p);} - {geo_point_t temp_p(513.066, 795.186, 1922.5); 
points6.push_back(temp_p);} - {geo_point_t temp_p(513.066, 788.258, 1922.5); points6.push_back(temp_p);} - {geo_point_t temp_p(510.864, 784.794, 1922.5); points6.push_back(temp_p);} - {geo_point_t temp_p(510.864, 775.267, 1924); points6.push_back(temp_p);} - {geo_point_t temp_p(508.662, 772.67, 1925.5); points6.push_back(temp_p);} - {geo_point_t temp_p(506.46, 762.277, 1925.5); points6.push_back(temp_p);} - {geo_point_t temp_p(504.258, 751.885, 1925.5); points6.push_back(temp_p);} - {geo_point_t temp_p(499.854, 741.492, 1925.5); points6.push_back(temp_p);} - {geo_point_t temp_p(499.854, 731.1, 1925.5); points6.push_back(temp_p);} - {geo_point_t temp_p(499.854, 725.038, 1927); points6.push_back(temp_p);} - {geo_point_t temp_p(495.45, 715.512, 1928.5); points6.push_back(temp_p);} - {geo_point_t temp_p(495.45, 708.583, 1928.5); points6.push_back(temp_p);} - {geo_point_t temp_p(493.248, 698.191, 1928.5); points6.push_back(temp_p);} - {geo_point_t temp_p(491.046, 687.799, 1928.5); points6.push_back(temp_p);} - {geo_point_t temp_p(488.844, 677.407, 1928.5); points6.push_back(temp_p);} - {geo_point_t temp_p(488.844, 673.942, 1928.5); points6.push_back(temp_p);} - {geo_point_t temp_p(484.44, 665.282, 1931.5); points6.push_back(temp_p);} - {geo_point_t temp_p(480.036, 661.818, 1931.5); points6.push_back(temp_p);} - {geo_point_t temp_p(480.036, 651.426, 1931.5); points6.push_back(temp_p);} - {geo_point_t temp_p(480.036, 641.034, 1931.5); points6.push_back(temp_p);} - {geo_point_t temp_p(480.036, 637.57, 1931.5); points6.push_back(temp_p);} - {geo_point_t temp_p(477.834, 630.641, 1931.5); points6.push_back(temp_p);} - {geo_point_t temp_p(477.834, 623.713, 1931.5); points6.push_back(temp_p);} - {geo_point_t temp_p(475.632, 613.321, 1931.5); points6.push_back(temp_p);} - {geo_point_t temp_p(475.632, 609.857, 1931.5); points6.push_back(temp_p);} - {geo_point_t temp_p(473.43, 598.599, 1930); points6.push_back(temp_p);} - {geo_point_t temp_p(471.228, 585.609, 1934.5); 
points6.push_back(temp_p);} - {geo_point_t temp_p(466.824, 576.948, 1934.5); points6.push_back(temp_p);} - {geo_point_t temp_p(464.622, 571.752, 1934.5); points6.push_back(temp_p);} - {geo_point_t temp_p(464.622, 561.36, 1934.5); points6.push_back(temp_p);} - {geo_point_t temp_p(460.218, 554.431, 1937.5); points6.push_back(temp_p);} - {geo_point_t temp_p(460.218, 544.039, 1937.5); points6.push_back(temp_p);} - {geo_point_t temp_p(460.218, 533.646, 1937.5); points6.push_back(temp_p);} - {geo_point_t temp_p(460.218, 530.182, 1937.5); points6.push_back(temp_p);} - {geo_point_t temp_p(458.016, 519.79, 1937.5); points6.push_back(temp_p);} - {geo_point_t temp_p(455.814, 509.398, 1937.5); points6.push_back(temp_p);} - {geo_point_t temp_p(453.612, 499.005, 1937.5); points6.push_back(temp_p);} - {geo_point_t temp_p(451.41, 490.345, 1940.5); points6.push_back(temp_p);} - {geo_point_t temp_p(451.41, 486.881, 1940.5); points6.push_back(temp_p);} - {geo_point_t temp_p(449.208, 476.489, 1940.5); points6.push_back(temp_p);} - {geo_point_t temp_p(444.804, 470.427, 1942); points6.push_back(temp_p);} - {geo_point_t temp_p(440.4, 464.365, 1943.5); points6.push_back(temp_p);} - {geo_point_t temp_p(438.198, 460.9, 1943.5); points6.push_back(temp_p);} - {geo_point_t temp_p(438.198, 450.508, 1943.5); points6.push_back(temp_p);} - {geo_point_t temp_p(433.794, 444.446, 1945); points6.push_back(temp_p);} - {geo_point_t temp_p(433.794, 440.982, 1945); points6.push_back(temp_p);} - {geo_point_t temp_p(431.592, 437.518, 1945); points6.push_back(temp_p);} - {geo_point_t temp_p(431.592, 426.259, 1943.5); points6.push_back(temp_p);} - {geo_point_t temp_p(431.592, 415.867, 1943.5); points6.push_back(temp_p);} - {geo_point_t temp_p(431.592, 405.475, 1943.5); points6.push_back(temp_p);} - {geo_point_t temp_p(431.592, 402.011, 1943.5); points6.push_back(temp_p);} - {geo_point_t temp_p(429.39, 395.082, 1943.5); points6.push_back(temp_p);} - {geo_point_t temp_p(429.39, 389.02, 1945); 
points6.push_back(temp_p);} - {geo_point_t temp_p(429.39, 382.092, 1945); points6.push_back(temp_p);} - {geo_point_t temp_p(427.188, 379.494, 1946.5); points6.push_back(temp_p);} - {geo_point_t temp_p(422.784, 376.03, 1946.5); points6.push_back(temp_p);} - {geo_point_t temp_p(422.784, 365.638, 1946.5); points6.push_back(temp_p);} - {geo_point_t temp_p(422.784, 359.576, 1948); points6.push_back(temp_p);} - {geo_point_t temp_p(418.38, 348.317, 1946.5); points6.push_back(temp_p);} - {geo_point_t temp_p(418.38, 344.853, 1946.5); points6.push_back(temp_p);} - {geo_point_t temp_p(416.178, 334.461, 1946.5); points6.push_back(temp_p);} - {geo_point_t temp_p(413.976, 327.532, 1946.5); points6.push_back(temp_p);} - {geo_point_t temp_p(411.774, 317.14, 1946.5); points6.push_back(temp_p);} - {geo_point_t temp_p(409.572, 306.748, 1946.5); points6.push_back(temp_p);} - {geo_point_t temp_p(407.37, 296.355, 1946.5); points6.push_back(temp_p);} - {geo_point_t temp_p(405.168, 289.427, 1946.5); points6.push_back(temp_p);} - {geo_point_t temp_p(400.764, 280.767, 1949.5); points6.push_back(temp_p);} - {geo_point_t temp_p(400.764, 270.375, 1949.5); points6.push_back(temp_p);} - {geo_point_t temp_p(400.764, 263.447, 1949.5); points6.push_back(temp_p);} - {geo_point_t temp_p(398.562, 256.518, 1949.5); points6.push_back(temp_p);} - {geo_point_t temp_p(396.36, 249.59, 1949.5); points6.push_back(temp_p);} - {geo_point_t temp_p(391.956, 239.198, 1949.5); points6.push_back(temp_p);} - {geo_point_t temp_p(387.552, 234.868, 1948); points6.push_back(temp_p);} - {geo_point_t temp_p(387.552, 224.476, 1948); points6.push_back(temp_p);} - {geo_point_t temp_p(387.552, 214.083, 1948); points6.push_back(temp_p);} - {geo_point_t temp_p(387.552, 207.155, 1948); points6.push_back(temp_p);} - {geo_point_t temp_p(385.35, 204.557, 1949.5); points6.push_back(temp_p);} - {geo_point_t temp_p(385.35, 201.093, 1949.5); points6.push_back(temp_p);} - {geo_point_t temp_p(383.148, 198.495, 1951); 
points6.push_back(temp_p);} - {geo_point_t temp_p(380.946, 192.432, 1952.5); points6.push_back(temp_p);} - {geo_point_t temp_p(380.946, 185.504, 1952.5); points6.push_back(temp_p);} - {geo_point_t temp_p(378.744, 178.576, 1952.5); points6.push_back(temp_p);} - {geo_point_t temp_p(374.34, 168.184, 1952.5); points6.push_back(temp_p);} - {geo_point_t temp_p(374.34, 157.791, 1952.5); points6.push_back(temp_p);} - {geo_point_t temp_p(372.138, 150.863, 1952.5); points6.push_back(temp_p);} - {geo_point_t temp_p(369.936, 140.471, 1952.5); points6.push_back(temp_p);} - {geo_point_t temp_p(367.734, 135.275, 1955.5); points6.push_back(temp_p);} - {geo_point_t temp_p(367.734, 128.346, 1955.5); points6.push_back(temp_p);} - {geo_point_t temp_p(363.33, 124.884, 1955.5); points6.push_back(temp_p);} - {geo_point_t temp_p(363.33, 114.488, 1955.5); points6.push_back(temp_p);} - {geo_point_t temp_p(363.33, 107.564, 1955.5); points6.push_back(temp_p);} - {geo_point_t temp_p(361.128, 104.097, 1955.5); points6.push_back(temp_p);} - {geo_point_t temp_p(361.128, 98.0347, 1957); points6.push_back(temp_p);} - {geo_point_t temp_p(358.926, 90.2434, 1955.5); points6.push_back(temp_p);} - {geo_point_t temp_p(356.724, 83.3143, 1955.5); points6.push_back(temp_p);} - {geo_point_t temp_p(354.522, 76.3853, 1955.5); points6.push_back(temp_p);} - {geo_point_t temp_p(352.32, 67.7261, 1958.5); points6.push_back(temp_p);} - {geo_point_t temp_p(350.118, 58.1973, 1960); points6.push_back(temp_p);} - {geo_point_t temp_p(347.916, 46.9389, 1958.5); points6.push_back(temp_p);} - {geo_point_t temp_p(343.512, 43.4768, 1958.5); points6.push_back(temp_p);} - {geo_point_t temp_p(343.512, 34.8176, 1961.5); points6.push_back(temp_p);} - {geo_point_t temp_p(343.512, 27.8886, 1961.5); points6.push_back(temp_p);} - {geo_point_t temp_p(341.31, 24.4215, 1961.5); points6.push_back(temp_p);} - {geo_point_t temp_p(341.31, 20.9595, 1961.5); points6.push_back(temp_p);} - {geo_point_t temp_p(336.906, 14.0304, 1961.5); 
points6.push_back(temp_p);} - {geo_point_t temp_p(336.906, 3.63921, 1961.5); points6.push_back(temp_p);} - {geo_point_t temp_p(336.906, 0.172097, 1961.5); points6.push_back(temp_p);} - {geo_point_t temp_p(334.704, -5.01991, 1964.5); points6.push_back(temp_p);} - {geo_point_t temp_p(330.3, -15.4161, 1964.5); points6.push_back(temp_p);} - {geo_point_t temp_p(328.098, -20.612, 1967.5); points6.push_back(temp_p);} - {geo_point_t temp_p(328.098, -31.0043, 1967.5); points6.push_back(temp_p);} - {geo_point_t temp_p(328.098, -37.9326, 1967.5); points6.push_back(temp_p);} - {geo_point_t temp_p(323.694, -41.3967, 1967.5); points6.push_back(temp_p);} - {geo_point_t temp_p(323.694, -44.8608, 1967.5); points6.push_back(temp_p);} - {geo_point_t temp_p(323.694, -47.459, 1969); points6.push_back(temp_p);} - {geo_point_t temp_p(319.29, -48.3252, 1970.5); points6.push_back(temp_p);} - {geo_point_t temp_p(319.29, -58.7164, 1970.5); points6.push_back(temp_p);} - {geo_point_t temp_p(317.088, -67.3774, 1970.5); points6.push_back(temp_p);} - {geo_point_t temp_p(317.088, -75.1712, 1969); points6.push_back(temp_p);} - {geo_point_t temp_p(314.886, -80.3688, 1972); points6.push_back(temp_p);} - {geo_point_t temp_p(312.684, -85.564, 1975); points6.push_back(temp_p);} - {geo_point_t temp_p(310.482, -89.8941, 1973.5); points6.push_back(temp_p);} - {geo_point_t temp_p(310.482, -96.8223, 1973.5); points6.push_back(temp_p);} - {geo_point_t temp_p(308.28, -102.884, 1975); points6.push_back(temp_p);} - {geo_point_t temp_p(308.28, -104.616, 1978); points6.push_back(temp_p);} - {geo_point_t temp_p(306.078, -111.545, 1978); points6.push_back(temp_p);} - {geo_point_t temp_p(301.674, -116.74, 1981); points6.push_back(temp_p);} - {geo_point_t temp_p(301.674, -118.472, 1984); points6.push_back(temp_p);} - {geo_point_t temp_p(301.674, -120.204, 1987); points6.push_back(temp_p);} - {geo_point_t temp_p(301.674, -121.938, 1984); points6.push_back(temp_p);} - {geo_point_t temp_p(301.674, -123.67, 1987); 
points6.push_back(temp_p);} - {geo_point_t temp_p(299.472, -127.133, 1987); points6.push_back(temp_p);} - {geo_point_t temp_p(299.472, -130.596, 1987); points6.push_back(temp_p);} - {geo_point_t temp_p(299.472, -134.062, 1987); points6.push_back(temp_p);} - {geo_point_t temp_p(299.472, -139.258, 1984); points6.push_back(temp_p);} - {geo_point_t temp_p(297.27, -146.186, 1984); points6.push_back(temp_p);} - {geo_point_t temp_p(297.27, -147.918, 1987); points6.push_back(temp_p);} - {geo_point_t temp_p(297.27, -154.846, 1987); points6.push_back(temp_p);} - {geo_point_t temp_p(297.27, -156.578, 1984); points6.push_back(temp_p);} - {geo_point_t temp_p(292.866, -167.836, 1985.5); points6.push_back(temp_p);} - {geo_point_t temp_p(290.664, -176.497, 1985.5); points6.push_back(temp_p);} - {geo_point_t temp_p(290.664, -182.559, 1987); points6.push_back(temp_p);} - {geo_point_t temp_p(286.26, -192.085, 1988.5); points6.push_back(temp_p);} - {geo_point_t temp_p(281.856, -195.549, 1988.5); points6.push_back(temp_p);} - {geo_point_t temp_p(277.452, -201.611, 1990); points6.push_back(temp_p);} - {geo_point_t temp_p(277.452, -210.272, 1993); points6.push_back(temp_p);} - {geo_point_t temp_p(273.048, -215.468, 1990); points6.push_back(temp_p);} - {geo_point_t temp_p(273.048, -226.726, 1988.5); points6.push_back(temp_p);} - {geo_point_t temp_p(273.048, -237.119, 1988.5); points6.push_back(temp_p);} - {geo_point_t temp_p(273.048, -244.047, 1988.5); points6.push_back(temp_p);} - {geo_point_t temp_p(270.846, -249.243, 1991.5); points6.push_back(temp_p);} - {geo_point_t temp_p(268.644, -251.841, 1993); points6.push_back(temp_p);} - {geo_point_t temp_p(264.24, -256.171, 1991.5); points6.push_back(temp_p);} - {geo_point_t temp_p(262.038, -257.903, 1991.5); points6.push_back(temp_p);} - {geo_point_t temp_p(262.038, -264.831, 1991.5); points6.push_back(temp_p);} - {geo_point_t temp_p(257.634, -269.161, 1993); points6.push_back(temp_p);} - {geo_point_t temp_p(255.432, -272.626, 1993); 
points6.push_back(temp_p);} - {geo_point_t temp_p(253.23, -281.286, 1996); points6.push_back(temp_p);} - {geo_point_t temp_p(253.23, -283.018, 1999); points6.push_back(temp_p);} - {geo_point_t temp_p(253.23, -289.946, 1999); points6.push_back(temp_p);} - {geo_point_t temp_p(248.826, -299.472, 1997.5); points6.push_back(temp_p);} - {geo_point_t temp_p(248.826, -306.4, 1997.5); points6.push_back(temp_p);} - {geo_point_t temp_p(246.624, -307.267, 1999); points6.push_back(temp_p);} - {geo_point_t temp_p(246.624, -310.731, 1999); points6.push_back(temp_p);} - {geo_point_t temp_p(246.624, -318.525, 1997.5); points6.push_back(temp_p);} - {geo_point_t temp_p(244.422, -321.123, 1999); points6.push_back(temp_p);} - {geo_point_t temp_p(244.422, -325.453, 1997.5); points6.push_back(temp_p);} - {geo_point_t temp_p(244.422, -332.381, 1997.5); points6.push_back(temp_p);} - {geo_point_t temp_p(244.422, -341.908, 1999); points6.push_back(temp_p);} - {geo_point_t temp_p(240.018, -347.97, 2000.5); points6.push_back(temp_p);} - {geo_point_t temp_p(237.816, -354.032, 2002); points6.push_back(temp_p);} - {geo_point_t temp_p(235.614, -358.362, 2000.5); points6.push_back(temp_p);} - {geo_point_t temp_p(231.21, -369.62, 1999); points6.push_back(temp_p);} - {geo_point_t temp_p(231.21, -374.817, 2002); points6.push_back(temp_p);} - {geo_point_t temp_p(229.008, -380.879, 2003.5); points6.push_back(temp_p);} - {geo_point_t temp_p(226.806, -390.405, 2005); points6.push_back(temp_p);} - {geo_point_t temp_p(222.402, -396.467, 2003.5); points6.push_back(temp_p);} - {geo_point_t temp_p(222.402, -406.859, 2003.5); points6.push_back(temp_p);} - {geo_point_t temp_p(217.998, -412.056, 2003.5); points6.push_back(temp_p);} - {geo_point_t temp_p(217.998, -418.984, 2003.5); points6.push_back(temp_p);} - {geo_point_t temp_p(213.594, -422.448, 2003.5); points6.push_back(temp_p);} - {geo_point_t temp_p(209.19, -432.84, 2003.5); points6.push_back(temp_p);} - {geo_point_t temp_p(209.19, -439.769, 2003.5); 
points6.push_back(temp_p);} - {geo_point_t temp_p(209.19, -442.367, 2005); points6.push_back(temp_p);} - {geo_point_t temp_p(209.19, -445.831, 2005); points6.push_back(temp_p);} - {geo_point_t temp_p(206.988, -452.759, 2005); points6.push_back(temp_p);} - {geo_point_t temp_p(204.786, -459.687, 2005); points6.push_back(temp_p);} - {geo_point_t temp_p(200.382, -463.151, 2005); points6.push_back(temp_p);} - {geo_point_t temp_p(198.18, -464.017, 2006.5); points6.push_back(temp_p);} - {geo_point_t temp_p(198.18, -464.883, 2008); points6.push_back(temp_p);} - {geo_point_t temp_p(198.18, -470.945, 2006.5); points6.push_back(temp_p);} - {geo_point_t temp_p(195.978, -476.142, 2006.5); points6.push_back(temp_p);} - {geo_point_t temp_p(191.574, -488.266, 2003.5); points6.push_back(temp_p);} - {geo_point_t temp_p(189.372, -494.328, 2005); points6.push_back(temp_p);} - {geo_point_t temp_p(184.968, -500.39, 2003.5); points6.push_back(temp_p);} - {geo_point_t temp_p(184.968, -501.256, 2005); points6.push_back(temp_p);} - {geo_point_t temp_p(184.968, -502.988, 2008); points6.push_back(temp_p);} - {geo_point_t temp_p(182.766, -509.916, 2008); points6.push_back(temp_p);} - {geo_point_t temp_p(182.766, -514.247, 2006.5); points6.push_back(temp_p);} - {geo_point_t temp_p(180.564, -519.443, 2006.5); points6.push_back(temp_p);} - {geo_point_t temp_p(180.564, -526.371, 2006.5); points6.push_back(temp_p);} - {geo_point_t temp_p(176.16, -534.165, 2008); points6.push_back(temp_p);} - {geo_point_t temp_p(171.756, -542.825, 2011); points6.push_back(temp_p);} - {geo_point_t temp_p(171.756, -553.218, 2011); points6.push_back(temp_p);} - {geo_point_t temp_p(167.352, -556.682, 2011); points6.push_back(temp_p);} - {geo_point_t temp_p(167.352, -561.878, 2008); points6.push_back(temp_p);} - {geo_point_t temp_p(165.15, -562.744, 2006.5); points6.push_back(temp_p);} - {geo_point_t temp_p(165.15, -573.137, 2006.5); points6.push_back(temp_p);} - {geo_point_t temp_p(165.15, -576.6, 2006.5); 
points6.push_back(temp_p);} - {geo_point_t temp_p(160.746, -586.993, 2009.5); points6.push_back(temp_p);} - {geo_point_t temp_p(158.544, -590.457, 2009.5); points6.push_back(temp_p);} - {geo_point_t temp_p(154.14, -593.921, 2009.5); points6.push_back(temp_p);} - {geo_point_t temp_p(151.938, -599.116, 2009.5); points6.push_back(temp_p);} - {geo_point_t temp_p(149.736, -600.849, 2009.5); points6.push_back(temp_p);} - {geo_point_t temp_p(149.736, -611.241, 2009.5); points6.push_back(temp_p);} - {geo_point_t temp_p(149.736, -621.634, 2009.5); points6.push_back(temp_p);} - {geo_point_t temp_p(145.332, -632.025, 2012.5); points6.push_back(temp_p);} - {geo_point_t temp_p(140.928, -637.222, 2012.5); points6.push_back(temp_p);} - {geo_point_t temp_p(138.726, -643.284, 2011); points6.push_back(temp_p);} - {geo_point_t temp_p(134.322, -648.48, 2008); points6.push_back(temp_p);} - {geo_point_t temp_p(134.322, -650.212, 2005); points6.push_back(temp_p);} - {geo_point_t temp_p(129.918, -658.872, 2002); points6.push_back(temp_p);} - {geo_point_t temp_p(125.514, -664.068, 2005); points6.push_back(temp_p);} - {geo_point_t temp_p(123.312, -670.997, 2005); points6.push_back(temp_p);} - {geo_point_t temp_p(123.312, -677.925, 2005); points6.push_back(temp_p);} - {geo_point_t temp_p(118.908, -684.853, 2005); points6.push_back(temp_p);} - {geo_point_t temp_p(118.908, -690.915, 2006.5); points6.push_back(temp_p);} - {geo_point_t temp_p(116.706, -700.442, 2008); points6.push_back(temp_p);} - {geo_point_t temp_p(112.302, -706.504, 2009.5); points6.push_back(temp_p);} - {geo_point_t temp_p(110.1, -717.762, 2008); points6.push_back(temp_p);} - {geo_point_t temp_p(107.898, -720.36, 2009.5); points6.push_back(temp_p);} - {geo_point_t temp_p(103.494, -724.69, 2008); points6.push_back(temp_p);} - {geo_point_t temp_p(99.09, -731.619, 2008); points6.push_back(temp_p);} - {geo_point_t temp_p(96.888, -738.547, 2008); points6.push_back(temp_p);} - {geo_point_t temp_p(96.888, -748.939, 2008); 
points6.push_back(temp_p);} - {geo_point_t temp_p(92.484, -756.733, 2012.5); points6.push_back(temp_p);} - {geo_point_t temp_p(88.08, -767.992, 2011); points6.push_back(temp_p);} - {geo_point_t temp_p(88.08, -774.92, 2011); points6.push_back(temp_p);} - {geo_point_t temp_p(83.676, -783.58, 2008); points6.push_back(temp_p);} - {geo_point_t temp_p(83.676, -792.24, 2011); points6.push_back(temp_p);} - {geo_point_t temp_p(81.474, -797.437, 2014); points6.push_back(temp_p);} - {geo_point_t temp_p(81.474, -807.829, 2014); points6.push_back(temp_p);} - {geo_point_t temp_p(79.272, -813.025, 2017); points6.push_back(temp_p);} - {geo_point_t temp_p(77.07, -820.819, 2015.5); points6.push_back(temp_p);} - {geo_point_t temp_p(74.868, -823.417, 2017); points6.push_back(temp_p);} - {geo_point_t temp_p(74.868, -825.15, 2020); points6.push_back(temp_p);} - {geo_point_t temp_p(70.464, -831.211, 2021.5); points6.push_back(temp_p);} - {geo_point_t temp_p(70.464, -838.14, 2021.5); points6.push_back(temp_p);} - {geo_point_t temp_p(68.262, -839.872, 2024.5); points6.push_back(temp_p);} - {geo_point_t temp_p(68.262, -843.336, 2024.5); points6.push_back(temp_p);} - {geo_point_t temp_p(66.06, -850.264, 2024.5); points6.push_back(temp_p);} - {geo_point_t temp_p(61.656, -860.656, 2024.5); points6.push_back(temp_p);} - {geo_point_t temp_p(57.252, -867.585, 2027.5); points6.push_back(temp_p);} - {geo_point_t temp_p(52.848, -872.781, 2027.5); points6.push_back(temp_p);} - {geo_point_t temp_p(50.646, -877.977, 2027.5); points6.push_back(temp_p);} - {geo_point_t temp_p(50.646, -884.905, 2027.5); points6.push_back(temp_p);} - {geo_point_t temp_p(48.444, -890.101, 2030.5); points6.push_back(temp_p);} - {geo_point_t temp_p(44.04, -896.163, 2029); points6.push_back(temp_p);} - {geo_point_t temp_p(44.04, -903.092, 2029); points6.push_back(temp_p);} - {geo_point_t temp_p(39.636, -910.886, 2027.5); points6.push_back(temp_p);} - {geo_point_t temp_p(39.636, -917.814, 2027.5); points6.push_back(temp_p);} - 
{geo_point_t temp_p(35.232, -928.206, 2027.5); points6.push_back(temp_p);} - {geo_point_t temp_p(30.828, -934.269, 2029); points6.push_back(temp_p);} - {geo_point_t temp_p(26.424, -939.465, 2026); points6.push_back(temp_p);} - {geo_point_t temp_p(26.424, -948.125, 2029); points6.push_back(temp_p);} - {geo_point_t temp_p(22.02, -951.589, 2029); points6.push_back(temp_p);} - {geo_point_t temp_p(22.02, -961.981, 2029); points6.push_back(temp_p);} - {geo_point_t temp_p(17.616, -965.445, 2029); points6.push_back(temp_p);} - {geo_point_t temp_p(15.414, -968.91, 2029); points6.push_back(temp_p);} - {geo_point_t temp_p(13.212, -973.24, 2030.5); points6.push_back(temp_p);} - {geo_point_t temp_p(13.212, -981.034, 2032); points6.push_back(temp_p);} - {geo_point_t temp_p(13.212, -986.23, 2029); points6.push_back(temp_p);} - {geo_point_t temp_p(8.808, -996.622, 2029); points6.push_back(temp_p);} - {geo_point_t temp_p(4.404, -1000.95, 2033.5); points6.push_back(temp_p);} - {geo_point_t temp_p(4.404, -1007.88, 2033.5); points6.push_back(temp_p);} - {geo_point_t temp_p(2.202, -1018.27, 2033.5); points6.push_back(temp_p);} - {geo_point_t temp_p(0, -1020.87, 2035); points6.push_back(temp_p);} - {geo_point_t temp_p(-4.404, -1022.6, 2038); points6.push_back(temp_p);} - {geo_point_t temp_p(-8.808, -1030.4, 2036.5); points6.push_back(temp_p);} - {geo_point_t temp_p(-8.808, -1037.33, 2036.5); points6.push_back(temp_p);} - {geo_point_t temp_p(-13.212, -1040.79, 2036.5); points6.push_back(temp_p);} - {geo_point_t temp_p(-13.212, -1047.72, 2036.5); points6.push_back(temp_p);} - {geo_point_t temp_p(-17.616, -1050.32, 2038); points6.push_back(temp_p);} - {geo_point_t temp_p(-22.02, -1055.51, 2041); points6.push_back(temp_p);} - {geo_point_t temp_p(-22.02, -1063.31, 2039.5); points6.push_back(temp_p);} - {geo_point_t temp_p(-22.02, -1073.7, 2039.5); points6.push_back(temp_p);} - {geo_point_t temp_p(-26.424, -1084.09, 2039.5); points6.push_back(temp_p);} - {geo_point_t temp_p(-28.626, -1091.02, 
2039.5); points6.push_back(temp_p);} - {geo_point_t temp_p(-33.03, -1094.48, 2039.5); points6.push_back(temp_p);} - {geo_point_t temp_p(-33.03, -1104.88, 2039.5); points6.push_back(temp_p);} - {geo_point_t temp_p(-37.434, -1115.27, 2039.5); points6.push_back(temp_p);} - {geo_point_t temp_p(-41.838, -1117, 2039.5); points6.push_back(temp_p);} - {geo_point_t temp_p(-44.04, -1122.2, 2039.5); points6.push_back(temp_p);} - {geo_point_t temp_p(-44.04, -1129.12, 2039.5); points6.push_back(temp_p);} - {geo_point_t temp_p(-46.242, -1136.05, 2039.5); points6.push_back(temp_p);} - {geo_point_t temp_p(-50.646, -1142.98, 2042.5); points6.push_back(temp_p);} - {geo_point_t temp_p(-50.646, -1153.37, 2042.5); points6.push_back(temp_p);} - - std::cout << "Test: " << points6.size() << " " << points6.at(0) << " " << points6.at(points6.size()-1) << std::endl; - - - live_cluster->organize_points_path_vec(points6,0.6*units::cm); - std::cout << "Test: " << points6.size() << " " << points6.at(0) << " " << points6.at(points6.size()-1) << std::endl; - - live_cluster->organize_path_points(points6,0.6*units::cm); - std::cout << "Test: " << points6.size() << " " << points6.at(0) << " " << points6.at(points6.size()-1) << std::endl; - - live_cluster->examine_graph(true); + live_cluster->connected_blobs(dv, pcts); } } } diff --git a/clus/src/clustering_deghost.cxx b/clus/src/clustering_deghost.cxx index 876d71225..417fefc03 100644 --- a/clus/src/clustering_deghost.cxx +++ b/clus/src/clustering_deghost.cxx @@ -1,12 +1,109 @@ -#include +#include "WireCellClus/IEnsembleVisitor.h" +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellClus/ClusteringFuncsMixins.h" + +#include "WireCellIface/IConfigurable.h" + +#include "WireCellUtil/NamedFactory.h" + +class ClusteringDeghost; +WIRECELL_FACTORY(ClusteringDeghost, ClusteringDeghost, + WireCell::IConfigurable, WireCell::Clus::IEnsembleVisitor) + using namespace WireCell; using namespace WireCell::Clus; -using namespace WireCell::Aux; -using 
namespace WireCell::Aux::TensorDM; -using namespace WireCell::PointCloud::Facade; +using namespace WireCell::Clus::Facade; using namespace WireCell::PointCloud::Tree; +// bool Cluster::construct_skeleton(IDetectorVolumes::pointer dv, IPCTransformSet::pointer pcts, const bool use_ctpc) +// { + +static std::pair skeleton_points_hilo(const Cluster& cluster) +{ + geo_point_t highest_wcp = cluster.point3d(0); + geo_point_t lowest_wcp = cluster.point3d(0); + size_t highest_index = 0; + size_t lowest_index = 0; + + geo_point_t main_dir = cluster.get_pca().axis.at(0); + main_dir = main_dir.norm(); + geo_point_t center = cluster.get_pca().center; + geo_point_t temp_pt(highest_wcp.x() - center.x(), highest_wcp.y() - center.y(), highest_wcp.z() - center.z()); + double highest_value = temp_pt.dot(main_dir); + double lowest_value = highest_value; + + for (int i = 1; i < cluster.npoints(); i++) { + temp_pt.set(cluster.point3d(i).x() - center.x(), + cluster.point3d(i).y() - center.y(), + cluster.point3d(i).z() - center.z()); + double value = temp_pt.dot(main_dir); + if (value > highest_value) { + highest_value = value; + highest_wcp = cluster.point3d(i); + highest_index = i; + } + else if (value < lowest_value) { + lowest_value = value; + lowest_wcp = cluster.point3d(i); + lowest_index = i; + } + } + return std::make_pair(highest_index, lowest_index); +} + +static std::vector get_path_wcps(const Cluster& cluster, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts, + bool use_ctpc) +{ + auto [hi, lo] = skeleton_points_hilo(cluster); + if (use_ctpc) { + return cluster.graph_algorithms("ctpc", dv, pcts).shortest_path(hi, lo); + } + else { + return cluster.graph_algorithms().shortest_path(hi, lo); + } +} + + + +static void clustering_deghost(Grouping& live_grouping, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts, + const Tree::Scope& scope, + const bool use_ctpc, + double length_cut = 0); + +class ClusteringDeghost : public IConfigurable, public 
Clus::IEnsembleVisitor, private NeedDV, private NeedPCTS, private NeedScope { +public: + ClusteringDeghost() {} + virtual ~ClusteringDeghost() {} + + void configure(const WireCell::Configuration& config) { + NeedDV::configure(config); + NeedPCTS::configure(config); + NeedScope::configure(config); + + use_ctpc_ = get(config, "use_ctpc", true); + length_cut_ = get(config, "length_cut", 0); + } + virtual Configuration default_configuration() const { + Configuration cfg; + return cfg; + } + + void visit(Ensemble& ensemble) const { + auto& live = *ensemble.with_name("live").at(0); + clustering_deghost(live, m_dv, m_pcts, m_scope, use_ctpc_, length_cut_); + } + +private: + double use_ctpc_{true}; + double length_cut_{0}; +}; + + // The original developers do not care. #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wparentheses" @@ -18,30 +115,82 @@ using namespace WireCell::PointCloud::Tree; #define LogDebug(x) #endif - -void WireCell::PointCloud::Facade::clustering_deghost(Grouping& live_grouping, - const bool use_ctpc, double length_cut) +// This can handle entire APA (including all faces) data +static void clustering_deghost( + Grouping& live_grouping, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts, + const Tree::Scope& scope, + const bool use_ctpc, double length_cut) { - std::map>& dead_u_index = live_grouping.get_dead_winds(0, 0); - std::map>& dead_v_index = live_grouping.get_dead_winds(0, 1); - std::map>& dead_w_index = live_grouping.get_dead_winds(0, 2); + // Get all the wire plane IDs from the grouping + const auto& wpids = live_grouping.dv_wpids(); + // Key: pair, Value: drift_dir, angle_u, angle_v, angle_w + std::map> wpid_params; + std::set apas; + + std::map>>> af_dead_u_index; + std::map>>> af_dead_v_index; + std::map>>> af_dead_w_index; + + // + // NOTE, most of this can be replaced by a couple of function calls from DetUtils.h + // + + for (const auto& wpid : wpids) { + int apa = wpid.apa(); + int face = wpid.face(); + 
apas.insert(apa); + + // Create wpids for all three planes with this APA and face + WirePlaneId wpid_u(kUlayer, face, apa); + WirePlaneId wpid_v(kVlayer, face, apa); + WirePlaneId wpid_w(kWlayer, face, apa); + + // Get drift direction based on face orientation + int face_dirx = dv->face_dirx(wpid_u); + geo_point_t drift_dir(face_dirx, 0, 0); + + // Get wire directions for all planes + Vector wire_dir_u = dv->wire_direction(wpid_u); + Vector wire_dir_v = dv->wire_direction(wpid_v); + Vector wire_dir_w = dv->wire_direction(wpid_w); + + // Calculate angles + double angle_u = std::atan2(wire_dir_u.z(), wire_dir_u.y()); + double angle_v = std::atan2(wire_dir_v.z(), wire_dir_v.y()); + double angle_w = std::atan2(wire_dir_w.z(), wire_dir_w.y()); + + wpid_params[wpid] = std::make_tuple(drift_dir, angle_u, angle_v, angle_w); + + + af_dead_u_index[apa][face] = live_grouping.get_dead_winds(apa, face, 0); + af_dead_v_index[apa][face] = live_grouping.get_dead_winds(apa, face, 1); + af_dead_w_index[apa][face] = live_grouping.get_dead_winds(apa, face, 2); + } + + if (apas.size() > 1) { + raise("apas.size() %d > 1", apas.size()); + } + std::vector live_clusters = live_grouping.children(); // copy + // sort the clusters by length using a lambda function std::sort(live_clusters.begin(), live_clusters.end(), [](const Cluster *cluster1, const Cluster *cluster2) { return cluster1->get_length() > cluster2->get_length(); }); - const auto &tp = live_grouping.get_params(); - // this is for 4 time slices - // double time_slice_width = tp.nticks_live_slice * tp.tick_drift; + // + // NOTE: these (and above code) can be replaced with a single call to + // make_dynamicpointcloud(dv) from DetUtils. + // - // Create two point clouds ... - // One for the points ... point --> index --> cluster (vector) ... - // The other for the skeleton of each track ... 
point --> index --> cluster (vector) - // Both cloud needs to be dynamic, keep adding things into it as we improve the knowledge - auto global_point_cloud = std::make_shared(tp.angle_u, tp.angle_v, tp.angle_w); - auto global_skeleton_cloud = std::make_shared(tp.angle_u, tp.angle_v, tp.angle_w); + // auto global_point_cloud_legacy = std::make_shared(angle_u, angle_v, angle_w); + auto global_point_cloud = std::make_shared(wpid_params); + // auto global_skeleton_cloud = std::make_shared(angle_u, angle_v, angle_w); + auto global_skeleton_cloud = std::make_shared(wpid_params); + // replace with the new DynamicPointCloud class std::vector to_be_removed_clusters; // std::set> to_be_merged_pairs; @@ -54,19 +203,32 @@ void WireCell::PointCloud::Facade::clustering_deghost(Grouping& live_grouping, ilive2desc[ilive] = boost::add_vertex(ilive, g); } + bool flag_first = true; for (size_t i = 0; i != live_clusters.size(); i++) { - if (i == 0) { + if (live_clusters.at(i)->get_default_scope().hash() != scope.hash()) { + live_clusters.at(i)->set_default_scope(scope); + // std::cout << "Test: Set default scope: " << pc_name << " " << coords[0] << " " << coords[1] << " " << coords[2] << " " << cluster->get_default_scope().hash() << " " << scope.hash() << std::endl; + } + + // if not within the scope filter, nor processing ... + if (!live_clusters.at(i)->get_scope_filter(scope)) continue; + + if (flag_first) { // fill anyway ... // live_clusters.at(i)->Create_point_cloud(); - global_point_cloud->add_points(live_clusters.at(i), 0); + // global_point_cloud_legacy->add_points(live_clusters.at(i), 0); + global_point_cloud->add_points(make_points_cluster(live_clusters.at(i), wpid_params, true)); if (live_clusters.at(i)->get_length() > 30 * units::cm) { // should be the default for most of them ... 
- live_clusters.at(i)->construct_skeleton(use_ctpc); - global_skeleton_cloud->add_points(live_clusters.at(i), 1); + const auto& path_wcps = get_path_wcps(*live_clusters.at(i), dv, pcts, use_ctpc); + // global_skeleton_cloud->add_points(live_clusters.at(i), 1); + global_skeleton_cloud->add_points(make_points_cluster_skeleton(live_clusters.at(i), dv, wpid_params, path_wcps, true)); } else { - global_skeleton_cloud->add_points(live_clusters.at(i), 0); + // global_skeleton_cloud->add_points(live_clusters.at(i), 0); + global_skeleton_cloud->add_points(make_points_cluster(live_clusters.at(i), wpid_params, true)); } + flag_first = false; } else { // start the process to add things in and perform deghosting ... @@ -85,9 +247,8 @@ void WireCell::PointCloud::Facade::clustering_deghost(Grouping& live_grouping, double dis_cut = 1.2 * units::cm; for (size_t j = 0; j != num_total_points; j++) { - // geo_point_t test_point(cluster->point3d(j).x(), cloud.pts.at(j).y, cloud.pts.at(j).z); geo_point_t test_point = cluster->point3d(j); - + auto test_wpid = cluster->wire_plane_id(j); bool flag_dead = false; #ifdef __DEBUG__ @@ -97,10 +258,10 @@ void WireCell::PointCloud::Facade::clustering_deghost(Grouping& live_grouping, #endif - + auto& dead_u_index = af_dead_u_index.at(test_wpid.apa()).at(test_wpid.face()); if (dead_u_index.find(winds[0][j]) != dead_u_index.end()) { - if (cluster->point3d(j).x() >= dead_u_index[winds[0][j]].first && - cluster->point3d(j).x() <= dead_u_index[winds[0][j]].second) { + if (cluster->point3d_raw(j).x() >= dead_u_index[winds[0][j]].first && + cluster->point3d_raw(j).x() <= dead_u_index[winds[0][j]].second) { flag_dead = true; } } @@ -109,9 +270,9 @@ void WireCell::PointCloud::Facade::clustering_deghost(Grouping& live_grouping, if (!flag_dead) { std::tuple results = - global_point_cloud->get_closest_2d_point_info(test_point, 0); + global_point_cloud->get_closest_2d_point_info(test_point, 0, test_wpid.face(), test_wpid.apa()); - // if 
(cluster->nchildren()==801 && j==0) std::cout << j << " AU " << test_point << " " << std::get<0>(results) << " " << std::get<1>(results)->get_length()/units::cm << std::endl; + if (std::get<0>(results) <= dis_cut / 3.) { if (map_cluster_num[0].find(std::get<1>(results)) == map_cluster_num[0].end()) { @@ -122,9 +283,7 @@ void WireCell::PointCloud::Facade::clustering_deghost(Grouping& live_grouping, } } else { - results = global_skeleton_cloud->get_closest_2d_point_info(test_point, 0); - - // if (cluster->nchildren()==801 && j==0) std::cout << j << " BU " << test_point << " " << std::get<0>(results) << " " << std::get<1>(results)->get_length()/units::cm << std::endl; + results = global_skeleton_cloud->get_closest_2d_point_info(test_point, 0, test_wpid.face(), test_wpid.apa()); if (std::get<0>(results) <= dis_cut * 2.0) { if (map_cluster_num[0].find(std::get<1>(results)) == map_cluster_num[0].end()) { @@ -144,6 +303,7 @@ void WireCell::PointCloud::Facade::clustering_deghost(Grouping& live_grouping, } flag_dead = false; + auto& dead_v_index = af_dead_v_index.at(test_wpid.apa()).at(test_wpid.face()); if (dead_v_index.find(winds[1][j]) != dead_v_index.end()) { #ifdef __DEBUG__ @@ -151,8 +311,8 @@ void WireCell::PointCloud::Facade::clustering_deghost(Grouping& live_grouping, std::cout << "dead_v_index: " << winds[1][j] << " " << dead_v_index[winds[1][j]].first << " " << dead_v_index[winds[1][j]].second << std::endl; } #endif - if (cluster->point3d(j).x() >= dead_v_index[winds[1][j]].first && - cluster->point3d(j).x() <= dead_v_index[winds[1][j]].second) { + if (cluster->point3d_raw(j).x() >= dead_v_index[winds[1][j]].first && + cluster->point3d_raw(j).x() <= dead_v_index[winds[1][j]].second) { flag_dead = true; } } @@ -169,7 +329,7 @@ void WireCell::PointCloud::Facade::clustering_deghost(Grouping& live_grouping, } #endif std::tuple results = - global_point_cloud->get_closest_2d_point_info(test_point, 1); + global_point_cloud->get_closest_2d_point_info(test_point, 1, 
test_wpid.face(), test_wpid.apa()); // if (cluster->nchildren()==801 && j==0) std::cout << j << " AV " << test_point << " " << std::get<0>(results) << " " << std::get<1>(results)->get_length()/units::cm << std::endl; @@ -189,7 +349,8 @@ void WireCell::PointCloud::Facade::clustering_deghost(Grouping& live_grouping, } } else { - results = global_skeleton_cloud->get_closest_2d_point_info(test_point, 1); + // results = global_skeleton_cloud->get_closest_2d_point_info(test_point, 1); + results = global_skeleton_cloud->get_closest_2d_point_info(test_point, 1, test_wpid.face(), test_wpid.apa()); // if (cluster->nchildren()==801 && j==0) std::cout << j << " BV " << test_point << " " << std::get<0>(results) << " " << std::get<1>(results)->get_length()/units::cm << std::endl; @@ -211,17 +372,18 @@ void WireCell::PointCloud::Facade::clustering_deghost(Grouping& live_grouping, } flag_dead = false; + auto& dead_w_index = af_dead_w_index.at(test_wpid.apa()).at(test_wpid.face()); if (dead_w_index.find(winds[2][j]) != dead_w_index.end()) { - if (cluster->point3d(j).x() >= dead_w_index[winds[2][j]].first && - cluster->point3d(j).x() <= dead_w_index[winds[2][j]].second) { + if (cluster->point3d_raw(j).x() >= dead_w_index[winds[2][j]].first && + cluster->point3d_raw(j).x() <= dead_w_index[winds[2][j]].second) { flag_dead = true; } } if (!flag_dead) { std::tuple results = - global_point_cloud->get_closest_2d_point_info(test_point, 2); + global_point_cloud->get_closest_2d_point_info(test_point, 2, test_wpid.face(), test_wpid.apa()); // if (cluster->nchildren()==801 && j==0) std::cout << j << " AW " << test_point << " " << std::get<0>(results) << " " << std::get<1>(results)->get_length()/units::cm << std::endl; @@ -234,7 +396,8 @@ void WireCell::PointCloud::Facade::clustering_deghost(Grouping& live_grouping, } } else { - results = global_skeleton_cloud->get_closest_2d_point_info(test_point, 2); + // results = global_skeleton_cloud->get_closest_2d_point_info(test_point, 2); + results = 
global_skeleton_cloud->get_closest_2d_point_info(test_point, 2, test_wpid.face(), test_wpid.apa()); // if (cluster->nchildren()==801 && j==0) std::cout << j << " BW " << test_point << " " <(results) << " " << std::get<1>(results)->get_length()/units::cm << std::endl; @@ -261,9 +424,9 @@ void WireCell::PointCloud::Facade::clustering_deghost(Grouping& live_grouping, (num_unique[1] + num_unique[0] + num_unique[2]) > 25) break; } - LogDebug("num_total_points = " << num_total_points); - LogDebug("num_unique[0] = " << num_unique[0] << ", num_unique[1] = " << num_unique[1] << ", num_unique[2] = " << num_unique[2]); - LogDebug("num_dead[0] = " << num_dead[0] << ", num_dead[1] = " << num_dead[1] << ", num_dead[2] = " << num_dead[2]); + // LogDebug("num_total_points = " << num_total_points); + // LogDebug("num_unique[0] = " << num_unique[0] << ", num_unique[1] = " << num_unique[1] << ", num_unique[2] = " << num_unique[2]); + // LogDebug("num_dead[0] = " << num_dead[0] << ", num_dead[1] = " << num_dead[1] << ", num_dead[2] = " << num_dead[2]); bool flag_save = false; @@ -291,7 +454,7 @@ void WireCell::PointCloud::Facade::clustering_deghost(Grouping& live_grouping, num_unique[2] < 0.02 * num_total_points)) && (num_unique[0] + num_unique[1] + num_unique[2]) <= 500) { flag_save = false; - LogDebug("pass the first cut " << num_total_points); + // LogDebug("pass the first cut " << num_total_points); // now try to compare // find the maximal for each map @@ -316,7 +479,7 @@ void WireCell::PointCloud::Facade::clustering_deghost(Grouping& live_grouping, } } bool flag_remove = true; - LogDebug("max_value_u: " << max_value_u << ", max_value_v: " << max_value_v << ", max_value_w: " << max_value_w); + // LogDebug("max_value_u: " << max_value_u << ", max_value_v: " << max_value_v << ", max_value_w: " << max_value_w); if (max_cluster_u == max_cluster_v && max_value_u > 0.8 * (num_total_points - num_dead[0]) && max_value_v > 0.8 * (num_total_points - num_dead[1])) { @@ -578,29 +741,32 @@ 
void WireCell::PointCloud::Facade::clustering_deghost(Grouping& live_grouping, if (flag_save) { // live_clusters.at(i)->Create_point_cloud(); - global_point_cloud->add_points(live_clusters.at(i), 0); + // global_point_cloud_legacy->add_points(live_clusters.at(i), 0); + global_point_cloud->add_points(make_points_cluster(live_clusters.at(i), wpid_params, true)); if (live_clusters.at(i)->get_length() > 30 * units::cm) { - live_clusters.at(i)->construct_skeleton(use_ctpc); - global_skeleton_cloud->add_points(live_clusters.at(i), 1); + const auto& path_wcps = get_path_wcps(*live_clusters.at(i), dv, pcts, use_ctpc); + // global_skeleton_cloud->add_points(live_clusters.at(i), 1); + global_skeleton_cloud->add_points(make_points_cluster_skeleton(live_clusters.at(i), dv, wpid_params, path_wcps, true )); } } } else { // live_clusters.at(i)->Create_point_cloud(); - global_point_cloud->add_points(live_clusters.at(i), 0); + // global_point_cloud_legacy->add_points(live_clusters.at(i), 0); + global_point_cloud->add_points(make_points_cluster(live_clusters.at(i), wpid_params, true)); if (live_clusters.at(i)->get_length() > 30 * units::cm) { - live_clusters.at(i)->construct_skeleton(use_ctpc); - global_skeleton_cloud->add_points(live_clusters.at(i), 1); + const auto& path_wcps = get_path_wcps(*live_clusters.at(i), dv, pcts, use_ctpc); + // global_skeleton_cloud->add_points(live_clusters.at(i), 1); + global_skeleton_cloud->add_points(make_points_cluster_skeleton(live_clusters.at(i), dv, wpid_params, path_wcps, true)); } } } - LogDebug("Cluster " << i << " " << live_clusters.at(i)->nchildren() << " " << live_clusters.at(i)->npoints()); - LogDebug("global_point_cloud: " << global_point_cloud->get_num_points() << " global_skeleton_cloud: " << global_skeleton_cloud->get_num_points()); + // LogDebug("Cluster " << i << " " << live_clusters.at(i)->n_blobs() << " " << live_clusters.at(i)->npoints()); + // LogDebug("global_point_cloud: " << global_point_cloud->get_num_points() << " 
global_skeleton_cloud: " << global_skeleton_cloud->get_num_points()); } - // merge clusters - cluster_set_t new_clusters; - merge_clusters(g, live_grouping, new_clusters); + + auto new_clusters = merge_clusters(g, live_grouping); // remove clusters LogDebug("to_be_removed_clusters.size() = " << to_be_removed_clusters.size()); @@ -610,78 +776,20 @@ void WireCell::PointCloud::Facade::clustering_deghost(Grouping& live_grouping, assert(live == nullptr); } - // // merge clusters - // std::vector> merge_clusters; - // for (auto it = to_be_merged_pairs.begin(); it != to_be_merged_pairs.end(); it++) { - // Cluster *cluster1 = (*it).first; - // Cluster *cluster2 = (*it).second; - // // std::cout << cluster1 << " " << cluster2 << " " << cluster1->get_cluster_id() << " " << - // // cluster2->get_cluster_id() << std::endl; - - // bool flag_new = true; - // std::vector> temp_set; - // for (auto it1 = merge_clusters.begin(); it1 != merge_clusters.end(); it1++) { - // std::set &clusters = (*it1); - // if (clusters.find(cluster1) != clusters.end() || clusters.find(cluster2) != clusters.end()) { - // clusters.insert(cluster1); - // clusters.insert(cluster2); - // flag_new = false; - // temp_set.push_back(clusters); - // // break; - // } - // } - // if (flag_new) { - // std::set clusters; - // clusters.insert(cluster1); - // clusters.insert(cluster2); - // merge_clusters.push_back(clusters); - // } - // if (temp_set.size() > 1) { - // // merge them further ... 
- // std::set clusters; - // for (size_t i = 0; i != temp_set.size(); i++) { - // for (auto it1 = temp_set.at(i).begin(); it1 != temp_set.at(i).end(); it1++) { - // clusters.insert(*it1); - // } - // merge_clusters.erase(find(merge_clusters.begin(), merge_clusters.end(), temp_set.at(i))); - // } - // merge_clusters.push_back(clusters); - // } - // } - - // // merge clusters into new clusters, delete old clusters - // for (auto it = merge_clusters.begin(); it != merge_clusters.end(); it++) { - // std::set &clusters = (*it); - // Cluster *ncluster = new Cluster((*clusters.begin())->get_cluster_id()); - // live_clusters.push_back(ncluster); - // for (auto it1 = clusters.begin(); it1 != clusters.end(); it1++) { - // Cluster *ocluster = *(it1); - // // std::cout << ocluster->get_cluster_id() << " "; - // SMGCSelection &mcells = ocluster->get_mcells(); - // for (auto it2 = mcells.begin(); it2 != mcells.end(); it2++) { - // SlimMergeGeomCell *mcell = (*it2); - // // std::cout << ocluster->get_cluster_id() << " " << mcell << std::endl; - // int time_slice = mcell->GetTimeSlice(); - // ncluster->AddCell(mcell, time_slice); - // } - // live_clusters.erase(find(live_clusters.begin(), live_clusters.end(), ocluster)); - // cluster_length_map.erase(ocluster); - // delete ocluster; - // } - // std::vector range_v1 = ncluster->get_uvwt_range(); - // double length_1 = sqrt(2. / 3. * - // (pow(pitch_u * range_v1.at(0), 2) + pow(pitch_v * range_v1.at(1), 2) + - // pow(pitch_w * range_v1.at(2), 2)) + - // pow(time_slice_width * range_v1.at(3), 2)); - // cluster_length_map[ncluster] = length_1; - // // std::cout << std::endl; - // } - - // // delete clusters ... 
- // for (auto it = to_be_removed_clusters.begin(); it != to_be_removed_clusters.end(); it++) { - // Cluster *ocluster = *it; - // live_clusters.erase(find(live_clusters.begin(), live_clusters.end(), ocluster)); - // cluster_length_map.erase(ocluster); - // delete ocluster; - // } + + + + + + +// { +// auto live_clusters = live_grouping.children(); // copy +// // Process each cluster +// for (size_t iclus = 0; iclus < live_clusters.size(); ++iclus) { +// Cluster* cluster = live_clusters.at(iclus); +// auto& scope = cluster->get_default_scope(); +// std::cout << "Test: " << iclus << " " << cluster->nchildren() << " " << scope.pcname << " " << scope.coords[0] << " " << scope.coords[1] << " " << scope.coords[2] << " " << cluster->get_scope_filter(scope)<< " " << cluster->get_pca().center << std::endl; +// } +// } + } diff --git a/clus/src/clustering_examine_bundles.cxx b/clus/src/clustering_examine_bundles.cxx index d77b281dd..b9a10204b 100644 --- a/clus/src/clustering_examine_bundles.cxx +++ b/clus/src/clustering_examine_bundles.cxx @@ -1,12 +1,54 @@ -#include +#include "WireCellClus/IEnsembleVisitor.h" +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellClus/ClusteringFuncsMixins.h" + +#include "WireCellIface/IConfigurable.h" + +#include "WireCellUtil/NamedFactory.h" + + +class ClusteringExamineBundles; +WIRECELL_FACTORY(ClusteringExamineBundles, ClusteringExamineBundles, + WireCell::IConfigurable, WireCell::Clus::IEnsembleVisitor) + using namespace WireCell; using namespace WireCell::Clus; -using namespace WireCell::Aux; -using namespace WireCell::Aux::TensorDM; -using namespace WireCell::PointCloud::Facade; +using namespace WireCell::Clus::Facade; using namespace WireCell::PointCloud::Tree; + +static void clustering_examine_bundles( + Grouping& live_grouping, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts, + const Tree::Scope& scope, + const bool use_ctpc); + +class ClusteringExamineBundles : public IConfigurable, public 
Clus::IEnsembleVisitor, private NeedDV, private NeedPCTS, private NeedScope { +public: + ClusteringExamineBundles() {} + virtual ~ClusteringExamineBundles() {} + + void configure(const WireCell::Configuration& config) { + NeedDV::configure(config); + NeedPCTS::configure(config); + NeedScope::configure(config); + + // If false, then DV and PCTS are not needed. + use_ctpc_ = get(config, "use_ctpc", use_ctpc_); + } + + void visit(Ensemble& ensemble) const { + auto& live = *ensemble.with_name("live").at(0); + clustering_examine_bundles(live, m_dv, m_pcts, m_scope, use_ctpc_); + } + +private: + bool use_ctpc_{true}; +}; + + // The original developers do not care. #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wparentheses" @@ -18,52 +60,32 @@ using namespace WireCell::PointCloud::Tree; #define LogDebug(x) #endif -void WireCell::PointCloud::Facade::clustering_examine_bundles(Grouping& live_grouping, +// All APA Faces +static void clustering_examine_bundles( + Grouping& live_grouping, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts, + const Tree::Scope& scope, const bool use_ctpc) { // std::cout << "Test Examine Bundles" << std::endl; std::vector live_clusters = live_grouping.children(); - // for (size_t i = 0; i != live_clusters.size(); i++) { - // auto blobs = live_clusters.at(i)->kd_blobs(); - // int nblobs = blobs.size(); - - // // if(nblobs > 10){ - // // std::cout << "Test: " << nblobs << " " << std::endl; - - // auto flash = live_clusters.at(i)->get_flash(); - // if (flash) { - // std::cout << "Tests: " << nblobs << " at time " << flash.time() << "\n"; - - // auto values = flash.values(); - // std::cout << values.size() << " "; - // for (const auto& value : values) { - // std::cout << value << " "; - // } - // std::cout << std::endl; - // } - - // // auto local_pcs = live_clusters.at(i)->local_pcs(); - // // for (auto it = local_pcs.begin(); it !=local_pcs.end(); it++){ - // // auto keys = it->second.keys(); - // // for (auto it1 = 
keys.begin(); it1 != keys.end(); it1++){ - // // std::cout << "Test: " << it->first << " " << *it1 << std::endl; - // // } - // // } - // // auto flash = live_clusters.at(i)->get_scalar("flash"); - // // std::cout << "Test: Flash: " << flash << std::endl; - // // } - // } - for (size_t i=0;i!=live_clusters.size();i++){ + if (!live_clusters.at(i)->get_scope_filter(scope)) continue; // move on if the cluster is not in the scope filter ... + if (live_clusters.at(i)->get_default_scope().hash() != scope.hash()) { + live_clusters.at(i)->set_default_scope(scope); + // std::cout << "Test: Set default scope: " << pc_name << " " << coords[0] << " " << coords[1] << " " << coords[2] << " " << cluster->get_default_scope().hash() << " " << scope.hash() << std::endl; + } + // if there is a cc component, record the main cluster as id of the blobs??? auto old_cc_array = live_clusters.at(i)->get_pcarray("isolated", "perblob"); // currently reset the cc component (todo: find the main component) // do the examine graph - auto b2groupid = live_clusters.at(i)->examine_graph(true); + auto b2groupid = live_clusters.at(i)->connected_blobs(dv, pcts); bool flag_largest = false; // Compare old and new cluster groupings @@ -140,18 +162,11 @@ void WireCell::PointCloud::Facade::clustering_examine_bundles(Grouping& live_gro live_clusters.at(i)->put_pcarray(b2groupid, "isolated", "perblob"); - // auto blobs = live_clusters.at(i)->kd_blobs(); - // int nblobs = blobs.size(); + } + + + - // for (const auto& id : b2groupid) { - // std::cout << id << " "; - // } - // std::cout << std::endl; - // if (nblobs > 10){ - // // find the main cluster and set it to the cc tree ... 
- // std::cout << "Test: " << nblobs << " " << old_cc_array.size() << " " << b2groupid.size() << std::endl; - // } - } -} \ No newline at end of file +} diff --git a/clus/src/clustering_examine_x_boundary.cxx b/clus/src/clustering_examine_x_boundary.cxx index 610b30886..39c14675d 100644 --- a/clus/src/clustering_examine_x_boundary.cxx +++ b/clus/src/clustering_examine_x_boundary.cxx @@ -1,40 +1,101 @@ -#include +#include "WireCellClus/IEnsembleVisitor.h" +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellClus/ClusteringFuncsMixins.h" + +#include "WireCellIface/IConfigurable.h" + +#include "WireCellUtil/NamedFactory.h" + +class ClusteringExamineXBoundary; +WIRECELL_FACTORY(ClusteringExamineXBoundary, ClusteringExamineXBoundary, + WireCell::IConfigurable, WireCell::Clus::IEnsembleVisitor) using namespace WireCell; using namespace WireCell::Clus; -using namespace WireCell::Aux; -using namespace WireCell::Aux::TensorDM; -using namespace WireCell::PointCloud::Facade; +using namespace WireCell::Clus::Facade; using namespace WireCell::PointCloud::Tree; -void WireCell::PointCloud::Facade::clustering_examine_x_boundary( - Grouping& live_grouping +static void clustering_examine_x_boundary( + Grouping& live_grouping, + IDetectorVolumes::pointer dv, + const Tree::Scope& scope + ); + +class ClusteringExamineXBoundary : public IConfigurable, public Clus::IEnsembleVisitor, private NeedDV, private NeedScope { +public: + ClusteringExamineXBoundary() {} + virtual ~ClusteringExamineXBoundary() {} + + void configure(const WireCell::Configuration& config) { + NeedDV::configure(config); + NeedScope::configure(config); + } + + void visit(Ensemble& ensemble) const { + auto& live = *ensemble.with_name("live").at(0); + clustering_examine_x_boundary(live, m_dv, m_scope); + } + +}; + + +// This function only handles Single APA/Face! 
+static void clustering_examine_x_boundary( + Grouping& live_grouping, + const IDetectorVolumes::pointer dv, + const Tree::Scope& scope ) { + // Check that live_grouping has less than one wpid + if (live_grouping.wpids().size() > 1) { + for (const auto& wpid : live_grouping.wpids()) { + std::cout << "Live grouping wpid: " << wpid.name() << std::endl; + } + raise("Live %d > 1", live_grouping.wpids().size()); + } + std::vector live_clusters = live_grouping.children(); // copy // sort the clusters by length using a lambda function // std::sort(live_clusters.begin(), live_clusters.end(), [](const Cluster *cluster1, const Cluster *cluster2) { // return cluster1->get_length() > cluster2->get_length(); // }); - const auto &tp = live_grouping.get_params(); + // const auto &tp = live_grouping.get_params(); // this is for 4 time slices // double time_slice_width = tp.nticks_live_slice * tp.tick_drift; + + // std::cout << "Test: " << tp.FV_xmin << " " << tp.FV_xmax << " " << tp.FV_xmin_margin << " " << tp.FV_xmax_margin << std::endl; + // std::cout << "Test: " << dv->metadata(*live_grouping.wpids().begin())["FV_xmin"].asDouble() << " " << dv->metadata(*live_grouping.wpids().begin())["FV_xmax"].asDouble() << " " << dv->metadata(*live_grouping.wpids().begin())["FV_xmin_margin"].asDouble() << " " << dv->metadata(*live_grouping.wpids().begin())["FV_xmax_margin"].asDouble() << std::endl; + + double FV_xmin = dv->metadata(*live_grouping.wpids().begin())["FV_xmin"].asDouble() ; + double FV_xmax = dv->metadata(*live_grouping.wpids().begin())["FV_xmax"].asDouble() ; + double FV_xmin_margin = dv->metadata(*live_grouping.wpids().begin())["FV_xmin_margin"].asDouble() ; + double FV_xmax_margin = dv->metadata(*live_grouping.wpids().begin())["FV_xmax_margin"].asDouble() ; + // std::vector new_clusters; // std::vector del_clusters; for (size_t i = 0; i != live_clusters.size(); i++) { Cluster *cluster = live_clusters.at(i); + if (!cluster->get_scope_filter(scope)) continue; + + if 
(cluster->get_default_scope().hash() != scope.hash()) { + cluster->set_default_scope(scope); + // std::cout << "Test: Set default scope: " << pc_name << " " << coords[0] << " " << coords[1] << " " << coords[2] << " " << cluster->get_default_scope().hash() << " " << scope.hash() << std::endl; + } // only examine big clusters ... if (cluster->get_length() > 5 * units::cm && cluster->get_length() < 150 * units::cm) { // cluster->Create_point_cloud(); // std::cout << "Cluster " << i << " old pointer " << cluster << " nchildren " << cluster->nchildren() << std::endl; - auto b2groupid = cluster->examine_x_boundary(tp.FV_xmin - tp.FV_xmin_margin, tp.FV_xmax + tp.FV_xmax_margin); + auto b2groupid = cluster->examine_x_boundary(FV_xmin - FV_xmin_margin, FV_xmax + FV_xmax_margin); if (b2groupid.empty()) { continue; } - live_grouping.separate(cluster, b2groupid, true); + + // Perform separation + auto scope_transform = cluster->get_scope_transform(scope); + auto id2clusters = live_grouping.separate(cluster, b2groupid, true); assert(cluster == nullptr); @@ -49,22 +110,21 @@ void WireCell::PointCloud::Facade::clustering_examine_x_boundary( } } - // for (auto it = new_clusters.begin(); it != new_clusters.end(); it++) { - // PR3DCluster *ncluster = (*it); - // // ncluster->Create_point_cloud(); - // std::vector range_v1 = ncluster->get_uvwt_range(); - // double length_1 = sqrt(2. / 3. 
* - // (pow(pitch_u * range_v1.at(0), 2) + pow(pitch_v * range_v1.at(1), 2) + - // pow(pitch_w * range_v1.at(2), 2)) + - // pow(time_slice_width * range_v1.at(3), 2)); - // cluster_length_map[ncluster] = length_1; - // live_clusters.push_back(ncluster); - // } - - // for (auto it = del_clusters.begin(); it != del_clusters.end(); it++) { - // PR3DCluster *ocluster = (*it); - // cluster_length_map.erase(ocluster); - // live_clusters.erase(find(live_clusters.begin(), live_clusters.end(), ocluster)); - // delete ocluster; - // } + + // { + // auto live_clusters = live_grouping.children(); // copy + // // Process each cluster + // for (size_t iclus = 0; iclus < live_clusters.size(); ++iclus) { + // Cluster* cluster = live_clusters.at(iclus); + // auto& scope = cluster->get_default_scope(); + // std::cout << "Test: " << iclus << " " << cluster->nchildren() << " " << scope.pcname << " " << scope.coords[0] << " " << scope.coords[1] << " " << scope.coords[2] << " " << cluster->get_scope_filter(scope)<< " " << cluster->get_pca().center << std::endl; + // } + // } + + + + + + + } diff --git a/clus/src/clustering_extend.cxx b/clus/src/clustering_extend.cxx index 5e5ffc109..ef447d585 100644 --- a/clus/src/clustering_extend.cxx +++ b/clus/src/clustering_extend.cxx @@ -1,255 +1,117 @@ -#include +#include "WireCellClus/IEnsembleVisitor.h" +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellClus/ClusteringFuncsMixins.h" -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wparentheses" - -using namespace WireCell; -using namespace WireCell::Clus; -using namespace WireCell::Aux; -using namespace WireCell::Aux::TensorDM; -using namespace WireCell::PointCloud::Facade; -using namespace WireCell::PointCloud::Tree; -void WireCell::PointCloud::Facade::clustering_extend( - Grouping& live_grouping, - cluster_set_t& cluster_connected_dead, // in/out - const int flag, // - const double length_cut, // - const int num_try, // - const double length_2_cut, // - const int 
num_dead_try // -){ - geo_point_t drift_dir(1, 0, 0); // assuming the drift direction is along X ... - const auto [angle_u,angle_v,angle_w] = live_grouping.wire_angles(); - - // pronlonged case for U 3 and V 4 ... - geo_point_t U_dir(0,cos(angle_u),sin(angle_u)); - geo_point_t V_dir(0,cos(angle_v),sin(angle_v)); - geo_point_t W_dir(0,cos(angle_w),sin(angle_w)); - - cluster_set_t used_clusters; +#include "WireCellIface/IConfigurable.h" +#include "WireCellUtil/NamedFactory.h" - // prepare graph ... - typedef cluster_connectivity_graph_t Graph; - Graph g; - std::unordered_map ilive2desc; // added live index to graph descriptor - std::map map_cluster_index; - const auto& live_clusters = live_grouping.children(); - +class ClusteringExtend; +class ClusteringExtendLoop; +WIRECELL_FACTORY(ClusteringExtend, ClusteringExtend, + WireCell::IConfigurable, WireCell::Clus::IEnsembleVisitor) +WIRECELL_FACTORY(ClusteringExtendLoop, ClusteringExtendLoop, + WireCell::IConfigurable, WireCell::Clus::IEnsembleVisitor) - for (size_t ilive = 0; ilive < live_clusters.size(); ++ilive) { - const auto& live = live_clusters.at(ilive); - map_cluster_index[live] = ilive; - ilive2desc[ilive] = boost::add_vertex(ilive, g); +using namespace WireCell; +using namespace WireCell::Clus; +using namespace WireCell::Clus::Facade; + +static +void clustering_extend(Grouping& live_clusters, + IDetectorVolumes::pointer dv, // detector volumes + const Tree::Scope& scope, + const int flag, // + const double length_cut = 150*units::cm, // + const int num_try = 0, // + const double length_2_cut = 3*units::cm, // + const int num_dead_try =3 // + ); + +class ClusteringExtend : public IConfigurable, public Clus::IEnsembleVisitor, private NeedDV, private NeedScope { +public: + ClusteringExtend() {} + virtual ~ClusteringExtend() {} + + void configure(const WireCell::Configuration& config) { + NeedDV::configure(config); + NeedScope::configure(config); + + flag_ = get(config, "flag", 0); + length_cut_ = get(config, 
"length_cut", 150*units::cm); + num_try_ = get(config, "num_try", 0); + length_2_cut_ = get(config, "length_2_cut", 3*units::cm); + num_dead_try_ = get(config, "num_dead_try", 3); } - // original algorithm ... (establish edges ... ) - - int length_1_cut = 40*units::cm + num_try * 10*units::cm; - - if (flag==1) length_1_cut = 20*units::cm + num_try*10*units::cm; //prolong case - - for (size_t i=0;i!=live_clusters.size();i++){ - auto cluster_1 = live_clusters.at(i); - - if (cluster_1->get_length() > length_1_cut){ - geo_point_t highest_p, lowest_p, earliest_p, latest_p; - // bool flag_para = false; - // bool flag_prol = false; - - if (flag==1){// prolong case ... - - std::tie(earliest_p, latest_p) = cluster_1->get_earliest_latest_points(); - // find earliest point - - geo_point_t dir_earlp = cluster_1->vhough_transform(earliest_p,60*units::cm); - - geo_point_t tempV5,tempV1; - tempV1.set(0,dir_earlp.y(),dir_earlp.z()); - double angle1 = tempV1.angle(U_dir); - tempV5.set(fabs(dir_earlp.x()),sqrt(pow(dir_earlp.y(),2)+pow(dir_earlp.z(),2))*sin(angle1),0); - angle1 = tempV5.angle(drift_dir); - - double angle2 = tempV1.angle(V_dir); - tempV5.set(fabs(dir_earlp.x()),sqrt(pow(dir_earlp.y(),2)+pow(dir_earlp.z(),2))*sin(angle2),0); - angle2 = tempV5.angle(drift_dir); - - double angle3 = tempV1.angle(W_dir); - tempV5.set(fabs(dir_earlp.x()),sqrt(pow(dir_earlp.y(),2)+pow(dir_earlp.z(),2))*sin(angle3),0); - angle3 = tempV5.angle(drift_dir); - - - // find latest point - geo_point_t dir_latep = cluster_1->vhough_transform(latest_p, 60*units::cm); - tempV1.set(0,dir_latep.y(),dir_latep.z()); - double angle4 = tempV1.angle(U_dir); - tempV5.set(fabs(dir_latep.x()),sqrt(pow(dir_latep.y(),2)+pow(dir_latep.z(),2))*sin(angle4),0); - angle4 = tempV5.angle(drift_dir); - - double angle5 = tempV1.angle(V_dir); - tempV5.set(fabs(dir_latep.x()),sqrt(pow(dir_latep.y(),2)+pow(dir_latep.z(),2))*sin(angle5),0); - angle5 = tempV5.angle(drift_dir); - - double angle6 = tempV1.angle(W_dir); - 
tempV5.set(fabs(dir_latep.x()),sqrt(pow(dir_latep.y(),2)+pow(dir_latep.z(),2))*sin(angle6),0); - angle6 = tempV5.angle(drift_dir); - - if (angle1 <5./180.*3.1415926 || angle2 < 5./180.*3.1415926 || angle3 < 5./180.*3.1415926){ - // flag_prol = true; - - for (size_t j=0;j!=live_clusters.size();j++){ - auto cluster_2 = live_clusters.at(j); - if (used_clusters.find(cluster_2)!=used_clusters.end()) continue; - if (cluster_2==cluster_1) continue; - if (Clustering_4th_prol(*cluster_1,*cluster_2,cluster_2->get_length(),earliest_p,dir_earlp,length_cut)){ - // to_be_merged_pairs.insert(std::make_pair(cluster_1,cluster_2)); - boost::add_edge(ilive2desc[map_cluster_index[cluster_1]], - ilive2desc[map_cluster_index[cluster_2]], g); - - - - if (cluster_2->get_length()<10*units::cm) - used_clusters.insert(cluster_2); - } - } - } - - if (angle4<5./180.*3.1415926 || angle5 < 5./180.*3.1415926 || angle6 < 5./180.*3.1415926){ - - // flag_prol = true; - for (size_t j=0;j!=live_clusters.size();j++){ - auto cluster_2 = live_clusters.at(j); - if (used_clusters.find(cluster_2)!=used_clusters.end()) continue; - if (cluster_2==cluster_1) continue; - if (Clustering_4th_prol(*cluster_1,*cluster_2,cluster_2->get_length(),latest_p,dir_latep,length_cut)){ - //to_be_merged_pairs.insert(std::make_pair(cluster_1,cluster_2)); - boost::add_edge(ilive2desc[map_cluster_index[cluster_1]], - ilive2desc[map_cluster_index[cluster_2]], g); - - - if (cluster_2->get_length()<10*units::cm) - used_clusters.insert(cluster_2); - } - } - } - }else if (flag==2){ // parallel case ... 
- std::tie(highest_p, lowest_p) = cluster_1->get_highest_lowest_points(); - - highest_p = cluster_1->calc_ave_pos(highest_p,5*units::cm); - geo_point_t dir_highp = cluster_1->vhough_transform(highest_p,100*units::cm); - - lowest_p = cluster_1->calc_ave_pos(lowest_p,5*units::cm); - geo_point_t dir_lowp = cluster_1->vhough_transform(lowest_p, 100*units::cm); - - if (fabs(dir_highp.angle(drift_dir)-3.1415926/2.)<5/180.*3.1415926){ - // flag_para = true; - - for (size_t j=0;j!=live_clusters.size();j++){ - auto cluster_2 = live_clusters.at(j); - if (used_clusters.find(cluster_2)!=used_clusters.end()) continue; - if (cluster_2==cluster_1) continue; - - if (Clustering_4th_para(*cluster_1,*cluster_2,cluster_1->get_length(),cluster_2->get_length(),highest_p,dir_highp,length_cut)){ - //to_be_merged_pairs.insert(std::make_pair(cluster_1,cluster_2)); - boost::add_edge(ilive2desc[map_cluster_index[cluster_1]], - ilive2desc[map_cluster_index[cluster_2]], g); - - - - if (cluster_2->get_length()<15*units::cm) - used_clusters.insert(cluster_2); - } - } - } - - if (fabs(dir_lowp.angle(drift_dir)-3.1415926/2.)<5/180.*3.1415926 ){ - // flag_para = true; - - for (size_t j=0;j!=live_clusters.size();j++){ - auto cluster_2 = live_clusters.at(j); - if (cluster_2==cluster_1) continue; - if (Clustering_4th_para(*cluster_1,*cluster_2,cluster_1->get_length(),cluster_2->get_length(),lowest_p,dir_lowp,length_cut)){ - // to_be_merged_pairs.insert(std::make_pair(cluster_1,cluster_2)); - boost::add_edge(ilive2desc[map_cluster_index[cluster_1]], - ilive2desc[map_cluster_index[cluster_2]], g); - - - } - } - - } - }else if (flag==3){ // regular case ... 
- auto hl_ps = cluster_1->get_highest_lowest_points(); - auto el_ps = cluster_1->get_earliest_latest_points(); - - geo_point_t first_p, second_p; - - - if (pow(hl_ps.first.x()-hl_ps.second.x(),2)+pow(hl_ps.first.y()-hl_ps.second.y(),2)+pow(hl_ps.first.z()-hl_ps.second.z(),2) > pow(el_ps.first.x()-el_ps.second.x(),2)+pow(el_ps.first.y()-el_ps.second.y(),2)+pow(el_ps.first.z()-el_ps.second.z(),2)){ - first_p = hl_ps.first; - second_p = hl_ps.second; - }else{ - first_p = el_ps.first; - second_p = el_ps.second; - } - - for (size_t j=0;j!=live_clusters.size();j++){ - auto cluster_2 = live_clusters.at(j); - if (used_clusters.find(cluster_2)!=used_clusters.end()) continue; - if (cluster_2==cluster_1) continue; - - if (Clustering_4th_reg(*cluster_1,*cluster_2,cluster_1->get_length(),cluster_2->get_length(),first_p,length_cut)){ - // to_be_merged_pairs.insert(std::make_pair(cluster_1,cluster_2)); - boost::add_edge(ilive2desc[map_cluster_index[cluster_1]], - ilive2desc[map_cluster_index[cluster_2]], g); - - - if (cluster_2->get_length()<10*units::cm) - used_clusters.insert(cluster_2); - - }else if (Clustering_4th_reg(*cluster_1,*cluster_2,cluster_1->get_length(),cluster_2->get_length(),second_p,length_cut)){ - //to_be_merged_pairs.insert(std::make_pair(cluster_1,cluster_2)); - boost::add_edge(ilive2desc[map_cluster_index[cluster_1]], - ilive2desc[map_cluster_index[cluster_2]], g); - + virtual Configuration default_configuration() const { + Configuration cfg; + return cfg; + } - if (cluster_2->get_length()<10*units::cm) - used_clusters.insert(cluster_2); - } - - } + void visit(Ensemble& ensemble) const { + auto& live = *ensemble.with_name("live").at(0); + clustering_extend(live, m_dv, m_scope, flag_, length_cut_, num_try_, length_2_cut_, num_dead_try_); + } - - }else if (flag==4){ - if (cluster_connected_dead.find(cluster_1)!=cluster_connected_dead.end()){ - used_clusters.insert(cluster_1); - for (size_t j=0;j!=live_clusters.size();j++){ - auto cluster_2 = 
live_clusters.at(j); - if (cluster_2->get_length() < length_2_cut) continue; - if (used_clusters.find(cluster_2)!=used_clusters.end()) continue; - if (Clustering_4th_dead(*cluster_1,*cluster_2,cluster_1->get_length(),cluster_2->get_length(),length_cut,num_dead_try)){ - // to_be_merged_pairs.insert(std::make_pair(cluster_1,cluster_2)); - boost::add_edge(ilive2desc[map_cluster_index[cluster_1]], - ilive2desc[map_cluster_index[cluster_2]], g); +private: + int flag_{0}; + double length_cut_{150*units::cm}; + int num_try_{0}; + double length_2_cut_{3*units::cm}; + int num_dead_try_{3}; +}; + +class ClusteringExtendLoop : public IConfigurable, public Clus::IEnsembleVisitor, private NeedDV, private NeedScope { +public: + ClusteringExtendLoop() {} + virtual ~ClusteringExtendLoop() {} + + void configure(const WireCell::Configuration& config) { + NeedDV::configure(config); + NeedScope::configure(config); + + num_try_ = get(config, "num_try", 0); + } + virtual Configuration default_configuration() const { + Configuration cfg; + return cfg; + } - if (cluster_2->get_length()<10*units::cm) - used_clusters.insert(cluster_2); - } - } - } + void visit(Ensemble& ensemble) const { + auto& live = *ensemble.with_name("live").at(0); + + // for very busy events do less ... + int num_try = num_try_; + if (live.nchildren() > 1100) num_try = 1; + for (int i = 0; i != num_try; i++) { + // deal with prolong case + clustering_extend(live, m_dv, m_scope, 1, 150*units::cm, 0); + // deal with parallel case + clustering_extend(live, m_dv, m_scope, 2, 30*units::cm, 0); + // extension regular case + clustering_extend(live, m_dv, m_scope, 3, 15*units::cm, 0); + // extension ones connected to dead region ... + if (i == 0) { + clustering_extend(live, m_dv, m_scope, 4, 60 * units::cm, i); + } + else { + clustering_extend(live, m_dv, m_scope, 4, 35 * units::cm, i); } } } - // new function to merge clusters ... 
- merge_clusters(g, live_grouping, cluster_connected_dead); -} - +private: + int num_try_{0}; +}; +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wparentheses" +using namespace WireCell::PointCloud::Tree; -bool WireCell::PointCloud::Facade::Clustering_4th_prol( +static bool Clustering_4th_prol( const Cluster& cluster_1, const Cluster& cluster_2, double length_2, @@ -279,11 +141,9 @@ bool WireCell::PointCloud::Facade::Clustering_4th_prol( } return false; - - } -bool WireCell::PointCloud::Facade::Clustering_4th_para( +static bool Clustering_4th_para( const Cluster& cluster_1, const Cluster& cluster_2, double length_1, double length_2, @@ -325,174 +185,682 @@ bool WireCell::PointCloud::Facade::Clustering_4th_para( return false; } -bool WireCell::PointCloud::Facade::Clustering_4th_reg( +static bool Clustering_4th_reg( const Cluster& cluster_1, const Cluster& cluster_2, double length_1, double length_2, - geo_point_t p1, double length_cut) + geo_point_t p1, double length_cut, + const std::map > & wpid_U_dir, const std::map > & wpid_V_dir, const std::map > & wpid_W_dir, const IDetectorVolumes::pointer dv) { auto temp_results = cluster_2.get_closest_point_blob(p1); geo_point_t p2 = temp_results.first; + auto wpid_p2 = cluster_2.wpid(p2); geo_point_t diff = p1 - p2; double dis1 = diff.magnitude(); temp_results = cluster_1.get_closest_point_blob(p2); p1 = temp_results.first; + auto wpid_p1 = cluster_1.wpid(p1); + + auto wpid_ps = get_wireplaneid(p1, wpid_p1, p2, wpid_p2, dv); + /* temp_results = cluster_2.get_closest_point_blob(p1); */ /* p2 = temp_results.second; */ - diff = p1 - p2; - double dis = diff.magnitude(); + diff = p1 - p2; + double dis = diff.magnitude(); + + geo_point_t drift_dir_abs(1, 0, 0); // assuming the drift direction is along X ... 
+ + if (dis1 > 15*units::cm && dis < 3*units::cm && length_2 > 80*units::cm &&length_1>80*units::cm) return false; + + if (dis < length_cut && (length_2 >= 40*units::cm || dis < 3*units::cm)){ + geo_point_t cluster1_ave_pos = cluster_1.calc_ave_pos(p1,5*units::cm); + geo_point_t cluster2_ave_pos = cluster_2.calc_ave_pos(p2,5*units::cm); + geo_point_t dir1; + + if (cluster_1.nnearby(cluster1_ave_pos, 30*units::cm)>50 && length_1 < 120*units::cm){ + dir1 = cluster_1.vhough_transform(cluster1_ave_pos,30*units::cm); + }else{ + dir1 = cluster_1.vhough_transform(cluster1_ave_pos,80*units::cm); + } + + geo_point_t dir3; + if (cluster_2.nnearby(cluster2_ave_pos, 30*units::cm)>50&&length_2 < 120*units::cm){ + dir3 = cluster_2.vhough_transform(cluster2_ave_pos,30*units::cm); + }else{ + dir3 = cluster_2.vhough_transform(cluster2_ave_pos,80*units::cm); + } + + geo_point_t dir2(cluster2_ave_pos.x() - cluster1_ave_pos.x(), + cluster2_ave_pos.y() - cluster1_ave_pos.y(), + cluster2_ave_pos.z() - cluster1_ave_pos.z()); + + + double ave_dis = sqrt(pow(cluster1_ave_pos.x()-cluster2_ave_pos.x(),2) + pow(cluster1_ave_pos.y()-cluster2_ave_pos.y(),2) + pow(cluster1_ave_pos.z()-cluster2_ave_pos.z(),2)); + geo_point_t test_point; + double min_dis = 1e9, max_dis = -1e9; + + if (dir2.angle(dir1)>3.1415926/2. 
){ + + for (int i=-5;i!=10;i++){ + test_point.set(cluster1_ave_pos.x() - dir1.x() * (ave_dis +i*2*units::cm), cluster1_ave_pos.y() - dir1.y() * (ave_dis +i*2*units::cm), cluster1_ave_pos.z() - dir1.z() * (ave_dis +i*2*units::cm)); + + auto temp_results = cluster_2.get_closest_point_blob(test_point); + //reuse this + geo_point_t test_point1 = temp_results.first; + if (sqrt(pow(test_point1.x()-test_point.x(),2)+pow(test_point1.y()-test_point.y(),2)+pow(test_point1.z()-test_point.z(),2))<1.5*units::cm){ + double temp_dis = (test_point1.x() - cluster1_ave_pos.x())*dir1.x() + (test_point1.y() - cluster1_ave_pos.y())*dir1.y() + (test_point1.z() - cluster1_ave_pos.z())*dir1.z(); + temp_dis *=-1; + if (temp_dis < min_dis) min_dis = temp_dis; + if (temp_dis > max_dis) max_dis = temp_dis; + } + } + + if ((max_dis - min_dis)>2.5*units::cm) return true; + } + + if (dir2.angle(dir3)<3.1415926/2.){ + + // look at the other side (repeat) + // cluster2_ave_pos, dir2 + min_dis = 1e9; + max_dis = -1e9; + for (int i=-5;i!=10;i++){ + test_point.set(cluster2_ave_pos.x() - dir3.x() * (ave_dis +i*2*units::cm), cluster2_ave_pos.y() - dir3.y() * (ave_dis +i*2*units::cm), cluster2_ave_pos.z() - dir3.z() * (ave_dis +i*2*units::cm)); + + auto temp_results = cluster_1.get_closest_point_blob(test_point); + //reuse this + geo_point_t test_point1 = temp_results.first; + if (sqrt(pow(test_point1.x()-test_point.x(),2)+pow(test_point1.y()-test_point.y(),2)+pow(test_point1.z()-test_point.z(),2))<1.5*units::cm){ + double temp_dis = (test_point1.x() - cluster2_ave_pos.x())*dir3.x() + (test_point1.y() - cluster2_ave_pos.y())*dir3.y() + (test_point1.z() - cluster2_ave_pos.z())*dir3.z(); + temp_dis *=-1; + if (temp_dis < min_dis) min_dis = temp_dis; + if (temp_dis > max_dis) max_dis = temp_dis; + } + } + + if ((max_dis - min_dis)>2.5*units::cm) return true; + } + + }else if (dis < 2 * length_cut && length_2 < 40*units::cm){ + + // prolonged case for U 3 and V 4 ... 
+ // geo_point_t U_dir(0,cos(angle_u),sin(angle_u)); + // geo_point_t V_dir(0,cos(angle_v),sin(angle_v)); + + geo_point_t dir2(p2.x()-p1.x(),p2.y()-p1.y(),p2.z()-p1.z()); + + bool flag_para = false, flag_prol =false, flag_reg = false; + + double angle1 = fabs(dir2.angle(drift_dir_abs)-3.1415926/2.)/3.1415926*180.; + + if (angle1 < 5 && dis < 2*length_cut || angle1 < 2 ){ + flag_para = true; + }else if (dis < 2*length_cut){ + geo_point_t tempV1(0, p2.y() - p1.y(), p2.z() - p1.z()); + geo_point_t tempV5; + double angle2 = tempV1.angle(wpid_U_dir.at(wpid_ps).first); + tempV5.set(fabs(p2.x()-p1.x()),sqrt(pow(p2.y() - p1.y(),2)+pow(p2.z() - p1.z(),2))*sin(angle2),0); + angle2 = tempV5.angle(drift_dir_abs)/3.1415926*180.; + + double angle3 = tempV1.angle(wpid_V_dir.at(wpid_ps).first); + tempV5.set(fabs(p2.x()-p1.x()),sqrt(pow(p2.y() - p1.y(),2)+pow(p2.z() - p1.z(),2))*sin(angle3),0); + angle3 = tempV5.angle(drift_dir_abs)/3.1415926*180.; + if (angle2<7.5 || angle3 < 7.5) + flag_prol = true; + } + + + if (flag_para || flag_prol || flag_reg){ + + geo_point_t dir1; + if (cluster_1.nnearby(p1, 15*units::cm)>30 && (flag_prol ||flag_reg) ){ + dir1 = cluster_1.vhough_transform(p1,15*units::cm); + }else{ + dir1 = cluster_1.vhough_transform(p1,60*units::cm); + } + + geo_point_t dir3; + if (cluster_2.nnearby(p2, 15*units::cm)>30 && (flag_prol || flag_reg)){ + dir3 = cluster_2.vhough_transform(p2,15*units::cm); + }else{ + dir3 = cluster_2.vhough_transform(p2,60*units::cm); + } + + + double angle4 = (3.1415926-dir1.angle(dir2))/3.1415926*180.; + double angle5 = dir2.angle(dir3)/3.1415926*180.; + + if (flag_para && fabs(dir3.angle(drift_dir_abs)-3.141592/2.)<10/180.*3.1415926 && fabs(dir1.angle(drift_dir_abs)-3.141592/2.)<10/180.*3.1415926){ + if (angle4 < 30 && (length_2 < 12*units::cm && fabs(angle5-90.)>30 || angle5 < 45)) + return true; + }else if (flag_prol){ + if (angle4 < 25 && (length_2 < 15*units::cm && fabs(angle5-90.)>30 || angle5 < 25)) + return true; + } + + if 
(fabs(dir2.angle(drift_dir_abs)-3.1415926/2.)/3.1415926*180.>7.5){ + // non-parallel case ... + if (WireCell::Clus::Facade::is_angle_consistent(dir1,dir2,false,10,wpid_U_dir.at(wpid_p1).second, wpid_V_dir.at(wpid_p1).second, wpid_W_dir.at(wpid_p1).second,2)){ + if (length_2 < 8*units::cm && WireCell::Clus::Facade::is_angle_consistent(dir1,dir2,false,5,wpid_U_dir.at(wpid_p1).second, wpid_V_dir.at(wpid_p1).second, wpid_W_dir.at(wpid_p1).second,2)) + return true; + if (WireCell::Clus::Facade::is_angle_consistent(dir3,dir2,true,10,wpid_U_dir.at(wpid_p2).second, wpid_V_dir.at(wpid_p2).second, wpid_W_dir.at(wpid_p2).second,2)){ + return true; + } + } + } + } + } + return false; +} + + +static bool Clustering_4th_dead( + const Cluster& cluster_1, + const Cluster& cluster_2, + double length_1, double length_2, double length_cut, int num_dead_try, + const std::map > & wpid_U_dir, const std::map > & wpid_V_dir, const std::map > & wpid_W_dir, const IDetectorVolumes::pointer dv) +{ + geo_point_t drift_dir_abs(1, 0, 0); // assuming the drift direction is along X ... + // const auto [angle_u,angle_v,angle_w] = cluster_1.grouping()->wire_angles(); + + geo_point_t p1; + geo_point_t p2; + + double dis = Find_Closest_Points(cluster_1, cluster_2, length_1, length_2, length_cut, p1, p2); + + // auto wpid_p1 = cluster_1.wpid(p1); + // auto wpid_p2 = cluster_2.wpid(p2); + // auto wpid_ps = get_wireplaneid(p1, wpid_p1, p2, wpid_p2, dv); + + //add a special one ... for uboone ... + /* + if (length_1 > 30*units::cm && length_2 > 30*units::cm && + (dis < 3*units::cm || + fabs(p1.x()-p2.x()) < 1.6*units::cm && (dis < 20*units::cm && + p1.z() > 700.6*units::cm && p1.z() < 739.6*units::cm && // special region + p2.z() > 700.6*units::cm && p2.z() < 739.6*units::cm && // special region ... 
+ p1.y() > -10.4*units::cm && p1.y() < 29*units::cm && + p2.y() > -10.4*units::cm && p2.y() < 29*units::cm ) + )){ + return true; + } + */ + + if ((dis < length_cut || (length_2 > 50*units::cm && dis < 80*units::cm))){ + + geo_point_t cluster1_ave_pos_save; + geo_point_t cluster2_ave_pos_save; + geo_point_t dir1_save; + geo_point_t dir3_save; + + for (int i=0;i!=num_dead_try;i++){ + geo_point_t cluster1_ave_pos; + geo_point_t cluster2_ave_pos; + + geo_point_t dir1; + geo_point_t dir3; + geo_point_t dir2; + + if (i==0){ + cluster1_ave_pos = cluster_1.calc_ave_pos(p1,5*units::cm); + cluster1_ave_pos_save = cluster1_ave_pos; + cluster2_ave_pos = cluster_2.calc_ave_pos(p2,5*units::cm); + cluster2_ave_pos_save = cluster2_ave_pos; + + if (num_dead_try==1){ + dir1 = cluster_1.vhough_transform(cluster1_ave_pos,20*units::cm); + dir3 = cluster_2.vhough_transform(cluster2_ave_pos,20*units::cm); + }else{ + dir1 = cluster_1.vhough_transform(cluster1_ave_pos,80*units::cm); + dir3 = cluster_2.vhough_transform(cluster2_ave_pos,80*units::cm); + } + dir1_save = dir1; + dir3_save = dir3; + + dir2.set(cluster2_ave_pos.x() - cluster1_ave_pos.x()+1e-9, cluster2_ave_pos.y() - cluster1_ave_pos.y()+1e-9, cluster2_ave_pos.z() - cluster1_ave_pos.z()+1e-9); // 2-1 + + }else if (i==1){ + if (length_2 >= 15*units::cm &&(!(length_2 > 150*units::cm && dis<15*units::cm))){ + cluster1_ave_pos = cluster1_ave_pos_save;//cluster_1->calc_ave_pos(p1,5*units::cm); + dir1 = dir1_save;//cluster_1->VHoughTrans(cluster1_ave_pos,80*units::cm); + + geo_vector_t dir_test(dir1); + dir_test = dir_test/dir_test.magnitude(); + dir_test = (-1) * dir_test; + + std::pair temp_results = cluster_2.get_closest_point_along_vec(cluster1_ave_pos, dir_test, dis*2, 5*units::cm, 15, 10*units::cm); + + if (temp_results.second < 100*units::cm){ + cluster2_ave_pos = cluster_2.calc_ave_pos(temp_results.first,5*units::cm); + dir3 = cluster_2.vhough_transform(cluster2_ave_pos,80*units::cm); + dir2.set(cluster2_ave_pos.x() - 
cluster1_ave_pos.x()+1e-9, cluster2_ave_pos.y() - cluster1_ave_pos.y()+1e-9, cluster2_ave_pos.z() - cluster1_ave_pos.z()+1e-9); // 2-1 + + }else{ + continue; + } + }else{ + continue; + } + }else if (i==2){ + if (length_2 >=15*units::cm&&(!(length_2 > 150*units::cm && dis<15*units::cm))){ + cluster2_ave_pos = cluster2_ave_pos_save;//cluster_2->calc_ave_pos(p2,5*units::cm); + dir3 = dir3_save;//cluster_2->VHoughTrans(cluster2_ave_pos,80*units::cm); + + geo_point_t dir_test(dir3); + dir_test = dir_test / dir_test.magnitude(); + dir_test = (-1) * dir_test; + + std::pair temp_results = cluster_1.get_closest_point_along_vec(cluster2_ave_pos, dir_test, dis*2, 5*units::cm, 15, 10*units::cm); + + if (temp_results.second < 100*units::cm){ + cluster1_ave_pos = cluster_1.calc_ave_pos(temp_results.first,5*units::cm); + dir1 = cluster_1.vhough_transform(cluster1_ave_pos,80*units::cm); + dir2.set(cluster2_ave_pos.x() - cluster1_ave_pos.x()+1e-9, cluster2_ave_pos.y() - cluster1_ave_pos.y()+1e-9, cluster2_ave_pos.z() - cluster1_ave_pos.z()+1e-9); // 2-1 + + }else{ + continue; + } + }else{ + continue; + } + } + + geo_point_t ave_dir(cluster1_ave_pos.x()-cluster2_ave_pos.x(),cluster1_ave_pos.y()-cluster2_ave_pos.y(),cluster1_ave_pos.z()-cluster2_ave_pos.z()); + + auto wpid_ave_p1 = cluster_1.wpid(cluster1_ave_pos); + auto wpid_ave_p2 = cluster_2.wpid(cluster2_ave_pos); + // auto wpid_ave_ps = get_wireplaneid(cluster1_ave_pos, wpid_ave_p1, cluster2_ave_pos, wpid_ave_p2, dv); + + // use projection to deal with stuff ... + if (fabs(ave_dir.angle(drift_dir_abs)-3.1415926/2.)/3.1415926*180.>7.5){ + // non-parallel case ... 
+ if (WireCell::Clus::Facade::is_angle_consistent(dir1,dir2,false,10,wpid_U_dir.at(wpid_ave_p1).second, wpid_V_dir.at(wpid_ave_p1).second, wpid_W_dir.at(wpid_ave_p1).second,2)){ + if (length_2 < 8*units::cm&& WireCell::Clus::Facade::is_angle_consistent(dir1,dir2,false,5,wpid_U_dir.at(wpid_ave_p1).second, wpid_V_dir.at(wpid_ave_p1).second, wpid_W_dir.at(wpid_ave_p1).second,2)) + return true; + if (length_2 < 15*units::cm && WireCell::Clus::Facade::is_angle_consistent(dir1,dir2,false,7.5,wpid_U_dir.at(wpid_ave_p1).second, wpid_V_dir.at(wpid_ave_p1).second, wpid_W_dir.at(wpid_ave_p1).second)) + return true; + if (WireCell::Clus::Facade::is_angle_consistent(dir3,dir2,true,10,wpid_U_dir.at(wpid_ave_p2).second, wpid_V_dir.at(wpid_ave_p2).second, wpid_W_dir.at(wpid_ave_p2).second,2)){ + return true; + } + } + } + + + double angle1 = (3.1415926-dir1.angle(dir2))/3.1415926*180.; + double angle2 = dir3.angle(dir2)/3.1415926*180.; + double angle3 = (3.1415926-dir1.angle(dir3))/3.1415926*180.; + + + if (length_2 <=10*units::cm){ + if (angle1 < 15 && (angle2 < 60 || length_2 < 5*units::cm) ) return true; + }else{ + if (angle1 < 15 && angle2 <15 && angle3 < 25 || + angle3 < 10 && (angle1+angle2)<45 && dis < 5*units::cm ) + return true; + + double ave_dis = sqrt(pow(cluster1_ave_pos.x()-cluster2_ave_pos.x(),2) + pow(cluster1_ave_pos.y()-cluster2_ave_pos.y(),2) + pow(cluster1_ave_pos.z()-cluster2_ave_pos.z(),2)); + geo_point_t test_point; + double min_dis = 1e9, max_dis = -1e9; + + if (fabs(ave_dir.angle(drift_dir_abs)-3.1415926/2.)/3.1415926*180. 
> 7.5 && ave_dis < 30*units::cm){ + if (i==1){ + for (int k=-5;k!=10;k++){ + test_point.set(cluster1_ave_pos.x() - dir1.x() * (ave_dis +k*2*units::cm), cluster1_ave_pos.y() - dir1.y() * (ave_dis +k*2*units::cm), cluster1_ave_pos.z() - dir1.z() * (ave_dis +k*2*units::cm)); + + auto temp_results = cluster_2.get_closest_point_blob(test_point); + //reuse this + geo_point_t test_point1 = temp_results.first; + if (sqrt(pow(test_point1.x()-test_point.x(),2)+pow(test_point1.y()-test_point.y(),2)+pow(test_point1.z()-test_point.z(),2))<1.5*units::cm){ + double temp_dis = (test_point1.x() - cluster1_ave_pos.x())*dir1.x() + (test_point1.y() - cluster1_ave_pos.y())*dir1.y() + (test_point1.z() - cluster1_ave_pos.z())*dir1.z(); + temp_dis *=-1; + if (temp_dis < min_dis) min_dis = temp_dis; + if (temp_dis > max_dis) max_dis = temp_dis; + } + } + + if ((max_dis - min_dis)>2.5*units::cm) return true; + }else if (i==2){ + for (int k=-5;k!=10;k++){ + test_point.set(cluster2_ave_pos.x() - dir3.x() * (ave_dis +k*2*units::cm), cluster2_ave_pos.y() - dir3.y() * (ave_dis +k*2*units::cm), cluster2_ave_pos.z() - dir3.z() * (ave_dis +k*2*units::cm)); + + auto temp_results = cluster_1.get_closest_point_blob(test_point); + //reuse this + geo_point_t test_point1 = temp_results.first; + if (sqrt(pow(test_point1.x()-test_point.x(),2)+pow(test_point1.y()-test_point.y(),2)+pow(test_point1.z()-test_point.z(),2))<1.5*units::cm){ + double temp_dis = (test_point1.x() - cluster2_ave_pos.x())*dir3.x() + (test_point1.y() - cluster2_ave_pos.y())*dir3.y() + (test_point1.z() - cluster2_ave_pos.z())*dir3.z(); + temp_dis *=-1; + if (temp_dis < min_dis) min_dis = temp_dis; + if (temp_dis > max_dis) max_dis = temp_dis; + } + } + + if ((max_dis - min_dis)>2.5*units::cm) return true; + } + + } + + } + } + } + + return false; +} + + +// Expand this function to handle multiple APA/Faces ... 
+static void clustering_extend( + Grouping& live_grouping, + const IDetectorVolumes::pointer dv, // detector volumes + const Tree::Scope& scope, + const int flag, // + const double length_cut, // + const int num_try, // + const double length_2_cut, // + const int num_dead_try // +){ + + // Get all the wire plane IDs from the grouping + const auto& wpids = live_grouping.wpids(); + + // Key: pair, Value: drift_dir, angle_u, angle_v, angle_w + std::map> wpid_params; + std::map > wpid_U_dir; + std::map > wpid_V_dir; + std::map > wpid_W_dir; + std::set apas; + for (const auto& wpid : wpids) { + int apa = wpid.apa(); + int face = wpid.face(); + apas.insert(apa); + + // Create wpids for all three planes with this APA and face + WirePlaneId wpid_u(kUlayer, face, apa); + WirePlaneId wpid_v(kVlayer, face, apa); + WirePlaneId wpid_w(kWlayer, face, apa); + + // Get drift direction based on face orientation + int face_dirx = dv->face_dirx(wpid_u); + geo_point_t drift_dir(face_dirx, 0, 0); + + // Get wire directions for all planes + Vector wire_dir_u = dv->wire_direction(wpid_u); + Vector wire_dir_v = dv->wire_direction(wpid_v); + Vector wire_dir_w = dv->wire_direction(wpid_w); + + // Calculate angles + double angle_u = std::atan2(wire_dir_u.z(), wire_dir_u.y()); + double angle_v = std::atan2(wire_dir_v.z(), wire_dir_v.y()); + double angle_w = std::atan2(wire_dir_w.z(), wire_dir_w.y()); + + wpid_params[wpid] = std::make_tuple(drift_dir, angle_u, angle_v, angle_w); + wpid_U_dir[wpid] = std::make_pair(geo_point_t(0, cos(angle_u), sin(angle_u)), angle_u); + wpid_V_dir[wpid] = std::make_pair(geo_point_t(0, cos(angle_v), sin(angle_v)), angle_v); + wpid_W_dir[wpid] = std::make_pair(geo_point_t(0, cos(angle_w), sin(angle_w)), angle_w); + } + + + geo_point_t drift_dir_abs(1,0,0); + cluster_set_t used_clusters; + + + // prepare graph ... 
+ typedef cluster_connectivity_graph_t Graph; + Graph g; + std::unordered_map ilive2desc; // added live index to graph descriptor + std::map map_cluster_index; + const auto& live_clusters = live_grouping.children(); + + for (size_t ilive = 0; ilive < live_clusters.size(); ++ilive) { + auto& live = live_clusters.at(ilive); + map_cluster_index[live] = ilive; + ilive2desc[ilive] = boost::add_vertex(ilive, g); + // set scope ... + if (live->get_default_scope().hash() != scope.hash()) { + live->set_default_scope(scope); + } + } + + // original algorithm ... (establish edges ... ) + + int length_1_cut = 40*units::cm + num_try * 10*units::cm; + + if (flag==1) length_1_cut = 20*units::cm + num_try*10*units::cm; //prolong case + + for (size_t i=0;i!=live_clusters.size();i++){ + auto cluster_1 = live_clusters.at(i); + if (!cluster_1->get_scope_filter(scope)) continue; + + + if (cluster_1->get_length() > length_1_cut){ + geo_point_t highest_p, lowest_p, earliest_p, latest_p; + + + if (flag==1){// prolong case ... 
+ + std::tie(earliest_p, latest_p) = cluster_1->get_earliest_latest_points(); + // find earliest point + + geo_point_t dir_earlp = cluster_1->vhough_transform(earliest_p,60*units::cm); + auto wpid_earliest_p = cluster_1->wpid(earliest_p); + + geo_point_t tempV5,tempV1; + tempV1.set(0,dir_earlp.y(),dir_earlp.z()); + double angle1 = tempV1.angle(wpid_U_dir.at(wpid_earliest_p).first); + tempV5.set(fabs(dir_earlp.x()),sqrt(pow(dir_earlp.y(),2)+pow(dir_earlp.z(),2))*sin(angle1),0); + angle1 = tempV5.angle(drift_dir_abs); + + double angle2 = tempV1.angle(wpid_V_dir.at(wpid_earliest_p).first); + tempV5.set(fabs(dir_earlp.x()),sqrt(pow(dir_earlp.y(),2)+pow(dir_earlp.z(),2))*sin(angle2),0); + angle2 = tempV5.angle(drift_dir_abs); + + double angle3 = tempV1.angle(wpid_W_dir.at(wpid_earliest_p).first); + tempV5.set(fabs(dir_earlp.x()),sqrt(pow(dir_earlp.y(),2)+pow(dir_earlp.z(),2))*sin(angle3),0); + angle3 = tempV5.angle(drift_dir_abs); + + + // find latest point + geo_point_t dir_latep = cluster_1->vhough_transform(latest_p, 60*units::cm); + auto wpid_latest_p = cluster_1->wpid(latest_p); + tempV1.set(0,dir_latep.y(),dir_latep.z()); + double angle4 = tempV1.angle(wpid_U_dir.at(wpid_latest_p).first); + tempV5.set(fabs(dir_latep.x()),sqrt(pow(dir_latep.y(),2)+pow(dir_latep.z(),2))*sin(angle4),0); + angle4 = tempV5.angle(drift_dir_abs); + + double angle5 = tempV1.angle(wpid_V_dir.at(wpid_latest_p).first); + tempV5.set(fabs(dir_latep.x()),sqrt(pow(dir_latep.y(),2)+pow(dir_latep.z(),2))*sin(angle5),0); + angle5 = tempV5.angle(drift_dir_abs); + + double angle6 = tempV1.angle(wpid_W_dir.at(wpid_latest_p).first); + tempV5.set(fabs(dir_latep.x()),sqrt(pow(dir_latep.y(),2)+pow(dir_latep.z(),2))*sin(angle6),0); + angle6 = tempV5.angle(drift_dir_abs); + + if (angle1 <5./180.*3.1415926 || angle2 < 5./180.*3.1415926 || angle3 < 5./180.*3.1415926){ + // flag_prol = true; + + for (size_t j=0;j!=live_clusters.size();j++){ + auto cluster_2 = live_clusters.at(j); + if 
(!cluster_2->get_scope_filter(scope)) continue; + + if (used_clusters.find(cluster_2)!=used_clusters.end()) continue; + if (cluster_2==cluster_1) continue; + if (Clustering_4th_prol(*cluster_1,*cluster_2,cluster_2->get_length(),earliest_p,dir_earlp,length_cut)){ + // to_be_merged_pairs.insert(std::make_pair(cluster_1,cluster_2)); + boost::add_edge(ilive2desc[map_cluster_index[cluster_1]], + ilive2desc[map_cluster_index[cluster_2]], g); + + + + if (cluster_2->get_length()<10*units::cm) + used_clusters.insert(cluster_2); + } + } + } + + if (angle4<5./180.*3.1415926 || angle5 < 5./180.*3.1415926 || angle6 < 5./180.*3.1415926){ + + // flag_prol = true; + for (size_t j=0;j!=live_clusters.size();j++){ + auto cluster_2 = live_clusters.at(j); + if (!cluster_2->get_scope_filter(scope)) continue; + if (used_clusters.find(cluster_2)!=used_clusters.end()) continue; + if (cluster_2==cluster_1) continue; + if (Clustering_4th_prol(*cluster_1,*cluster_2,cluster_2->get_length(),latest_p,dir_latep,length_cut)){ + //to_be_merged_pairs.insert(std::make_pair(cluster_1,cluster_2)); + boost::add_edge(ilive2desc[map_cluster_index[cluster_1]], + ilive2desc[map_cluster_index[cluster_2]], g); + + + if (cluster_2->get_length()<10*units::cm) + used_clusters.insert(cluster_2); + } + } + } + }else if (flag==2){ // parallel case ... 
+ std::tie(highest_p, lowest_p) = cluster_1->get_highest_lowest_points(); + + highest_p = cluster_1->calc_ave_pos(highest_p,5*units::cm); + geo_point_t dir_highp = cluster_1->vhough_transform(highest_p,100*units::cm); + + lowest_p = cluster_1->calc_ave_pos(lowest_p,5*units::cm); + geo_point_t dir_lowp = cluster_1->vhough_transform(lowest_p, 100*units::cm); + + if (fabs(dir_highp.angle(drift_dir_abs)-3.1415926/2.)<5/180.*3.1415926){ + // flag_para = true; + + for (size_t j=0;j!=live_clusters.size();j++){ + auto cluster_2 = live_clusters.at(j); + if (!cluster_2->get_scope_filter(scope)) continue; + if (used_clusters.find(cluster_2)!=used_clusters.end()) continue; + if (cluster_2==cluster_1) continue; + + if (Clustering_4th_para(*cluster_1,*cluster_2,cluster_1->get_length(),cluster_2->get_length(),highest_p,dir_highp,length_cut)){ + //to_be_merged_pairs.insert(std::make_pair(cluster_1,cluster_2)); + boost::add_edge(ilive2desc[map_cluster_index[cluster_1]], + ilive2desc[map_cluster_index[cluster_2]], g); + + + + if (cluster_2->get_length()<15*units::cm) + used_clusters.insert(cluster_2); + } + } + } + + if (fabs(dir_lowp.angle(drift_dir_abs)-3.1415926/2.)<5/180.*3.1415926 ){ + // flag_para = true; + + for (size_t j=0;j!=live_clusters.size();j++){ + auto cluster_2 = live_clusters.at(j); + if (!cluster_2->get_scope_filter(scope)) continue; + if (cluster_2==cluster_1) continue; + if (Clustering_4th_para(*cluster_1,*cluster_2,cluster_1->get_length(),cluster_2->get_length(),lowest_p,dir_lowp,length_cut)){ + // to_be_merged_pairs.insert(std::make_pair(cluster_1,cluster_2)); + boost::add_edge(ilive2desc[map_cluster_index[cluster_1]], + ilive2desc[map_cluster_index[cluster_2]], g); + + + } + } + + } + }else if (flag==3){ // regular case ... + auto hl_ps = cluster_1->get_highest_lowest_points(); + auto el_ps = cluster_1->get_earliest_latest_points(); - geo_point_t drift_dir(1, 0, 0); // assuming the drift direction is along X ... 
- const auto [angle_u,angle_v,angle_w] = cluster_1.grouping()->wire_angles(); + geo_point_t first_p, second_p; + + if (pow(hl_ps.first.x()-hl_ps.second.x(),2)+pow(hl_ps.first.y()-hl_ps.second.y(),2)+pow(hl_ps.first.z()-hl_ps.second.z(),2) > pow(el_ps.first.x()-el_ps.second.x(),2)+pow(el_ps.first.y()-el_ps.second.y(),2)+pow(el_ps.first.z()-el_ps.second.z(),2)){ + first_p = hl_ps.first; + second_p = hl_ps.second; + }else{ + first_p = el_ps.first; + second_p = el_ps.second; + } - if (dis1 > 15*units::cm && dis < 3*units::cm && length_2 > 80*units::cm &&length_1>80*units::cm) return false; - - if (dis < length_cut && (length_2 >= 40*units::cm || dis < 3*units::cm)){ - geo_point_t cluster1_ave_pos = cluster_1.calc_ave_pos(p1,5*units::cm); - geo_point_t cluster2_ave_pos = cluster_2.calc_ave_pos(p2,5*units::cm); - geo_point_t dir1; - - if (cluster_1.nnearby(cluster1_ave_pos, 30*units::cm)>50 && length_1 < 120*units::cm){ - dir1 = cluster_1.vhough_transform(cluster1_ave_pos,30*units::cm); - }else{ - dir1 = cluster_1.vhough_transform(cluster1_ave_pos,80*units::cm); - } + for (size_t j=0;j!=live_clusters.size();j++){ + auto cluster_2 = live_clusters.at(j); + if (!cluster_2->get_scope_filter(scope)) continue; - geo_point_t dir3; - if (cluster_2.nnearby(cluster2_ave_pos, 30*units::cm)>50&&length_2 < 120*units::cm){ - dir3 = cluster_2.vhough_transform(cluster2_ave_pos,30*units::cm); - }else{ - dir3 = cluster_2.vhough_transform(cluster2_ave_pos,80*units::cm); - } + if (used_clusters.find(cluster_2)!=used_clusters.end()) continue; + if (cluster_2==cluster_1) continue; + + if (Clustering_4th_reg(*cluster_1,*cluster_2,cluster_1->get_length(),cluster_2->get_length(),first_p,length_cut, wpid_U_dir, wpid_V_dir, wpid_W_dir, dv)){ + // to_be_merged_pairs.insert(std::make_pair(cluster_1,cluster_2)); + boost::add_edge(ilive2desc[map_cluster_index[cluster_1]], + ilive2desc[map_cluster_index[cluster_2]], g); - geo_point_t dir2(cluster2_ave_pos.x() - cluster1_ave_pos.x(), - 
cluster2_ave_pos.y() - cluster1_ave_pos.y(), - cluster2_ave_pos.z() - cluster1_ave_pos.z()); - - double ave_dis = sqrt(pow(cluster1_ave_pos.x()-cluster2_ave_pos.x(),2) + pow(cluster1_ave_pos.y()-cluster2_ave_pos.y(),2) + pow(cluster1_ave_pos.z()-cluster2_ave_pos.z(),2)); - geo_point_t test_point; - double min_dis = 1e9, max_dis = -1e9; + if (cluster_2->get_length()<10*units::cm) + used_clusters.insert(cluster_2); + + }else if (Clustering_4th_reg(*cluster_1,*cluster_2,cluster_1->get_length(),cluster_2->get_length(),second_p,length_cut, wpid_U_dir, wpid_V_dir, wpid_W_dir, dv)){ + //to_be_merged_pairs.insert(std::make_pair(cluster_1,cluster_2)); + boost::add_edge(ilive2desc[map_cluster_index[cluster_1]], + ilive2desc[map_cluster_index[cluster_2]], g); - if (dir2.angle(dir1)>3.1415926/2. ){ - - for (int i=-5;i!=10;i++){ - test_point.set(cluster1_ave_pos.x() - dir1.x() * (ave_dis +i*2*units::cm), cluster1_ave_pos.y() - dir1.y() * (ave_dis +i*2*units::cm), cluster1_ave_pos.z() - dir1.z() * (ave_dis +i*2*units::cm)); - - auto temp_results = cluster_2.get_closest_point_blob(test_point); - //reuse this - geo_point_t test_point1 = temp_results.first; - if (sqrt(pow(test_point1.x()-test_point.x(),2)+pow(test_point1.y()-test_point.y(),2)+pow(test_point1.z()-test_point.z(),2))<1.5*units::cm){ - double temp_dis = (test_point1.x() - cluster1_ave_pos.x())*dir1.x() + (test_point1.y() - cluster1_ave_pos.y())*dir1.y() + (test_point1.z() - cluster1_ave_pos.z())*dir1.z(); - temp_dis *=-1; - if (temp_dis < min_dis) min_dis = temp_dis; - if (temp_dis > max_dis) max_dis = temp_dis; - } - } - if ((max_dis - min_dis)>2.5*units::cm) return true; - } + if (cluster_2->get_length()<10*units::cm) + used_clusters.insert(cluster_2); + } + + } - if (dir2.angle(dir3)<3.1415926/2.){ - - // look at the other side (repeat) - // cluster2_ave_pos, dir2 - min_dis = 1e9; - max_dis = -1e9; - for (int i=-5;i!=10;i++){ - test_point.set(cluster2_ave_pos.x() - dir3.x() * (ave_dis +i*2*units::cm), 
cluster2_ave_pos.y() - dir3.y() * (ave_dis +i*2*units::cm), cluster2_ave_pos.z() - dir3.z() * (ave_dis +i*2*units::cm)); - auto temp_results = cluster_1.get_closest_point_blob(test_point); - //reuse this - geo_point_t test_point1 = temp_results.first; - if (sqrt(pow(test_point1.x()-test_point.x(),2)+pow(test_point1.y()-test_point.y(),2)+pow(test_point1.z()-test_point.z(),2))<1.5*units::cm){ - double temp_dis = (test_point1.x() - cluster2_ave_pos.x())*dir3.x() + (test_point1.y() - cluster2_ave_pos.y())*dir3.y() + (test_point1.z() - cluster2_ave_pos.z())*dir3.z(); - temp_dis *=-1; - if (temp_dis < min_dis) min_dis = temp_dis; - if (temp_dis > max_dis) max_dis = temp_dis; - } + }else if (flag==4){ + // if (cluster_connected_dead.find(cluster_1)!=cluster_connected_dead.end()){ + if (cluster_1->get_flag(Flags::live_dead)) { + used_clusters.insert(cluster_1); + for (size_t j=0;j!=live_clusters.size();j++){ + auto cluster_2 = live_clusters.at(j); + if (!cluster_2->get_scope_filter(scope)) continue; + + if (cluster_2->get_length() < length_2_cut) continue; + if (used_clusters.find(cluster_2)!=used_clusters.end()) continue; + if (Clustering_4th_dead(*cluster_1,*cluster_2,cluster_1->get_length(),cluster_2->get_length(),length_cut,num_dead_try, wpid_U_dir, wpid_V_dir, wpid_W_dir, dv)){ + // to_be_merged_pairs.insert(std::make_pair(cluster_1,cluster_2)); + boost::add_edge(ilive2desc[map_cluster_index[cluster_1]], + ilive2desc[map_cluster_index[cluster_2]], g); + + + if (cluster_2->get_length()<10*units::cm) + used_clusters.insert(cluster_2); + } + } + } } - - if ((max_dis - min_dis)>2.5*units::cm) return true; } - - }else if (dis < 2 * length_cut && length_2 < 40*units::cm){ + } - // pronlonged case for U 3 and V 4 ... 
- geo_point_t U_dir(0,cos(angle_u),sin(angle_u)); - geo_point_t V_dir(0,cos(angle_v),sin(angle_v)); - - geo_point_t dir2(p2.x()-p1.x(),p2.y()-p1.y(),p2.z()-p1.z()); - bool flag_para = false, flag_prol =false, flag_reg = false; - - double angle1 = fabs(dir2.angle(drift_dir)-3.1415926/2.)/3.1415926*180.; - - if (angle1 < 5 && dis < 2*length_cut || angle1 < 2 ){ - flag_para = true; - }else if (dis < 2*length_cut){ - geo_point_t tempV1(0, p2.y() - p1.y(), p2.z() - p1.z()); - geo_point_t tempV5; - double angle2 = tempV1.angle(U_dir); - tempV5.set(fabs(p2.x()-p1.x()),sqrt(pow(p2.y() - p1.y(),2)+pow(p2.z() - p1.z(),2))*sin(angle2),0); - angle2 = tempV5.angle(drift_dir)/3.1415926*180.; - - double angle3 = tempV1.angle(V_dir); - tempV5.set(fabs(p2.x()-p1.x()),sqrt(pow(p2.y() - p1.y(),2)+pow(p2.z() - p1.z(),2))*sin(angle3),0); - angle3 = tempV5.angle(drift_dir)/3.1415926*180.; - if (angle2<7.5 || angle3 < 7.5) - flag_prol = true; + merge_clusters(g, live_grouping); - } + // { + // auto live_clusters = live_grouping.children(); // copy + // // Process each cluster + // for (size_t iclus = 0; iclus < live_clusters.size(); ++iclus) { + // Cluster* cluster = live_clusters.at(iclus); + // auto& scope = cluster->get_default_scope(); + // std::cout << "Test: " << iclus << " " << cluster->nchildren() << " " << scope.pcname << " " << scope.coords[0] << " " << scope.coords[1] << " " << scope.coords[2] << " " << cluster->get_scope_filter(scope)<< " " << cluster->get_pca().center) << std::endl; + // } + // } - if (flag_para || flag_prol || flag_reg){ - geo_point_t dir1; - if (cluster_1.nnearby(p1, 15*units::cm)>30 && (flag_prol ||flag_reg) ){ - dir1 = cluster_1.vhough_transform(p1,15*units::cm); - }else{ - dir1 = cluster_1.vhough_transform(p1,60*units::cm); - } - geo_point_t dir3; - if (cluster_2.nnearby(p2, 15*units::cm)>30 && (flag_prol || flag_reg)){ - dir3 = cluster_2.vhough_transform(p2,15*units::cm); - }else{ - dir3 = cluster_2.vhough_transform(p2,60*units::cm); - } - - double 
angle4 = (3.1415926-dir1.angle(dir2))/3.1415926*180.; - double angle5 = dir2.angle(dir3)/3.1415926*180.; - - if (flag_para && fabs(dir3.angle(drift_dir)-3.141592/2.)<10/180.*3.1415926 && fabs(dir1.angle(drift_dir)-3.141592/2.)<10/180.*3.1415926){ - if (angle4 < 30 && (length_2 < 12*units::cm && fabs(angle5-90.)>30 || angle5 < 45)) - return true; - }else if (flag_prol){ - if (angle4 < 25 && (length_2 < 15*units::cm && fabs(angle5-90.)>30 || angle5 < 25)) - return true; - } - if (fabs(dir2.angle(drift_dir)-3.1415926/2.)/3.1415926*180.>7.5){ - // non-parallel case ... - if (WireCell::PointCloud::Facade::is_angle_consistent(dir1,dir2,false,10,angle_u,angle_v,angle_w,2)){ - if (length_2 < 8*units::cm && WireCell::PointCloud::Facade::is_angle_consistent(dir1,dir2,false,5,angle_u,angle_v,angle_w,2)) - return true; - if (WireCell::PointCloud::Facade::is_angle_consistent(dir3,dir2,true,10,angle_u,angle_v,angle_w,2)){ - return true; - } - } - } - } - } - return false; } -std::vector> WireCell::PointCloud::Facade::get_strategic_points(const Cluster& cluster) { + + + + + + +std::vector> WireCell::Clus::Facade::get_strategic_points(const Cluster& cluster) { // Store unique points and their corresponding blobs std::set> unique_points; @@ -516,13 +884,13 @@ std::vector> WireCell::PointCloud::Facade::g // // 2. 
Add points based on PCA // { - // auto center = cluster.get_center(); + // auto center = cluster.get_pca().center); // unique_points.emplace(center, cluster.blob_with_point(cluster.get_closest_point_index(center))); // // Add points along principal axes // for (int i = 0; i < 3; ++i) { - // auto dir = cluster.get_pca_axis(i); - // auto value = sqrt(cluster.get_pca_value(i)); // Use eigenvalue for scale + // auto dir = cluster.get_pca().axis.at(i); + // auto value = sqrt(cluster.get_pca().values.at(i)); // Use eigenvalue for scale // // Add points in both directions along each principal axis // geo_point_t p1 = center + dir * value; @@ -577,7 +945,7 @@ std::vector> WireCell::PointCloud::Facade::g return points; } -double WireCell::PointCloud::Facade::Find_Closest_Points( +double WireCell::Clus::Facade::Find_Closest_Points( const Cluster& cluster1ref, const Cluster& cluster2ref, double length_1, @@ -769,189 +1137,6 @@ double WireCell::PointCloud::Facade::Find_Closest_Points( } -bool WireCell::PointCloud::Facade::Clustering_4th_dead( - const Cluster& cluster_1, - const Cluster& cluster_2, - double length_1, double length_2, double length_cut, int num_dead_try) -{ - geo_point_t drift_dir(1, 0, 0); // assuming the drift direction is along X ... - const auto [angle_u,angle_v,angle_w] = cluster_1.grouping()->wire_angles(); - - geo_point_t p1; - geo_point_t p2; - - double dis = Find_Closest_Points(cluster_1, cluster_2, length_1, length_2, length_cut, p1, p2); - - //add a special one ... for uboone ... - /* - if (length_1 > 30*units::cm && length_2 > 30*units::cm && - (dis < 3*units::cm || - fabs(p1.x()-p2.x()) < 1.6*units::cm && (dis < 20*units::cm && - p1.z() > 700.6*units::cm && p1.z() < 739.6*units::cm && // special region - p2.z() > 700.6*units::cm && p2.z() < 739.6*units::cm && // special region ... 
- p1.y() > -10.4*units::cm && p1.y() < 29*units::cm && - p2.y() > -10.4*units::cm && p2.y() < 29*units::cm ) - )){ - return true; - } - */ - - if ((dis < length_cut || (length_2 > 50*units::cm && dis < 80*units::cm))){ - - geo_point_t cluster1_ave_pos_save; - geo_point_t cluster2_ave_pos_save; - geo_point_t dir1_save; - geo_point_t dir3_save; - - for (int i=0;i!=num_dead_try;i++){ - geo_point_t cluster1_ave_pos; - geo_point_t cluster2_ave_pos; - - geo_point_t dir1; - geo_point_t dir3; - geo_point_t dir2; - - if (i==0){ - cluster1_ave_pos = cluster_1.calc_ave_pos(p1,5*units::cm); - cluster1_ave_pos_save = cluster1_ave_pos; - cluster2_ave_pos = cluster_2.calc_ave_pos(p2,5*units::cm); - cluster2_ave_pos_save = cluster2_ave_pos; - - if (num_dead_try==1){ - dir1 = cluster_1.vhough_transform(cluster1_ave_pos,20*units::cm); - dir3 = cluster_2.vhough_transform(cluster2_ave_pos,20*units::cm); - }else{ - dir1 = cluster_1.vhough_transform(cluster1_ave_pos,80*units::cm); - dir3 = cluster_2.vhough_transform(cluster2_ave_pos,80*units::cm); - } - dir1_save = dir1; - dir3_save = dir3; - - dir2.set(cluster2_ave_pos.x() - cluster1_ave_pos.x()+1e-9, cluster2_ave_pos.y() - cluster1_ave_pos.y()+1e-9, cluster2_ave_pos.z() - cluster1_ave_pos.z()+1e-9); // 2-1 - - }else if (i==1){ - if (length_2 >= 15*units::cm &&(!(length_2 > 150*units::cm && dis<15*units::cm))){ - cluster1_ave_pos = cluster1_ave_pos_save;//cluster_1->calc_ave_pos(p1,5*units::cm); - dir1 = dir1_save;//cluster_1->VHoughTrans(cluster1_ave_pos,80*units::cm); - - geo_vector_t dir_test(dir1); - dir_test = dir_test/dir_test.magnitude(); - dir_test = (-1) * dir_test; - - std::pair temp_results = cluster_2.get_closest_point_along_vec(cluster1_ave_pos, dir_test, dis*2, 5*units::cm, 15, 10*units::cm); - - if (temp_results.second < 100*units::cm){ - cluster2_ave_pos = cluster_2.calc_ave_pos(temp_results.first,5*units::cm); - dir3 = cluster_2.vhough_transform(cluster2_ave_pos,80*units::cm); - dir2.set(cluster2_ave_pos.x() - 
cluster1_ave_pos.x()+1e-9, cluster2_ave_pos.y() - cluster1_ave_pos.y()+1e-9, cluster2_ave_pos.z() - cluster1_ave_pos.z()+1e-9); // 2-1 - - }else{ - continue; - } - }else{ - continue; - } - }else if (i==2){ - if (length_2 >=15*units::cm&&(!(length_2 > 150*units::cm && dis<15*units::cm))){ - cluster2_ave_pos = cluster2_ave_pos_save;//cluster_2->calc_ave_pos(p2,5*units::cm); - dir3 = dir3_save;//cluster_2->VHoughTrans(cluster2_ave_pos,80*units::cm); - - geo_point_t dir_test(dir3); - dir_test = dir_test / dir_test.magnitude(); - dir_test = (-1) * dir_test; - - std::pair temp_results = cluster_1.get_closest_point_along_vec(cluster2_ave_pos, dir_test, dis*2, 5*units::cm, 15, 10*units::cm); - - if (temp_results.second < 100*units::cm){ - cluster1_ave_pos = cluster_1.calc_ave_pos(temp_results.first,5*units::cm); - dir1 = cluster_1.vhough_transform(cluster1_ave_pos,80*units::cm); - dir2.set(cluster2_ave_pos.x() - cluster1_ave_pos.x()+1e-9, cluster2_ave_pos.y() - cluster1_ave_pos.y()+1e-9, cluster2_ave_pos.z() - cluster1_ave_pos.z()+1e-9); // 2-1 - - }else{ - continue; - } - }else{ - continue; - } - } - - geo_point_t ave_dir(cluster1_ave_pos.x()-cluster2_ave_pos.x(),cluster1_ave_pos.y()-cluster2_ave_pos.y(),cluster1_ave_pos.z()-cluster2_ave_pos.z()); - - - // use projection to deal with stuff ... - if (fabs(ave_dir.angle(drift_dir)-3.1415926/2.)/3.1415926*180.>7.5){ - // non-parallel case ... 
- if (WireCell::PointCloud::Facade::is_angle_consistent(dir1,dir2,false,10,angle_u,angle_v,angle_w,2)){ - if (length_2 < 8*units::cm&& WireCell::PointCloud::Facade::is_angle_consistent(dir1,dir2,false,5,angle_u,angle_v,angle_w,2)) - return true; - if (length_2 < 15*units::cm && WireCell::PointCloud::Facade::is_angle_consistent(dir1,dir2,false,7.5,angle_u,angle_v,angle_w,2)) - return true; - if (WireCell::PointCloud::Facade::is_angle_consistent(dir3,dir2,true,10,angle_u,angle_v,angle_w,2)){ - return true; - } - } - } - - - double angle1 = (3.1415926-dir1.angle(dir2))/3.1415926*180.; - double angle2 = dir3.angle(dir2)/3.1415926*180.; - double angle3 = (3.1415926-dir1.angle(dir3))/3.1415926*180.; - - - if (length_2 <=10*units::cm){ - if (angle1 < 15 && (angle2 < 60 || length_2 < 5*units::cm) ) return true; - }else{ - if (angle1 < 15 && angle2 <15 && angle3 < 25 || - angle3 < 10 && (angle1+angle2)<45 && dis < 5*units::cm ) - return true; - - double ave_dis = sqrt(pow(cluster1_ave_pos.x()-cluster2_ave_pos.x(),2) + pow(cluster1_ave_pos.y()-cluster2_ave_pos.y(),2) + pow(cluster1_ave_pos.z()-cluster2_ave_pos.z(),2)); - geo_point_t test_point; - double min_dis = 1e9, max_dis = -1e9; - - if (fabs(ave_dir.angle(drift_dir)-3.1415926/2.)/3.1415926*180. 
> 7.5 && ave_dis < 30*units::cm){ - if (i==1){ - for (int k=-5;k!=10;k++){ - test_point.set(cluster1_ave_pos.x() - dir1.x() * (ave_dis +k*2*units::cm), cluster1_ave_pos.y() - dir1.y() * (ave_dis +k*2*units::cm), cluster1_ave_pos.z() - dir1.z() * (ave_dis +k*2*units::cm)); - - auto temp_results = cluster_2.get_closest_point_blob(test_point); - //reuse this - geo_point_t test_point1 = temp_results.first; - if (sqrt(pow(test_point1.x()-test_point.x(),2)+pow(test_point1.y()-test_point.y(),2)+pow(test_point1.z()-test_point.z(),2))<1.5*units::cm){ - double temp_dis = (test_point1.x() - cluster1_ave_pos.x())*dir1.x() + (test_point1.y() - cluster1_ave_pos.y())*dir1.y() + (test_point1.z() - cluster1_ave_pos.z())*dir1.z(); - temp_dis *=-1; - if (temp_dis < min_dis) min_dis = temp_dis; - if (temp_dis > max_dis) max_dis = temp_dis; - } - } - - if ((max_dis - min_dis)>2.5*units::cm) return true; - }else if (i==2){ - for (int k=-5;k!=10;k++){ - test_point.set(cluster2_ave_pos.x() - dir3.x() * (ave_dis +k*2*units::cm), cluster2_ave_pos.y() - dir3.y() * (ave_dis +k*2*units::cm), cluster2_ave_pos.z() - dir3.z() * (ave_dis +k*2*units::cm)); - - auto temp_results = cluster_1.get_closest_point_blob(test_point); - //reuse this - geo_point_t test_point1 = temp_results.first; - if (sqrt(pow(test_point1.x()-test_point.x(),2)+pow(test_point1.y()-test_point.y(),2)+pow(test_point1.z()-test_point.z(),2))<1.5*units::cm){ - double temp_dis = (test_point1.x() - cluster2_ave_pos.x())*dir3.x() + (test_point1.y() - cluster2_ave_pos.y())*dir3.y() + (test_point1.z() - cluster2_ave_pos.z())*dir3.z(); - temp_dis *=-1; - if (temp_dis < min_dis) min_dis = temp_dis; - if (temp_dis > max_dis) max_dis = temp_dis; - } - } - - if ((max_dis - min_dis)>2.5*units::cm) return true; - } - - } - - } - } - } - - return false; -} #pragma GCC diagnostic pop diff --git a/clus/src/clustering_isolated.cxx b/clus/src/clustering_isolated.cxx index eeff77ba2..88453bb00 100644 --- a/clus/src/clustering_isolated.cxx +++ 
b/clus/src/clustering_isolated.cxx @@ -1,12 +1,45 @@ -#include +#include "WireCellClus/IEnsembleVisitor.h" +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellClus/ClusteringFuncsMixins.h" + +#include "WireCellIface/IConfigurable.h" + +#include "WireCellUtil/NamedFactory.h" + +class ClusteringIsolated; +WIRECELL_FACTORY(ClusteringIsolated, ClusteringIsolated, + WireCell::IConfigurable, WireCell::Clus::IEnsembleVisitor) using namespace WireCell; using namespace WireCell::Clus; -using namespace WireCell::Aux; -using namespace WireCell::Aux::TensorDM; -using namespace WireCell::PointCloud::Facade; +using namespace WireCell::Clus::Facade; using namespace WireCell::PointCloud::Tree; + +static void clustering_isolated( + Grouping& live_grouping, + IDetectorVolumes::pointer dv, + const Tree::Scope& scope + ); + +class ClusteringIsolated : public IConfigurable, public Clus::IEnsembleVisitor, private NeedDV, private NeedScope { +public: + ClusteringIsolated() {} + virtual ~ClusteringIsolated() {} + + void configure(const WireCell::Configuration& config) { + NeedDV::configure(config); + NeedScope::configure(config); + } + + void visit(Ensemble& ensemble) const { + auto& live = *ensemble.with_name("live").at(0); + return clustering_isolated(live, m_dv, m_scope); + } + +}; + + // The original developers do not care. #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wparentheses" @@ -22,20 +55,77 @@ using namespace WireCell::PointCloud::Tree; * @brief aims to organize clusters based on spatial relationships and merges those that meet specific proximity and size criteria. 
* @return large cluster -> {small cluster, distance} */ -void WireCell::PointCloud::Facade::clustering_isolated(Grouping& live_grouping) +// Handle all APA/Faces +static void clustering_isolated( + Grouping& live_grouping, + const IDetectorVolumes::pointer dv, + const Tree::Scope& scope +) { + // Get all the wire plane IDs from the grouping + const auto& wpids = live_grouping.wpids(); + // Key: pair, Value: drift_dir, angle_u, angle_v, angle_w + std::map> wpid_params; + std::set apas; + for (const auto& wpid : wpids) { + int apa = wpid.apa(); + int face = wpid.face(); + apas.insert(apa); + + // Create wpids for all three planes with this APA and face + WirePlaneId wpid_u(kUlayer, face, apa); + WirePlaneId wpid_v(kVlayer, face, apa); + WirePlaneId wpid_w(kWlayer, face, apa); + + // Get drift direction based on face orientation + int face_dirx = dv->face_dirx(wpid_u); + geo_point_t drift_dir(face_dirx, 0, 0); + + // Get wire directions for all planes + Vector wire_dir_u = dv->wire_direction(wpid_u); + Vector wire_dir_v = dv->wire_direction(wpid_v); + Vector wire_dir_w = dv->wire_direction(wpid_w); + + // Calculate angles + double angle_u = std::atan2(wire_dir_u.z(), wire_dir_u.y()); + double angle_v = std::atan2(wire_dir_v.z(), wire_dir_v.y()); + double angle_w = std::atan2(wire_dir_w.z(), wire_dir_w.y()); + + wpid_params[wpid] = std::make_tuple(drift_dir, angle_u, angle_v, angle_w); + } + std::vector live_clusters = live_grouping.children(); // copy // sort the clusters by length using a lambda function (sort from small to large clusters ... 
) std::sort(live_clusters.begin(), live_clusters.end(), [](const Cluster *cluster1, const Cluster *cluster2) { return cluster1->get_length() > cluster2->get_length(); }); + + for (auto& cluster : live_clusters) { + if (cluster->get_default_scope().hash() != scope.hash()) { + cluster->set_default_scope(scope); + // std::cout << "Test: Set default scope: " << pc_name << " " << coords[0] << " " << coords[1] << " " << coords[2] << " " << cluster->get_default_scope().hash() << " " << scope.hash() << std::endl; + } + } - const auto &mp = live_grouping.get_params(); + // const auto &mp = live_grouping.get_params(); // this is for 4 time slices - double time_slice_width = mp.nticks_live_slice * mp.tick_drift; - geo_point_t drift_dir(1, 0, 0); + // double time_slice_width = mp.nticks_live_slice * mp.tick_drift; + + // get wpids ... + std::map map_wpid_nticks_live_slice; + std::map map_wpid_time_slice_width; + for (const auto& wpid : wpids) { + map_wpid_nticks_live_slice[wpid] = dv->metadata(wpid)["nticks_live_slice"].asDouble() ; + map_wpid_time_slice_width[wpid] = dv->metadata(wpid)["nticks_live_slice"].asDouble() * dv->metadata(wpid)["tick_drift"].asDouble() ; + // std::cout << "Test: " << wpid << " " << map_wpid_nticks_live_slice[wpid] << " " << map_wpid_time_slice_width[wpid] << " " << mp.nticks_live_slice << " " << time_slice_width << std::endl; + } + + + // geo_point_t drift_dir(1, 0, 0); + // Get drift direction from the first element of wpid_params, + // in the current code, we do not care about the actual direction of drift_dir, so just picking up the first instance + geo_point_t drift_dir_abs(1,0,0); - // std::cout << mp.nticks_live_slice << std::endl; int range_cut = 150; int length_cut = 20 * units::cm; @@ -44,23 +134,32 @@ void WireCell::PointCloud::Facade::clustering_isolated(Grouping& live_grouping) std::vector small_clusters; for (size_t i = 0; i != live_clusters.size(); i++) { - std::tuple ranges_tuple = live_clusters.at(i)->get_uvwt_range(); - std::vector 
ranges = {std::get<0>(ranges_tuple), std::get<1>(ranges_tuple), std::get<2>(ranges_tuple), std::get<3>(ranges_tuple)}; - ranges.at(3) /= mp.nticks_live_slice; + if (!live_clusters.at(i)->get_scope_filter(scope)) continue; + auto map_wpid_uvwt_range = live_clusters.at(i)->get_uvwt_range(); + std::vector ranges(4, 0); // Initialize a vector with 4 zeros + + for (auto [wpid, uvwt_range] : map_wpid_uvwt_range) { + ranges.at(0) += std::get<0>(uvwt_range); + ranges.at(1) += std::get<1>(uvwt_range); + ranges.at(2) += std::get<2>(uvwt_range); + ranges.at(3) += std::get<3>(uvwt_range)/map_wpid_nticks_live_slice[wpid]; + } + // std::tuple ranges_tuple = live_clusters.at(i)->get_uvwt_range(); + // std::vector ranges = {std::get<0>(ranges_tuple), std::get<1>(ranges_tuple), std::get<2>(ranges_tuple), std::get<3>(ranges_tuple)}; + int max = 0; for (int j = 0; j != 4; j++) { if (ranges.at(j) > max) max = ranges.at(j); } - // std::cout << i << " " << live_clusters.at(i)->get_length()/units::cm << " " << live_clusters.at(i)->get_center() << " " << max << " " << range_cut << std::endl; + // std::cout << i << " " << live_clusters.at(i)->get_length()/units::cm << " " << live_clusters.at(i)->get_pca().center) << " " << max << " " << range_cut << std::endl; if (max < range_cut && live_clusters.at(i)->get_length() < length_cut) { small_clusters.push_back(live_clusters.at(i)); } else { if (live_clusters.at(i)->get_length() < 60 * units::cm) { - if (JudgeSeparateDec_1(live_clusters.at(i), drift_dir, live_clusters.at(i)->get_length(), - time_slice_width)) { + if (JudgeSeparateDec_1(live_clusters.at(i), drift_dir_abs, live_clusters.at(i)->get_length())) { // std::vector sep_clusters = Separate_2(live_clusters.at(i), 2.5 * units::cm); - const auto b2id = Separate_2(live_clusters.at(i), 2.5 * units::cm); + const auto b2id = Separate_2(live_clusters.at(i), scope, 2.5 * units::cm); std::set ids; for (const auto& id : b2id) { ids.insert(id); @@ -70,15 +169,16 @@ void 
WireCell::PointCloud::Facade::clustering_isolated(Grouping& live_grouping) double max_length = 0; // for (auto it = sep_clusters.begin(); it != sep_clusters.end(); it++) { for (const auto id : ids) { - // std::tuple ranges = (*it)->get_uvwt_range(); - // std::tuple ranges_tuple = (*it)->get_uvwt_range(); - std::tuple ranges_tuple = get_uvwt_range(live_clusters.at(i), b2id, id); - std::vector ranges = {std::get<0>(ranges_tuple), std::get<1>(ranges_tuple), std::get<2>(ranges_tuple), std::get<3>(ranges_tuple)}; - ranges.at(3) /= mp.nticks_live_slice; - // double length_1 = sqrt(2. / 3. * - // (pow(mp.pitch_u * ranges.at(0), 2) + pow(mp.pitch_v * ranges.at(1), 2) + - // pow(mp.pitch_w * ranges.at(2), 2)) + - // pow(time_slice_width * ranges.at(3), 2)); + auto map_wpid_uvwt_range = get_uvwt_range(live_clusters.at(i), b2id, id); + + std::vector ranges(4, 0); // Initialize a vector with 4 zeros + for (auto [wpid, uvwt_range] : map_wpid_uvwt_range) { + ranges.at(0) += std::get<0>(uvwt_range); + ranges.at(1) += std::get<1>(uvwt_range); + ranges.at(2) += std::get<2>(uvwt_range); + ranges.at(3) += std::get<3>(uvwt_range)/map_wpid_nticks_live_slice[wpid]; + } + double length_1 = get_length(live_clusters.at(i), b2id, id); for (int j = 0; j != 4; j++) { if (ranges.at(j) > max) { @@ -92,7 +192,7 @@ void WireCell::PointCloud::Facade::clustering_isolated(Grouping& live_grouping) // for (size_t j = 0; j != sep_clusters.size(); j++) { // delete sep_clusters.at(j); // } - // std::cout << i << " " << live_clusters.at(i)->get_length()/units::cm << " " << live_clusters.at(i)->get_center() << " " << max << " " << range_cut << std::endl; + // std::cout << i << " " << live_clusters.at(i)->get_length()/units::cm << " " << live_clusters.at(i)->get_pca().center) << " " << max << " " << range_cut << std::endl; if (max < range_cut && max_length < length_cut) { small_clusters.push_back(live_clusters.at(i)); @@ -136,7 +236,7 @@ void WireCell::PointCloud::Facade::clustering_isolated(Grouping& 
live_grouping) min_dis_cluster = big_cluster; } } - // std::cout << "SB: " << curr_cluster->get_length()/units::cm << " " << min_dis_cluster->get_length()/units::cm << " " << curr_cluster->get_center() << " " << min_dis_cluster->get_center() << " " << min_dis << " " << small_big_dis_cut << std::endl; + // std::cout << "SB: " << curr_cluster->get_length()/units::cm << " " << min_dis_cluster->get_length()/units::cm << " " << curr_cluster->get_pca().center) << " " << min_dis_cluster->get_pca().center) << " " << min_dis << " " << small_big_dis_cut << std::endl; if (min_dis < small_big_dis_cut) { @@ -163,7 +263,7 @@ void WireCell::PointCloud::Facade::clustering_isolated(Grouping& live_grouping) used_small_clusters.find(cluster2) != used_small_clusters.end() && used_small_clusters.find(cluster1) == used_small_clusters.end()) { to_be_merged_pairs.insert(std::make_pair(cluster1, cluster2)); - // std::cout << "SD: " << cluster1->get_length()/units::cm << " " << cluster2->get_length()/units::cm << " " << cluster1->get_center() << " " << cluster2->get_center() << std::endl; + // std::cout << "SD: " << cluster1->get_length()/units::cm << " " << cluster2->get_length()/units::cm << " " << cluster1->get_pca().center) << " " << cluster2->get_pca().center) << std::endl; used_small_clusters.insert(cluster1); used_small_clusters.insert(cluster2); } @@ -190,7 +290,7 @@ void WireCell::PointCloud::Facade::clustering_isolated(Grouping& live_grouping) std::tuple results = cluster2->get_closest_points(*cluster1); double dis = std::get<2>(results); if (dis < small_small_dis_cut) { - // std::cout << "SS: "<< cluster1->get_length()/units::cm << " " << cluster2->get_length()/units::cm << " " << cluster1->get_center() << " " << cluster2->get_center() << std::endl; + // std::cout << "SS: "<< cluster1->get_length()/units::cm << " " << cluster2->get_length()/units::cm << " " << cluster1->get_pca().center) << " " << cluster2->get_pca().center) << std::endl; 
to_be_merged_pairs.insert(std::make_pair(cluster1, cluster2)); } } @@ -216,7 +316,7 @@ void WireCell::PointCloud::Facade::clustering_isolated(Grouping& live_grouping) // // cloud2 = cluster2->get_point_cloud(); // if (used_big_clusters.find(cluster2) != used_big_clusters.end()) continue; // // cluster2->Calc_pca(); - // pca_ratio = cluster2->get_pca_value(1) / cluster2->get_pca_value(0); + // pca_ratio = cluster2->get_pca().values.at(1) / cluster2->get_pca().values.at(0); // small_cluster_length = cluster2->get_length(); // } // else { @@ -224,13 +324,13 @@ void WireCell::PointCloud::Facade::clustering_isolated(Grouping& live_grouping) // // cloud2 = cluster1->get_point_cloud(); // if (used_big_clusters.find(cluster1) != used_big_clusters.end()) continue; // // cluster1->Calc_pca(); - // pca_ratio = cluster1->get_pca_value(1) / cluster1->get_pca_value(0); + // pca_ratio = cluster1->get_pca().values.at(1) / cluster1->get_pca().values.at(0); // small_cluster_length = cluster1->get_length(); // } // make sure cluster1 is the longer one if (!(cluster1->get_length() > cluster2->get_length())) std::swap(cluster1, cluster2); if (used_big_clusters.find(cluster2) != used_big_clusters.end()) continue; - pca_ratio = cluster2->get_pca_value(1) / cluster2->get_pca_value(0); + pca_ratio = cluster2->get_pca().values.at(1) / cluster2->get_pca().values.at(0); small_cluster_length = cluster2->get_length(); // std::tuple results = cloud2->get_closest_points(cloud1); @@ -246,7 +346,7 @@ void WireCell::PointCloud::Facade::clustering_isolated(Grouping& live_grouping) // WCP::WCPointCloud &cloud = cloud2->get_cloud(); for (int k = 0; k != N; k++) { // Point test_p1(cloud.pts[k].x, cloud.pts[k].y, cloud.pts[k].z); - geo_point_t test_p1 = cluster2->point(k); + geo_point_t test_p1 = cluster2->point3d(k); // double close_dis = cloud1->get_closest_dis(test_p1); double close_dis = cluster1->get_closest_dis(test_p1); if (close_dis > big_dis_range_cut) { @@ -267,7 +367,7 @@ void 
WireCell::PointCloud::Facade::clustering_isolated(Grouping& live_grouping) if (flag_merge) { to_be_merged_pairs.insert(std::make_pair(cluster1, cluster2)); - // std::cout << "BB: " << cluster1->get_length()/units::cm << " " << cluster2->get_length()/units::cm << " " << cluster1->get_center() << " " << cluster2->get_center() << std::endl; + // std::cout << "BB: " << cluster1->get_length()/units::cm << " " << cluster2->get_length()/units::cm << " " << cluster1->get_pca().center) << " " << cluster2->get_pca().center) << std::endl; if (cluster1->get_length() < cluster2->get_length()) { used_big_clusters.insert(cluster1); @@ -386,8 +486,55 @@ void WireCell::PointCloud::Facade::clustering_isolated(Grouping& live_grouping) } } cluster_set_t temp_clusters; - merge_clusters(g, live_grouping, temp_clusters, "isolated"); + merge_clusters(g, live_grouping, "isolated"); } + // example separation ... + // { + // auto live_clusters = live_grouping.children(); // copy + // for (size_t iclus = 0; iclus < live_clusters.size(); ++iclus) { + // if (!live_clusters.at(iclus)->get_scope_filter(scope)) continue; + + // auto cc = live_clusters.at(iclus)->get_pcarray("isolated", "perblob"); + // // convert span to vector + // std::vector cc_vec(cc.begin(), cc.end()); + // // for (const auto& val : cc_vec) { + // // std::cout << val << " "; + // // } + // // std::cout << std::endl; + // if (cc_vec.size() < 2) continue; + // auto scope = live_clusters.at(iclus)->get_default_scope(); + // auto scope_transform = live_clusters.at(iclus)->get_scope_transform(scope); + // // // origi_cluster still have the original main cluster ... 
+ // // std::cout << "Start: " << orig_cluster->kd_blobs().size() << " " << orig_cluster->nchildren() << std::endl; + // auto splits = live_grouping.separate(live_clusters.at(iclus), cc_vec); + // // std::cout << "Mid: " << orig_cluster->kd_blobs().size() << " " << orig_cluster->nchildren() << std::endl; + + // // Apply the scope filter settings to all new clusters + // for (auto& [id, new_cluster] : splits) { + // new_cluster->set_scope_filter(scope, true); + // new_cluster->set_default_scope(scope); + // new_cluster->set_scope_transform(scope,scope_transform); + // } + // } + // } + + + // { + // auto live_clusters = live_grouping.children(); // copy + // // Process each cluster + // for (size_t iclus = 0; iclus < live_clusters.size(); ++iclus) { + // Cluster* cluster = live_clusters.at(iclus); + // auto& scope = cluster->get_default_scope(); + // std::cout << "Test: " << iclus << " " << cluster->nchildren() << " " << scope.pcname << " " << scope.coords[0] << " " << scope.coords[1] << " " << scope.coords[2] << " " << cluster->get_scope_filter(scope)<< " " << cluster->get_pca().center) << std::endl; + // } + // } + + + + + + + return; } diff --git a/clus/src/clustering_live_dead.cxx b/clus/src/clustering_live_dead.cxx index 97d1bd269..db9cc04ef 100644 --- a/clus/src/clustering_live_dead.cxx +++ b/clus/src/clustering_live_dead.cxx @@ -1,278 +1,263 @@ -#include -#include "WireCellUtil/ExecMon.h" +#include "WireCellClus/IEnsembleVisitor.h" +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellClus/ClusteringFuncsMixins.h" + +#include "WireCellIface/IConfigurable.h" + +#include "WireCellUtil/NamedFactory.h" + +class ClusteringLiveDead; +WIRECELL_FACTORY(ClusteringLiveDead, ClusteringLiveDead, + WireCell::IConfigurable, WireCell::Clus::IEnsembleVisitor) + #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wparentheses" using namespace WireCell; using namespace WireCell::Clus; -using namespace WireCell::Aux; -using namespace WireCell::Aux::TensorDM; 
-using namespace WireCell::PointCloud::Facade; +using namespace WireCell::Clus::Facade; using namespace WireCell::PointCloud::Tree; + +class ClusteringLiveDead : public IConfigurable, public Clus::IEnsembleVisitor, private NeedDV, private NeedScope { + int dead_live_overlap_offset_{2}; +public: + ClusteringLiveDead() {} + virtual ~ClusteringLiveDead() {} + + virtual void configure(const WireCell::Configuration& config) { + NeedDV::configure(config); + NeedScope::configure(config); + + dead_live_overlap_offset_ = get(config, "dead_live_overlap_offset", 2); + } + virtual Configuration default_configuration() const { + Configuration cfg; + return cfg; + } + -void WireCell::PointCloud::Facade::clustering_live_dead( - Grouping& live_grouping, - const Grouping& dead_grouping, - cluster_set_t& cluster_connected_dead, // in/out - const int dead_live_overlap_offset // specific params -) -{ - using spdlog::debug; - - // form map from dead to set of live clusters ... - std::map> dead_live_cluster_mapping; - std::vector dead_cluster_order; - std::map>> dead_live_mcells_mapping; - - std::vector live_clusters = live_grouping.children(); // copy - std::sort(live_clusters.begin(), live_clusters.end(), [](const Cluster *cluster1, const Cluster *cluster2) { - return cluster1->get_length() > cluster2->get_length(); - }); - // sort_clusters(live_clusters); - - // debug block, free to remove it - // { - // std::set seen; - // for (size_t ind=0; indis_connected(*dead, dead_live_overlap_offset); - // - if (blobs.size() > 0) { - // if (dead_live_cluster_mapping.find(dead) == dead_live_cluster_mapping.end()){ - if (dead_live_cluster_mapping.find(dead) == dead_live_cluster_mapping.end()) { - dead_cluster_order.push_back(dead); + virtual void visit(Ensemble& ensemble) const { + using spdlog::debug; + + auto& dead_grouping = *ensemble.with_name("dead").at(0); + + if (dead_grouping.nchildren() == 0) { + // No dead, no live dead. 
*taps temple* + return; + } + + auto& live_grouping = *ensemble.with_name("live").at(0); + + + // { + // auto wpids = live_grouping.wpids(); + // for (const auto& wpid : wpids) { + // int apa = wpid.apa(); + // int face = wpid.face(); + + // std::cout << "Test Number of Points: " << apa << " " << face << " " << live_grouping.get_num_points(apa, face, 0) << " " << live_grouping.get_num_points(apa, face, 1) << " " << live_grouping.get_num_points(apa, face, 2) << std::endl; + + // } + // } + + // check if the grouping's wpid ... + //std::cout << "Live: " << live_grouping.wpids().size() << " " << dead_grouping.wpids().size() << std::endl; + + // Check that groupings has less than one wpid + if (live_grouping.wpids().size() > 1 || dead_grouping.wpids().size() > 1) { + for (const auto& wpid : live_grouping.wpids()) { + std::cout << "Live grouping wpid: " << wpid.name() << std::endl; } - dead_live_cluster_mapping[dead].push_back(live); - dead_live_mcells_mapping[dead].push_back(blobs); - //} - } - } - } + for (const auto& wpid : dead_grouping.wpids()) { + std::cout << "Dead grouping wpid: " << wpid.name() << std::endl; + } + raise("Live %d > 1, Dead %d > 1", live_grouping.wpids().size(), dead_grouping.wpids().size()); + } + auto [drift_dir, angle_u, angle_v, angle_w] = extract_geometry_params(live_grouping, m_dv); + - // // debug code fine-grain debugging - // for (auto it = dead_live_cluster_mapping.begin(); it!= dead_live_cluster_mapping.end(); it++){ - // if (it->second.size()>1){ - // int nlength = 0; - // const Cluster *temp_cluster = 0; - // std::cerr << "Xin: " << it->second.size() << " "; - // for (auto it1 = it->second.begin(); it1 != it->second.end(); it1++){ - // std::cout << (*it1)->get_length()/units::cm << " "; - // if ((*it1)->get_length() > 100*units::cm){ - // nlength ++; - // //temp_cluster = *it1; - // if ((*it1)->get_length() > 200*units::cm) temp_cluster = *it1; - // } - // else temp_cluster = *it1; - // } - // std::cout << std::endl; - // // test a 
special case ... - // if (nlength==2 && it->second.size()==3){ - // std::cout << (*it->first) << " " << temp_cluster << " " << temp_cluster->get_length()/units::cm << std::endl; - // std::cout << "Dead: " << std::endl; - // it->first->print_blobs_info(); - // std::cout << "Live: " << std::endl; - // temp_cluster->print_blobs_info(); - // std::cout << dead_live_overlap_offset << std::endl; - // //auto blob = temp_cluster->get_first_blob(); - // //std::cout << "U: " << blob->u_wire_index_min() << " " << blob->u_wire_index_max() - // // << " V: " << blob->v_wire_index_min() << " " << blob->v_wire_index_max() - // // << " W: " << blob->w_wire_index_min() << " " << blob->w_wire_index_max() - // // << " T: " << blob->slice_index_min() << " " << blob->slice_index_max() - // // << std::endl; - // // auto points = blob->points(); - - // // for (auto it1 = points.begin();it1!=points.end();it1++){ - // // std::cout << (*it1).x() << " " << (*it1).y() << " " << (*it1).z() << std::endl; - // // } - // //live->is_connected(*dead, dead_live_overlap_offset, true); - // } - // } - // } - // //debug end ... - - - - - if (dead_live_cluster_mapping.empty()) { - std::cerr - << "WARNING: clustering_live: empty dead live cluster mapping," - << " ndead=" << dead_clusters.size() - << " nlive=" << live_clusters.size() << std::endl; - } - // prepare a graph ... - typedef cluster_connectivity_graph_t Graph; + // form map from dead to set of live clusters ... + std::map> dead_live_cluster_mapping; + std::vector dead_cluster_order; + std::map>> dead_live_mcells_mapping; - Graph g; - std::unordered_map ilive2desc; // added live index to graph descriptor - std::map map_cluster_index; - for (const Cluster* live : live_grouping.children()) { - size_t ilive = map_cluster_index.size(); - map_cluster_index[live] = ilive; - ilive2desc[ilive] = boost::add_vertex(ilive, g); - } + std::vector live_clusters = live_grouping.children(); // copy - std::set > tested_pairs; - - // start to form edges ... 
- for (const auto& the_dead_cluster : dead_cluster_order) { - // for (auto it = dead_live_cluster_mapping.begin(); it != dead_live_cluster_mapping.end(); it++) { - // const auto& the_dead_cluster = (*it).first; - const auto& connected_live_clusters = dead_live_cluster_mapping[the_dead_cluster]; - const auto& connected_live_mcells = dead_live_mcells_mapping[the_dead_cluster]; - - if (connected_live_clusters.size() > 1) { - // std::cerr << "xin " << connected_live_clusters.size() << " " << connected_live_mcells.size() - // << std::endl; - - for (size_t i = 0; i != connected_live_clusters.size(); i++) { - const auto& cluster_1 = connected_live_clusters.at(i); - const auto& blobs_1 = connected_live_mcells.at(i); - cluster_connected_dead.insert(cluster_1); - - for (size_t j = i + 1; j < connected_live_clusters.size(); j++) { - const auto& cluster_2 = connected_live_clusters.at(j); - // const auto& blobs_2 = connected_live_mcells.at(j); - - // std::cerr << "xin1 " << i << " " << j << " " << blobs_1.size() << " " << - // blobs_2.size() - // << std::endl; - - if (tested_pairs.find(std::make_pair(cluster_1, cluster_2)) == tested_pairs.end()) { - tested_pairs.insert(std::make_pair(cluster_1, cluster_2)); - tested_pairs.insert(std::make_pair(cluster_2, cluster_1)); - - bool flag_merge = false; - - // std::cout << "test: " << cluster_1->get_length()/units::cm << " " << cluster_2->get_length()/units::cm << std::endl; - - const Blob* prev_mcell1 = 0; - const Blob* prev_mcell2 = 0; - const Blob* mcell1 = blobs_1.at(0); - const Blob* mcell2 = 0; - - geo_point_t p1 = mcell1->center_pos(); - std::tie(p1, mcell1) = cluster_1->get_closest_point_blob(p1); - // p1 = temp_pair.first; - // mcell1 = temp_pair.second; - geo_point_t p2(0, 0, 0); - while (mcell1 != prev_mcell1 || mcell2 != prev_mcell2) { - prev_mcell1 = mcell1; - prev_mcell2 = mcell2; - - std::tie(p2, mcell2) = cluster_2->get_closest_point_blob(p1); - std::tie(p1, mcell1) = cluster_1->get_closest_point_blob(p2); - } - 
geo_point_t diff = p1 - p2; - double dis = diff.magnitude(); - - // std::cout << p1 << " " << p2 << " " << dis/units::cm << std::endl; - - if (dis < 60 * units::cm) { - const double length_1 = cluster_1->get_length(); - const double length_2 = cluster_2->get_length(); - - geo_point_t mcell1_center = cluster_1->calc_ave_pos(p1, 5 * units::cm); - geo_point_t dir1 = cluster_1->vhough_transform(mcell1_center, 30 * units::cm); - // protection against angles ... - geo_point_t dir5 = cluster_1->vhough_transform(p1, 30 * units::cm); - if (dir1.angle(dir5) > 120 / 180. * 3.1415926) dir1 = dir1 * (-1); - - geo_point_t mcell2_center = cluster_2->calc_ave_pos(p2, 5 * units::cm); - geo_point_t dir3 = cluster_2->vhough_transform(mcell2_center, 30 * units::cm); - // Protection against angles - geo_point_t dir6 = cluster_2->vhough_transform(p2, 30 * units::cm); - if (dir3.angle(dir6) > 120 / 180. * 3.1415926) dir3 = dir3 * (-1); - - geo_point_t dir2 = mcell2_center - mcell1_center; - geo_point_t dir4 = mcell1_center - mcell2_center; - - double angle_diff1 = (3.1415926 - dir1.angle(dir2)) / 3.1415926 * 180.; // 1 to 2 - double angle_diff2 = (3.1415926 - dir3.angle(dir4)) / 3.1415926 * 180.; // 2 to 1 - double angle_diff3 = (3.1415926 - dir1.angle(dir3)) / 3.1415926 * 180.; // 1 to 2 - - // geo_point_t p3(317.6*units::cm,-98.9*units::cm,927*units::cm); - // p3 = p3 - p1; - // if (p3.magnitude() < 20*units::cm){ - // std::cout << "xin4 " << length_1 / units::cm << " " << length_2 / units::cm - // << " " << std::endl; - // std::cout << "xin5 " << p1 << " " << p2 << " " << mcell1_center << " " << - // mcell2_center - // << " " << dir1 << " " << dir3 << " " << angle_diff1 << " " << angle_diff2 - // << " " << angle_diff3 << std::endl; - - // } - - bool flag_para = false; - - double angle1, angle2, angle3; - if (!flag_merge) { - geo_point_t drift_dir(1, 0, 0); // assuming the drift direction is along X ... 
- angle1 = dir1.angle(drift_dir); - angle2 = dir2.angle(drift_dir); - angle3 = dir3.angle(drift_dir); - - const auto [angle_u, angle_v, angle_w] = cluster_1->grouping()->wire_angles(); - if (fabs(angle1 - 3.1415926 / 2.) < 5 / 180. * 3.1415926 && - fabs(angle2 - 3.1415926 / 2.) < 5 / 180. * 3.1415926 && - fabs(angle3 - 3.1415926 / 2.) < 5 / 180. * 3.1415926) { - if (dis < 10 * units::cm) // if very parallel and close, merge any way - flag_merge = true; - } + for (auto& cluster : live_clusters) { + if (cluster->get_default_scope().hash() != m_scope.hash()) { + cluster->set_default_scope(m_scope); + // std::cout << "Test: Set default scope: " << m_scope.pcname << " " << m_scope.coords[0] << " " << m_scope.coords[1] << " " << m_scope.coords[2] << " " << cluster->get_default_scope().hash() << " " << m_scope.hash() << std::endl; + } + } - if (fabs(angle2 - 3.1415926 / 2.) < 7.5 / 180. * 3.1415926 && - (fabs(angle1 - 3.1415926 / 2.) < 7.5 / 180. * 3.1415926 || - fabs(angle3 - 3.1415926 / 2.) < 7.5 / 180. * 3.1415926) && - fabs(angle1 - 3.1415926 / 2.) + fabs(angle2 - 3.1415926 / 2.) + - fabs(angle3 - 3.1415926 / 2.) < - 25 / 180. 
* 3.1415926) { - flag_para = true; - if (WireCell::PointCloud::Facade::is_angle_consistent( - dir1, dir2, false, 15, angle_u, angle_v, angle_w, 3) && - WireCell::PointCloud::Facade::is_angle_consistent( - dir3, dir2, true, 15, angle_u, angle_v, angle_w, 3)) - flag_merge = true; - } - else { - bool flag_const1 = WireCell::PointCloud::Facade::is_angle_consistent( - dir1, dir2, false, 10, angle_u, angle_v, angle_w, 2); - bool flag_const2 = WireCell::PointCloud::Facade::is_angle_consistent( - dir3, dir2, true, 10, angle_u, angle_v, angle_w, 2); - - if (flag_const1 && flag_const2) { - flag_merge = true; + std::sort(live_clusters.begin(), live_clusters.end(), [](const Cluster *cluster1, const Cluster *cluster2) { + return cluster1->get_length() > cluster2->get_length(); + }); + // sort_clusters(live_clusters); + + auto dead_clusters = dead_grouping.children(); // copy + sort_clusters(dead_clusters); + + for (size_t ilive = 0; ilive < live_clusters.size(); ++ilive) { + const auto& live = live_clusters.at(ilive); + if (!live->get_scope_filter(m_scope)) continue; + for (size_t idead = 0; idead < dead_clusters.size(); ++idead) { + const auto& dead = dead_clusters.at(idead); + + auto blobs = live->is_connected(*dead, dead_live_overlap_offset_); + if (blobs.size() > 0) { + if (dead_live_cluster_mapping.find(dead) == dead_live_cluster_mapping.end()) { + dead_cluster_order.push_back(dead); + } + dead_live_cluster_mapping[dead].push_back(live); + dead_live_mcells_mapping[dead].push_back(blobs); + } + } + } + + /// Does this really need to be announced? + // if (dead_live_cluster_mapping.empty()) { + // std::cerr + // << "WARNING: clustering_live: empty dead live cluster mapping," + // << " ndead=" << dead_clusters.size() + // << " nlive=" << live_clusters.size() << std::endl; + // } + + // prepare a graph ... 
+ typedef cluster_connectivity_graph_t Graph; + + Graph g; + std::unordered_map ilive2desc; // added live index to graph descriptor + std::map map_cluster_index; + for (const Cluster* live : live_grouping.children()) { + size_t ilive = map_cluster_index.size(); + map_cluster_index[live] = ilive; + ilive2desc[ilive] = boost::add_vertex(ilive, g); + } + + std::set > tested_pairs; + + // start to form edges ... + for (const auto& the_dead_cluster : dead_cluster_order) { + const auto& connected_live_clusters = dead_live_cluster_mapping[the_dead_cluster]; + const auto& connected_live_mcells = dead_live_mcells_mapping[the_dead_cluster]; + + if (connected_live_clusters.size() > 1) { + + for (size_t i = 0; i != connected_live_clusters.size(); i++) { + auto cluster_1 = connected_live_clusters.at(i); + const auto& blobs_1 = connected_live_mcells.at(i); + // cluster_connected_dead.insert(cluster_1); + cluster_1->set_flag(Flags::live_dead); + for (size_t j = i + 1; j < connected_live_clusters.size(); j++) { + const auto& cluster_2 = connected_live_clusters.at(j); + + + if (tested_pairs.find(std::make_pair(cluster_1, cluster_2)) == tested_pairs.end()) { + tested_pairs.insert(std::make_pair(cluster_1, cluster_2)); + tested_pairs.insert(std::make_pair(cluster_2, cluster_1)); + + bool flag_merge = false; + const Blob* prev_mcell1 = 0; + const Blob* prev_mcell2 = 0; + const Blob* mcell1 = blobs_1.at(0); + const Blob* mcell2 = 0; + + geo_point_t p1 = mcell1->center_pos(); + std::tie(p1, mcell1) = cluster_1->get_closest_point_blob(p1); + // p1 = temp_pair.first; + // mcell1 = temp_pair.second; + geo_point_t p2(0, 0, 0); + while (mcell1 != prev_mcell1 || mcell2 != prev_mcell2) { + prev_mcell1 = mcell1; + prev_mcell2 = mcell2; + + std::tie(p2, mcell2) = cluster_2->get_closest_point_blob(p1); + std::tie(p1, mcell1) = cluster_1->get_closest_point_blob(p2); + } + geo_point_t diff = p1 - p2; + double dis = diff.magnitude(); + + // std::cout << p1 << " " << p2 << " " << dis/units::cm << 
std::endl; + + if (dis < 60 * units::cm) { + const double length_1 = cluster_1->get_length(); + const double length_2 = cluster_2->get_length(); + + geo_point_t mcell1_center = cluster_1->calc_ave_pos(p1, 5 * units::cm); + geo_point_t dir1 = cluster_1->vhough_transform(mcell1_center, 30 * units::cm); + // protection against angles ... + geo_point_t dir5 = cluster_1->vhough_transform(p1, 30 * units::cm); + if (dir1.angle(dir5) > 120 / 180. * 3.1415926) dir1 = dir1 * (-1); + + geo_point_t mcell2_center = cluster_2->calc_ave_pos(p2, 5 * units::cm); + geo_point_t dir3 = cluster_2->vhough_transform(mcell2_center, 30 * units::cm); + // Protection against angles + geo_point_t dir6 = cluster_2->vhough_transform(p2, 30 * units::cm); + if (dir3.angle(dir6) > 120 / 180. * 3.1415926) dir3 = dir3 * (-1); + + geo_point_t dir2 = mcell2_center - mcell1_center; + geo_point_t dir4 = mcell1_center - mcell2_center; + + double angle_diff1 = (3.1415926 - dir1.angle(dir2)) / 3.1415926 * 180.; // 1 to 2 + double angle_diff2 = (3.1415926 - dir3.angle(dir4)) / 3.1415926 * 180.; // 2 to 1 + double angle_diff3 = (3.1415926 - dir1.angle(dir3)) / 3.1415926 * 180.; // 1 to 2 + + bool flag_para = false; + + double angle1, angle2, angle3; + if (!flag_merge) { + + angle1 = dir1.angle(drift_dir); + angle2 = dir2.angle(drift_dir); + angle3 = dir3.angle(drift_dir); + + if (fabs(angle1 - 3.1415926 / 2.) < 5 / 180. * 3.1415926 && + fabs(angle2 - 3.1415926 / 2.) < 5 / 180. * 3.1415926 && + fabs(angle3 - 3.1415926 / 2.) < 5 / 180. * 3.1415926) { + if (dis < 10 * units::cm) // if very parallel and close, merge any way + flag_merge = true; } - else if (flag_const1 && length_2 < 6 * units::cm && length_1 > 15 * units::cm) { - if (WireCell::PointCloud::Facade::is_angle_consistent( - dir1, dir2, false, 5, angle_u, angle_v, angle_w, 3)) + + if (fabs(angle2 - 3.1415926 / 2.) < 7.5 / 180. * 3.1415926 && + (fabs(angle1 - 3.1415926 / 2.) < 7.5 / 180. * 3.1415926 || + fabs(angle3 - 3.1415926 / 2.) < 7.5 / 180. 
* 3.1415926) && + fabs(angle1 - 3.1415926 / 2.) + fabs(angle2 - 3.1415926 / 2.) + + fabs(angle3 - 3.1415926 / 2.) < + 25 / 180. * 3.1415926) { + flag_para = true; + + if (WireCell::Clus::Facade::is_angle_consistent( + dir1, dir2, false, 15, angle_u, angle_v, angle_w, 3) && + WireCell::Clus::Facade::is_angle_consistent( + dir3, dir2, true, 15, angle_u, angle_v, angle_w, 3)) flag_merge = true; } - else if (flag_const2 && length_1 < 6 * units::cm && length_2 > 15 * units::cm) { - if (WireCell::PointCloud::Facade::is_angle_consistent( - dir3, dir2, true, 5, angle_u, angle_v, angle_w, 3)) + else { + bool flag_const1 = WireCell::Clus::Facade::is_angle_consistent( + dir1, dir2, false, 10, angle_u, angle_v, angle_w, 2); + bool flag_const2 = WireCell::Clus::Facade::is_angle_consistent( + dir3, dir2, true, 10, angle_u, angle_v, angle_w, 2); + + if (flag_const1 && flag_const2) { flag_merge = true; + } + else if (flag_const1 && length_2 < 6 * units::cm && length_1 > 15 * units::cm) { + if (WireCell::Clus::Facade::is_angle_consistent( + dir1, dir2, false, 5, angle_u, angle_v, angle_w, 3)) + flag_merge = true; + } + else if (flag_const2 && length_1 < 6 * units::cm && length_2 > 15 * units::cm) { + if (WireCell::Clus::Facade::is_angle_consistent( + dir3, dir2, true, 5, angle_u, angle_v, angle_w, 3)) + flag_merge = true; + } } } - } // This block of code comes from the prototype and should have parentheses // applied to make the logic explicit but nobody wants to do that so we tell the @@ -280,76 +265,91 @@ void WireCell::PointCloud::Facade::clustering_live_dead( #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wparentheses" - if (!flag_merge) { - if (length_1 <= 12 * units::cm && length_2 <= 12 * units::cm) { - // both are short - if ((dis <= 3 * units::cm) && + if (!flag_merge) { + if (length_1 <= 12 * units::cm && length_2 <= 12 * units::cm) { + // both are short + if ((dis <= 3 * units::cm) && ((angle_diff1 <= 45 || angle_diff2 <= 45) && (angle_diff3 < 60) || 
(flag_para && (angle_diff1 <= 90 || angle_diff2 <= 90) && angle_diff3 < 120)) || - (dis <= 5 * units::cm) && (angle_diff1 <= 30 || angle_diff2 <= 30) && + (dis <= 5 * units::cm) && (angle_diff1 <= 30 || angle_diff2 <= 30) && angle_diff3 < 45 || - (dis <= 15 * units::cm) && (angle_diff1 <= 15 || angle_diff2 <= 15) && + (dis <= 15 * units::cm) && (angle_diff1 <= 15 || angle_diff2 <= 15) && angle_diff3 < 20 || - (dis <= 60 * units::cm) && (angle_diff1 < 5 || angle_diff2 < 5) && + (dis <= 60 * units::cm) && (angle_diff1 < 5 || angle_diff2 < 5) && angle_diff3 < 10) { - flag_merge = true; + flag_merge = true; + } } - } - else if (length_1 > 12 * units::cm && length_2 <= 12 * units::cm) { - // one is short - if ((dis <= 3 * units::cm) && + else if (length_1 > 12 * units::cm && length_2 <= 12 * units::cm) { + // one is short + if ((dis <= 3 * units::cm) && ((angle_diff1 <= 45 || angle_diff2 <= 45) && (angle_diff3 < 60) || (flag_para && (angle_diff1 <= 90 || angle_diff2 <= 90) && angle_diff3 < 120)) || - dis <= 5 * units::cm && angle_diff1 <= 30 && angle_diff3 < 60 || - dis <= 15 * units::cm && (angle_diff1 <= 20) && angle_diff3 < 40 || - (angle_diff1 < 10 && dis <= 60 * units::cm && angle_diff3 < 15)) - flag_merge = true; - } - else if (length_2 > 12 * units::cm && length_1 <= 12 * units::cm) { - // one is short - if ((dis <= 3 * units::cm) && + dis <= 5 * units::cm && angle_diff1 <= 30 && angle_diff3 < 60 || + dis <= 15 * units::cm && (angle_diff1 <= 20) && angle_diff3 < 40 || + (angle_diff1 < 10 && dis <= 60 * units::cm && angle_diff3 < 15)) + flag_merge = true; + } + else if (length_2 > 12 * units::cm && length_1 <= 12 * units::cm) { + // one is short + if ((dis <= 3 * units::cm) && ((angle_diff1 <= 45 || angle_diff2 <= 45) && (angle_diff3 < 60) || (flag_para && (angle_diff1 <= 90 || angle_diff2 <= 90) && angle_diff3 < 120)) || - dis <= 5 * units::cm && angle_diff2 <= 30 && angle_diff3 < 60 || - dis <= 15 * units::cm && (angle_diff2 <= 20) && angle_diff3 < 40 || - 
(angle_diff2 < 10 && dis <= 60 * units::cm && angle_diff3 < 15)) - flag_merge = true; - } - else { - // both are long - if ((dis <= 3 * units::cm) && + dis <= 5 * units::cm && angle_diff2 <= 30 && angle_diff3 < 60 || + dis <= 15 * units::cm && (angle_diff2 <= 20) && angle_diff3 < 40 || + (angle_diff2 < 10 && dis <= 60 * units::cm && angle_diff3 < 15)) + flag_merge = true; + } + else { + // both are long + if ((dis <= 3 * units::cm) && ((angle_diff1 <= 45 || angle_diff2 <= 45) && (angle_diff3 < 60) || (flag_para && (angle_diff1 <= 90 || angle_diff2 <= 90) && angle_diff3 < 120)) || - dis <= 5 * units::cm && (angle_diff1 <= 30 || angle_diff2 <= 30) && + dis <= 5 * units::cm && (angle_diff1 <= 30 || angle_diff2 <= 30) && angle_diff3 < 45 || - (dis <= 15 * units::cm) && (angle_diff1 <= 20 || angle_diff2 <= 20) && + (dis <= 15 * units::cm) && (angle_diff1 <= 20 || angle_diff2 <= 20) && angle_diff3 < 30 || - (angle_diff1 < 10 || angle_diff2 < 10) && (dis <= 60 * units::cm) && + (angle_diff1 < 10 || angle_diff2 < 10) && (dis <= 60 * units::cm) && angle_diff3 < 15) - flag_merge = true; + flag_merge = true; + } } - } #pragma GCC diagnostic pop - } + } - if (flag_merge) { - boost::add_edge(ilive2desc[map_cluster_index[cluster_1]], - ilive2desc[map_cluster_index[cluster_2]], g); - // std::cout << "success: " << cluster_1->get_length()/units::cm << " " << cluster_2->get_length()/units::cm << std::endl; - } - } // if (tested_pairs....) - } // j - } // i - } //if(connected_live_clusters.size()>1) - } + if (flag_merge) { + boost::add_edge(ilive2desc[map_cluster_index[cluster_1]], + ilive2desc[map_cluster_index[cluster_2]], g); + } + } // if (tested_pairs....) + } // j + } // i + } //if(connected_live_clusters.size()>1) + } + + // new function to merge clusters ... + merge_clusters(g, live_grouping); + - // new function to merge clusters ... 
- merge_clusters(g, live_grouping, cluster_connected_dead); -} + + + + + + + // live_clusters = live_grouping.children(); // copy + // // Process each cluster + // for (size_t iclus = 0; iclus < live_clusters.size(); ++iclus) { + // Cluster* cluster = live_clusters.at(iclus); + // auto& scope = cluster->get_default_scope(); + // std::cout << "Test: " << iclus << " " << cluster->nchildren() << " " << scope.pcname << " " << scope.coords[0] << " " << scope.coords[1] << " " << scope.coords[2] << " " << cluster->get_scope_filter(scope)<< " " << cluster->get_pca().center) << std::endl; + // } + } +}; #pragma GCC diagnostic pop diff --git a/clus/src/clustering_neutrino.cxx b/clus/src/clustering_neutrino.cxx index 76b2a1ced..b25d6b3fc 100644 --- a/clus/src/clustering_neutrino.cxx +++ b/clus/src/clustering_neutrino.cxx @@ -1,32 +1,158 @@ -#include +#include "WireCellClus/IEnsembleVisitor.h" +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellClus/ClusteringFuncsMixins.h" + +#include "WireCellIface/IConfigurable.h" + +#include "WireCellUtil/NamedFactory.h" + +class ClusteringNeutrino; +WIRECELL_FACTORY(ClusteringNeutrino, ClusteringNeutrino, + WireCell::IConfigurable, WireCell::Clus::IEnsembleVisitor) -// The original developers do not care. 
-#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wparentheses" using namespace WireCell; using namespace WireCell::Clus; +using namespace WireCell::Clus::Facade; using namespace WireCell::Aux; -using namespace WireCell::Aux::TensorDM; -using namespace WireCell::PointCloud::Facade; using namespace WireCell::PointCloud::Tree; -void WireCell::PointCloud::Facade::clustering_neutrino(Grouping &live_grouping, int num_try) +static void clustering_neutrino( + Grouping &live_grouping, + int num_try, + IDetectorVolumes::pointer dv, + const Tree::Scope& scope + ); + +class ClusteringNeutrino : public IConfigurable, public Clus::IEnsembleVisitor, private NeedDV, private NeedScope { +public: + ClusteringNeutrino() {} + virtual ~ClusteringNeutrino() {} + + void configure(const WireCell::Configuration& config) { + NeedDV::configure(config); + NeedScope::configure(config); + + num_try_ = get(config, "num_try", 1); + } + + void visit(Ensemble& ensemble) const { + auto& live = *ensemble.with_name("live").at(0); + for (int i = 0; i != num_try_; i++) { + clustering_neutrino(live, i, m_dv, m_scope); + } + } + +private: + int num_try_{1}; +}; + +// The original developers do not care. 
+#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wparentheses" + +// handle all APA/Face +static void clustering_neutrino( + Grouping &live_grouping, + int num_try, + IDetectorVolumes::pointer dv, + const Tree::Scope& scope) { + // Get all the wire plane IDs from the grouping + const auto& wpids = live_grouping.wpids(); + // Key: pair, Value: drift_dir, angle_u, angle_v, angle_w + std::map> wpid_params; + std::set apas; + for (const auto& wpid : wpids) { + int apa = wpid.apa(); + int face = wpid.face(); + apas.insert(apa); + + // Create wpids for all three planes with this APA and face + WirePlaneId wpid_u(kUlayer, face, apa); + WirePlaneId wpid_v(kVlayer, face, apa); + WirePlaneId wpid_w(kWlayer, face, apa); + + // Get drift direction based on face orientation + int face_dirx = dv->face_dirx(wpid_u); + geo_point_t drift_dir(face_dirx, 0, 0); + + // Get wire directions for all planes + Vector wire_dir_u = dv->wire_direction(wpid_u); + Vector wire_dir_v = dv->wire_direction(wpid_v); + Vector wire_dir_w = dv->wire_direction(wpid_w); + + // Calculate angles + double angle_u = std::atan2(wire_dir_u.z(), wire_dir_u.y()); + double angle_v = std::atan2(wire_dir_v.z(), wire_dir_v.y()); + double angle_w = std::atan2(wire_dir_w.z(), wire_dir_w.y()); + + wpid_params[wpid] = std::make_tuple(drift_dir, angle_u, angle_v, angle_w); + } + + std::vector live_clusters = live_grouping.children(); // copy // sort the clusters by length using a lambda function from long cluster to short cluster std::sort(live_clusters.begin(), live_clusters.end(), [](const Cluster *cluster1, const Cluster *cluster2) { return cluster1->get_length() > cluster2->get_length(); }); - const auto &mp = live_grouping.get_params(); + + // const auto &mp = live_grouping.get_params(); // this is for 4 time slices - double time_slice_width = mp.nticks_live_slice * mp.tick_drift; + // double time_slice_width = mp.nticks_live_slice * mp.tick_drift; + + // get wpids ... 
+ std::map map_wpid_time_slice_width; + std::map map_FV_xmin; + std::map map_FV_xmax; + std::map map_FV_xmin_margin; + std::map map_FV_xmax_margin; + for (const auto& wpid : wpids) { + map_wpid_time_slice_width[wpid] = dv->metadata(wpid)["nticks_live_slice"].asDouble() * dv->metadata(wpid)["tick_drift"].asDouble() ; + map_FV_xmin[wpid] = dv->metadata(wpid)["FV_xmin"].asDouble() ; + map_FV_xmax[wpid] = dv->metadata(wpid)["FV_xmax"].asDouble() ; + map_FV_xmin_margin[wpid] = dv->metadata(wpid)["FV_xmin_margin"].asDouble() ; + map_FV_xmax_margin[wpid] = dv->metadata(wpid)["FV_xmax_margin"].asDouble() ; + } + WirePlaneId wpid_all(0); + double det_FV_xmin = dv->metadata(wpid_all)["FV_xmin"].asDouble(); + double det_FV_xmax = dv->metadata(wpid_all)["FV_xmax"].asDouble(); + double det_FV_ymin = dv->metadata(wpid_all)["FV_ymin"].asDouble(); + double det_FV_ymax = dv->metadata(wpid_all)["FV_ymax"].asDouble(); + double det_FV_zmin = dv->metadata(wpid_all)["FV_zmin"].asDouble(); + double det_FV_zmax = dv->metadata(wpid_all)["FV_zmax"].asDouble(); + // double det_FV_xmin_margin = dv->metadata(wpid_all)["FV_xmin_margin"].asDouble(); + // double det_FV_xmax_margin = dv->metadata(wpid_all)["FV_xmax_margin"].asDouble(); + + + + + + // Get drift direction from the first element of wpid_params, + // in the current code, we do not care about the actual direction of drift_dir, so just picking up the first instance + geo_point_t drift_dir_abs(1,0,0); - geo_point_t drift_dir(1, 0, 0); geo_point_t vertical_dir(0, 1, 0); geo_point_t beam_dir(0, 0, 1); + // Get vertical_dir from metadata + Json::Value vertical_dir_json = dv->metadata(wpid_all)["vertical_dir"]; + Json::Value beam_dir_json = dv->metadata(wpid_all)["beam_dir"]; + if (!vertical_dir_json.isNull() && vertical_dir_json.isArray() && vertical_dir_json.size() >= 3) { + vertical_dir = geo_point_t( + vertical_dir_json[0].asDouble(), + vertical_dir_json[1].asDouble(), + vertical_dir_json[2].asDouble() + ); + } + if 
(!beam_dir_json.isNull() && beam_dir_json.isArray() && beam_dir_json.size() >= 3) { + beam_dir = geo_point_t( + beam_dir_json[0].asDouble(), + beam_dir_json[1].asDouble(), + beam_dir_json[2].asDouble() + ); + } // find all the clusters that are inside the box ... std::vector contained_clusters; @@ -34,6 +160,12 @@ void WireCell::PointCloud::Facade::clustering_neutrino(Grouping &live_grouping, for (size_t i = 0; i != live_clusters.size(); i++) { Cluster *cluster = live_clusters.at(i); + if (!cluster->get_scope_filter(scope)) continue; + if (cluster->get_default_scope().hash() != scope.hash()) { + cluster->set_default_scope(scope); + // std::cout << "Test: Set default scope: " << pc_name << " " << coords[0] << " " << coords[1] << " " << coords[2] << " " << cluster->get_default_scope().hash() << " " << scope.hash() << std::endl; + } + // cluster->Create_point_cloud(); std::pair hl_wcps = cluster->get_highest_lowest_points(); std::pair fb_wcps = cluster->get_front_back_points(); @@ -43,7 +175,7 @@ void WireCell::PointCloud::Facade::clustering_neutrino(Grouping &live_grouping, // el_wcps.first.x()/units::cm << " " << el_wcps.second.x()/units::cm << std::endl; // if (el_wcps.first.x() < -1 * units::cm || el_wcps.second.x() > 257 * units::cm || - if (el_wcps.first.x() < mp.FV_xmin - mp.FV_xmin_margin || el_wcps.second.x() > mp.FV_xmax + mp.FV_xmax_margin || cluster->get_length() < 6.0 * units::cm) + if (el_wcps.first.x() < map_FV_xmin.begin()->second - map_FV_xmin_margin.begin()->second || el_wcps.second.x() > map_FV_xmax.begin()->second + map_FV_xmax_margin.begin()->second || cluster->get_length() < 6.0 * units::cm) continue; bool flag_fy = false; @@ -54,17 +186,17 @@ void WireCell::PointCloud::Facade::clustering_neutrino(Grouping &live_grouping, bool flag_bz = false; std::vector saved_wcps; - if (hl_wcps.first.y() > mp.FV_ymax) { + if (hl_wcps.first.y() > det_FV_ymax) { saved_wcps.push_back(hl_wcps.first); flag_fy = true; } - if (hl_wcps.second.y() < mp.FV_ymin) { + if 
(hl_wcps.second.y() < det_FV_ymin) { saved_wcps.push_back(hl_wcps.second); flag_by = true; } - if (fb_wcps.first.z() > mp.FV_zmax) { + if (fb_wcps.first.z() > det_FV_zmax) { bool flag_save = true; for (size_t j = 0; j != saved_wcps.size(); j++) { double dis = sqrt(pow(saved_wcps.at(j).x() - fb_wcps.first.x(), 2) + @@ -81,7 +213,7 @@ void WireCell::PointCloud::Facade::clustering_neutrino(Grouping &live_grouping, } } - if (fb_wcps.second.z() < mp.FV_zmin) { + if (fb_wcps.second.z() < det_FV_zmin) { bool flag_save = true; for (size_t j = 0; j != saved_wcps.size(); j++) { double dis = sqrt(pow(saved_wcps.at(j).x() - fb_wcps.second.x(), 2) + @@ -98,7 +230,7 @@ void WireCell::PointCloud::Facade::clustering_neutrino(Grouping &live_grouping, } } - if (el_wcps.first.x() < mp.FV_xmin) { + if (el_wcps.first.x() < det_FV_xmin) { bool flag_save = true; for (size_t j = 0; j != saved_wcps.size(); j++) { double dis = sqrt(pow(saved_wcps.at(j).x() - el_wcps.first.x(), 2) + @@ -115,7 +247,7 @@ void WireCell::PointCloud::Facade::clustering_neutrino(Grouping &live_grouping, } } - if (el_wcps.second.x() > mp.FV_xmax) { + if (el_wcps.second.x() > det_FV_xmax) { bool flag_save = true; for (size_t j = 0; j != saved_wcps.size(); j++) { double dis = sqrt(pow(saved_wcps.at(j).x() - el_wcps.second.x(), 2) + @@ -148,9 +280,11 @@ void WireCell::PointCloud::Facade::clustering_neutrino(Grouping &live_grouping, // calculate the closest distance??? ... 
for (size_t i = 0; i != live_clusters.size(); i++) { Cluster *cluster1 = live_clusters.at(i); + if (!cluster1->get_scope_filter(scope)) continue; // ToyPointCloud *cloud1 = cluster1->get_point_cloud(); for (size_t j = i + 1; j != live_clusters.size(); j++) { Cluster *cluster2 = live_clusters.at(j); + if (!cluster2->get_scope_filter(scope)) continue; // ToyPointCloud *cloud2 = cluster2->get_point_cloud(); // std::tuple results = cloud2->get_closest_points(cloud1); @@ -203,8 +337,8 @@ void WireCell::PointCloud::Facade::clustering_neutrino(Grouping &live_grouping, // can not be the same if (cluster2 == cluster1) continue; if (cluster2->get_length() > 150 * units::cm) { - geo_point_t dir1(cluster2->get_pca_axis(0).x(), cluster2->get_pca_axis(0).y(), - cluster2->get_pca_axis(0).z()); + geo_point_t dir1(cluster2->get_pca().axis.at(0).x(), cluster2->get_pca().axis.at(0).y(), + cluster2->get_pca().axis.at(0).z()); if (fabs(dir1.angle(vertical_dir) - 3.1415926 / 2.) / 3.1415926 * 180. > 80) continue; } // cluster2->Create_point_cloud(); @@ -217,9 +351,9 @@ void WireCell::PointCloud::Facade::clustering_neutrino(Grouping &live_grouping, if (cluster_cloud_map.find(cluster1) == cluster_cloud_map.end()) { // cluster1->Calc_PCA(); - geo_point_t center = cluster1->get_center(); - geo_point_t main_dir(cluster1->get_pca_axis(0).x(), cluster1->get_pca_axis(0).y(), - cluster1->get_pca_axis(0).z()); + geo_point_t center = cluster1->get_pca().center; + geo_point_t main_dir(cluster1->get_pca().axis.at(0).x(), cluster1->get_pca().axis.at(0).y(), + cluster1->get_pca().axis.at(0).z()); main_dir = main_dir.norm(); // ToyPointCloud *cloud1_ext = new ToyPointCloud(angle_u, angle_v, angle_w); @@ -248,14 +382,17 @@ void WireCell::PointCloud::Facade::clustering_neutrino(Grouping &live_grouping, if (cluster1->nnearby(extreme_pts.first, 15 * units::cm) <= 75 && cluster1->npoints() > 75 || cluster1->nnearby(extreme_pts.second, 15 * units::cm) <= 75 && cluster1->npoints() > 75 || - 
cluster1->get_pca_value(1) > 0.022 * cluster1->get_pca_value(0) && + cluster1->get_pca().values.at(1) > 0.022 * cluster1->get_pca().values.at(0) && cluster1->get_length() > 45 * units::cm) { // std::vector sep_clusters = Separate_2(cluster1, 2.5 * units::cm); const double orig_cluster_length = cluster1->get_length(); // std::cout << "[neutrino] cluster1->npoints() " << cluster1->npoints() << " " << cluster1->point(0) << std::endl; - const auto b2id = Separate_2(cluster1, 2.5 * units::cm); + const auto b2id = Separate_2(cluster1, scope, 2.5 * units::cm); // false: do not remove the cluster1 + auto scope_transform = cluster1->get_scope_transform(scope); auto sep_clusters = live_grouping.separate(cluster1, b2id, false); + + assert(cluster1 != nullptr); Cluster *largest_cluster = 0; int max_num_points = 0; @@ -267,9 +404,9 @@ void WireCell::PointCloud::Facade::clustering_neutrino(Grouping &live_grouping, } temp_extreme_pts = largest_cluster->get_two_extreme_points(); - center = largest_cluster->get_center(); - main_dir.set(largest_cluster->get_pca_axis(0).x(), largest_cluster->get_pca_axis(0).y(), - largest_cluster->get_pca_axis(0).z()); + center = largest_cluster->get_pca().center; + main_dir.set(largest_cluster->get_pca().axis.at(0).x(), largest_cluster->get_pca().axis.at(0).y(), + largest_cluster->get_pca().axis.at(0).z()); num_clusters = sep_clusters.size(); // largest_cluster->Create_point_cloud(); @@ -310,14 +447,14 @@ void WireCell::PointCloud::Facade::clustering_neutrino(Grouping &live_grouping, if (cluster1->nnearby(extreme_pts.first, 15 * units::cm) <= 75 && cluster1->get_length() > 60 * units::cm || flag_enable_temp && num_clusters >= 4 && - cluster1->get_pca_value(1) > 0.022 * cluster1->get_pca_value(0)) + cluster1->get_pca().values.at(1) > 0.022 * cluster1->get_pca().values.at(0)) flag_add1 = false; bool flag_add2 = true; if (cluster1->nnearby(extreme_pts.second, 15 * units::cm) <= 75 && cluster1->get_length() > 60 * units::cm || flag_enable_temp && 
num_clusters >= 4 && - cluster1->get_pca_value(1) > 0.022 * cluster1->get_pca_value(0)) + cluster1->get_pca().values.at(1) > 0.022 * cluster1->get_pca().values.at(0)) flag_add2 = false; for (size_t j = 0; j != 150; j++) { @@ -393,9 +530,9 @@ void WireCell::PointCloud::Facade::clustering_neutrino(Grouping &live_grouping, if (cluster_cloud_map.find(cluster2) == cluster_cloud_map.end()) { // cluster2->Calc_PCA(); - geo_point_t center = cluster2->get_center(); - geo_point_t main_dir(cluster2->get_pca_axis(0).x(), cluster2->get_pca_axis(0).y(), - cluster2->get_pca_axis(0).z()); + geo_point_t center = cluster2->get_pca().center; + geo_point_t main_dir(cluster2->get_pca().axis.at(0).x(), cluster2->get_pca().axis.at(0).y(), + cluster2->get_pca().axis.at(0).z()); main_dir = main_dir.norm(); // ToyPointCloud *cloud2_ext = new ToyPointCloud(angle_u, angle_v, angle_w); @@ -424,13 +561,15 @@ void WireCell::PointCloud::Facade::clustering_neutrino(Grouping &live_grouping, if (cluster2->nnearby(extreme_pts.first, 15 * units::cm) <= 75 && cluster2->npoints() > 75 || cluster2->nnearby(extreme_pts.second, 15 * units::cm) <= 75 && cluster2->npoints() > 75 || - cluster2->get_pca_value(1) > 0.022 * cluster2->get_pca_value(0) && + cluster2->get_pca().values.at(1) > 0.022 * cluster2->get_pca().values.at(0) && cluster2->get_length() > 45 * units::cm) { // std::vector sep_clusters = Separate_2(cluster2, 2.5 * units::cm); // std::cout << "[neutrino] cluster2->npoints() " << cluster2->npoints() << " " << cluster2->point(0) << std::endl; const double orig_cluster_length = cluster2->get_length(); - const auto b2id = Separate_2(cluster2, 2.5 * units::cm); + const auto b2id = Separate_2(cluster2, scope, 2.5 * units::cm); + auto scope_transform = cluster2->get_scope_transform(scope); auto sep_clusters = live_grouping.separate(cluster2, b2id, false); + assert(cluster2 != nullptr); Cluster *largest_cluster = 0; int max_num_points = 0; @@ -441,9 +580,9 @@ void 
WireCell::PointCloud::Facade::clustering_neutrino(Grouping &live_grouping, } } temp_extreme_pts = largest_cluster->get_two_extreme_points(); - center = largest_cluster->get_center(); - main_dir.set(largest_cluster->get_pca_axis(0).x(), largest_cluster->get_pca_axis(0).y(), - largest_cluster->get_pca_axis(0).z()); + center = largest_cluster->get_pca().center; + main_dir.set(largest_cluster->get_pca().axis.at(0).x(), largest_cluster->get_pca().axis.at(0).y(), + largest_cluster->get_pca().axis.at(0).z()); num_clusters = sep_clusters.size(); // largest_cluster->Create_point_cloud(); @@ -485,13 +624,13 @@ void WireCell::PointCloud::Facade::clustering_neutrino(Grouping &live_grouping, if (cluster2->nnearby(extreme_pts.first, 15 * units::cm) <= 75 && cluster2->get_length() > 60 * units::cm || flag_enable_temp && num_clusters >= 4 && - cluster2->get_pca_value(1) > 0.022 * cluster2->get_pca_value(0)) + cluster2->get_pca().values.at(1) > 0.022 * cluster2->get_pca().values.at(0)) flag_add1 = false; bool flag_add2 = true; if (cluster2->nnearby(extreme_pts.second, 15 * units::cm) <= 75 && cluster2->get_length() > 60 * units::cm || flag_enable_temp && num_clusters >= 4 && - cluster2->get_pca_value(1) > 0.022 * cluster2->get_pca_value(0)) + cluster2->get_pca().values.at(1) > 0.022 * cluster2->get_pca().values.at(0)) flag_add2 = false; // std::cout << flag_add1 << " " << flag_add2 << " " << dir1.x() << " " << dir1.y() << " " << @@ -585,6 +724,7 @@ void WireCell::PointCloud::Facade::clustering_neutrino(Grouping &live_grouping, double dis1 = std::get<2>(results_1); double dis2 = cluster1->get_closest_dis(test_pt); + // drift_dir +x, -x the same ... if (dis1 < std::min(std::max(4.5 * units::cm, dis2 * sin(15 / 180. * 3.1415926)), 12 * units::cm) && (cluster2->get_length() > 25 * units::cm || cluster1->get_length() <= cluster2->get_length()) || dis1 < std::min(std::max(2.5 * units::cm, dis2 * sin(10 / 180. 
* 3.1415926)), 10 * units::cm) || @@ -593,8 +733,8 @@ void WireCell::PointCloud::Facade::clustering_neutrino(Grouping &live_grouping, cluster2->get_length() > 15 * units::cm || cluster1->get_length() > 45 * units::cm && dis1 < 16 * units::cm && fabs(test_pt.x() - test_pt1.x()) < 3.2 * units::cm && - (fabs(drift_dir.angle(cluster_dir1_map[cluster1]) - 3.1415926 / 2.) / 3.1415926 * 180. < 5 || - fabs(drift_dir.angle(cluster_dir2_map[cluster1]) - 3.1415926 / 2.) / 3.1415926 * 180. < 5)) { + (fabs(drift_dir_abs.angle(cluster_dir1_map[cluster1]) - 3.1415926 / 2.) / 3.1415926 * 180. < 5 || + fabs(drift_dir_abs.angle(cluster_dir2_map[cluster1]) - 3.1415926 / 2.) / 3.1415926 * 180. < 5)) { // std::cout << test_pt1.x()/units::cm << " " << test_pt1.y()/units::cm << " " << // test_pt1.z()/units::cm // << std::endl; @@ -618,24 +758,24 @@ void WireCell::PointCloud::Facade::clustering_neutrino(Grouping &live_grouping, if ((angle_diff1 > 65 || angle_diff2 > 65) && (dis * sin((90 - angle_diff1) / 180. * 3.1415926) < 4.5 * units::cm || dis * sin((90 - angle_diff2) / 180. * 3.1415926) < 4.5 * units::cm)) { - if (!cluster2->judge_vertex(test_pt1)) { + if (!cluster2->judge_vertex(test_pt1, dv)) { test_pt1 = test_pt2; } } } if (cluster1->get_length() > 25 * units::cm && - cluster1->get_pca_value(1) < 0.0015 * cluster1->get_pca_value(0)) { + cluster1->get_pca().values.at(1) < 0.0015 * cluster1->get_pca().values.at(0)) { flag_merge = false; if (dis < 0.5 * units::cm && dis1 < 1.5 * units::cm && dis2 < 1.5 * units::cm) - flag_merge = cluster2->judge_vertex(test_pt1, 0.5, 0.6); + flag_merge = cluster2->judge_vertex(test_pt1, dv, 0.5, 0.6); } else { if (cluster2->get_length() < 30 * units::cm) { flag_merge = true; if (cluster1->get_length() > 15 * units::cm && - cluster1->get_pca_value(1) < 0.012 * cluster1->get_pca_value(0)) { + cluster1->get_pca().values.at(1) < 0.012 * cluster1->get_pca().values.at(0)) { if (dis1 > std::max(2.5 * units::cm, dis2 * sin(7.5 / 180. 
* 3.1415926))) flag_merge = false; } @@ -644,44 +784,44 @@ void WireCell::PointCloud::Facade::clustering_neutrino(Grouping &live_grouping, flag_merge = false; } } - else if (JudgeSeparateDec_1(cluster2, drift_dir, cluster2->get_length(), time_slice_width)) { + else if (JudgeSeparateDec_1(cluster2, drift_dir_abs, cluster2->get_length())) { if (dis2 < 5 * units::cm) { - flag_merge = cluster2->judge_vertex(test_pt1, 2. / 3.); + flag_merge = cluster2->judge_vertex(test_pt1,dv, 2. / 3.); } else if (dis < 0.5 * units::cm) { - flag_merge = cluster2->judge_vertex(test_pt1, 0.5, 0.6); + flag_merge = cluster2->judge_vertex(test_pt1, dv, 0.5, 0.6); } else { - flag_merge = cluster2->judge_vertex(test_pt1); + flag_merge = cluster2->judge_vertex(test_pt1, dv); } if (cluster1->get_length() > 15 * units::cm && - cluster1->get_pca_value(1) < 0.012 * cluster1->get_pca_value(0)) { + cluster1->get_pca().values.at(1) < 0.012 * cluster1->get_pca().values.at(0)) { if (dis1 > std::max(2.5 * units::cm, dis2 * sin(7.5 / 180. * 3.1415926))) flag_merge = false; } } else { if (dis2 < 5 * units::cm) { - flag_merge = cluster2->judge_vertex(test_pt1, 2. / 3.); + flag_merge = cluster2->judge_vertex(test_pt1, dv, 2. / 3.); } else if (dis < 0.5 * units::cm) { - flag_merge = cluster2->judge_vertex(test_pt1, 0.5, 0.6); + flag_merge = cluster2->judge_vertex(test_pt1, dv, 0.5, 0.6); } else { - flag_merge = cluster2->judge_vertex(test_pt1); + flag_merge = cluster2->judge_vertex(test_pt1,dv ); } if (cluster1->get_length() > 15 * units::cm && - cluster1->get_pca_value(1) < 0.012 * cluster1->get_pca_value(0)) { + cluster1->get_pca().values.at(1) < 0.012 * cluster1->get_pca().values.at(0)) { if (dis1 > std::max(3.5 * units::cm, dis2 * sin(7.5 / 180. 
* 3.1415926))) flag_merge = false; } if (flag_merge && cluster2->get_length() > 200 * units::cm && dis2 < 12 * units::cm && - cluster2->get_pca_value(1) < 0.0015 * cluster2->get_pca_value(0)) { - geo_point_t cluster2_dir(cluster2->get_pca_axis(0).x(), cluster2->get_pca_axis(0).y(), - cluster2->get_pca_axis(0).z()); + cluster2->get_pca().values.at(1) < 0.0015 * cluster2->get_pca().values.at(0)) { + geo_point_t cluster2_dir(cluster2->get_pca().axis.at(0).x(), cluster2->get_pca().axis.at(0).y(), + cluster2->get_pca().axis.at(0).z()); if (fabs(cluster2_dir.angle(vertical_dir) / 3.1415926 * 180. - 3.1415926 / 2.) / 3.1415926 * 180. > 45 && @@ -696,7 +836,7 @@ void WireCell::PointCloud::Facade::clustering_neutrino(Grouping &live_grouping, if (cluster_close_cluster_map[cluster1].second < 1.2 * units::cm && cluster_close_cluster_map[cluster1].first != cluster2 && cluster_close_cluster_map[cluster1].first->get_length() > 60 * units::cm && - cluster1->get_pca_value(1) > 0.012 * cluster1->get_pca_value(0) && dis1 > 0.6 * units::cm) { + cluster1->get_pca().values.at(1) > 0.012 * cluster1->get_pca().values.at(0) && dis1 > 0.6 * units::cm) { flag_merge = false; } @@ -706,15 +846,15 @@ void WireCell::PointCloud::Facade::clustering_neutrino(Grouping &live_grouping, if (flag_merge && cluster1->get_length() > 150 * units::cm && cluster2->get_length() > 150 * units::cm && - (cluster1->get_pca_value(1) < 0.03 * cluster1->get_pca_value(0) || - cluster2->get_pca_value(1) < 0.03 * cluster2->get_pca_value(0))) { + (cluster1->get_pca().values.at(1) < 0.03 * cluster1->get_pca().values.at(0) || + cluster2->get_pca().values.at(1) < 0.03 * cluster2->get_pca().values.at(0))) { // protect against two long tracks ... 
// cluster1->Calc_PCA(); // cluster2->Calc_PCA(); - geo_point_t temp_dir1(cluster1->get_pca_axis(0).x(), cluster1->get_pca_axis(0).y(), - cluster1->get_pca_axis(0).z()); - geo_point_t temp_dir2(cluster2->get_pca_axis(0).x(), cluster2->get_pca_axis(0).y(), - cluster2->get_pca_axis(0).z()); + geo_point_t temp_dir1(cluster1->get_pca().axis.at(0).x(), cluster1->get_pca().axis.at(0).y(), + cluster1->get_pca().axis.at(0).z()); + geo_point_t temp_dir2(cluster2->get_pca().axis.at(0).x(), cluster2->get_pca().axis.at(0).y(), + cluster2->get_pca().axis.at(0).z()); if (fabs(temp_dir1.angle(temp_dir2) - 3.1415926 / 2.) < 60 / 180. * 3.1415926) flag_merge = false; } @@ -724,7 +864,7 @@ void WireCell::PointCloud::Facade::clustering_neutrino(Grouping &live_grouping, // if(flag_merge) // std::cout << dis1 / units::cm << " " << dis2 / units::cm << " " << dis / units::cm << " " // << cluster1->get_length() / units::cm << " " << cluster2->get_length() / units::cm << " " - // << flag_merge << " " << merge_type << " " << cluster1->get_center() << " " << cluster2->get_center() << std::endl; + // << flag_merge << " " << merge_type << " " << cluster1->get_pca().center << " " << cluster2->get_pca().center << std::endl; if (dis < 1.8 * units::cm && cluster1->get_length() < 75 * units::cm && cluster2->get_length() < 75 * units::cm && @@ -762,8 +902,8 @@ void WireCell::PointCloud::Facade::clustering_neutrino(Grouping &live_grouping, cluster2->get_length() > 30 * units::cm && cluster1->get_length() > 30 * units::cm) { // cluster1->Calc_PCA(); // cluster2->Calc_PCA(); - if (cluster1->get_pca_value(1) > 0.0015 * cluster1->get_pca_value(0) && - cluster2->get_pca_value(1) > 0.0015 * cluster2->get_pca_value(0)) { + if (cluster1->get_pca().values.at(1) > 0.0015 * cluster1->get_pca().values.at(0) && + cluster2->get_pca().values.at(1) > 0.0015 * cluster2->get_pca().values.at(0)) { flag_merge = true; merge_type = 3; } @@ -807,7 +947,7 @@ void WireCell::PointCloud::Facade::clustering_neutrino(Grouping 
&live_grouping, // if(flag_merge ) // std::cout // << cluster1->get_length() / units::cm << " " << cluster2->get_length() / units::cm << " " - // << flag_merge << " " << merge_type << " " << cluster1->get_center() << " " << cluster2->get_center() << std::endl; + // << flag_merge << " " << merge_type << " " << cluster1->get_pca().center << " " << cluster2->get_pca().center << std::endl; } } @@ -827,12 +967,27 @@ void WireCell::PointCloud::Facade::clustering_neutrino(Grouping &live_grouping, ilive2desc[ilive] = boost::add_vertex(ilive, g); } for (auto [cluster1, cluster2] : to_be_merged_pairs) { - // std::cout <get_length()/units::cm << " " << cluster2->get_length()/units::cm << " " << cluster1->get_center() << " " << cluster2->get_center() << std::endl; + // std::cout <get_length()/units::cm << " " << cluster2->get_length()/units::cm << " " << cluster1->get_pca().center << " " << cluster2->get_pca().center << std::endl; boost::add_edge(ilive2desc[map_cluster_index[cluster1]], ilive2desc[map_cluster_index[cluster2]], g); } - cluster_set_t new_clusters; - merge_clusters(g, live_grouping, new_clusters); + + auto new_clusters = merge_clusters(g, live_grouping); + // { + // auto live_clusters = live_grouping.children(); // copy + // // Process each cluster + // for (size_t iclus = 0; iclus < live_clusters.size(); ++iclus) { + // Cluster* cluster = live_clusters.at(iclus); + // auto& scope = cluster->get_default_scope(); + // std::cout << "Test: " << iclus << " " << cluster->nchildren() << " " << scope.pcname << " " << scope.coords[0] << " " << scope.coords[1] << " " << scope.coords[2] << " " << cluster->get_scope_filter(scope)<< " " << cluster->get_pca().center << std::endl; + // } + // } + + + + + + } diff --git a/clus/src/clustering_parallel_prolong.cxx b/clus/src/clustering_parallel_prolong.cxx index 9f0b6b267..a4f41cdd4 100644 --- a/clus/src/clustering_parallel_prolong.cxx +++ b/clus/src/clustering_parallel_prolong.cxx @@ -1,116 +1,103 @@ -#include +#include 
"WireCellClus/IEnsembleVisitor.h" +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellClus/ClusteringFuncsMixins.h" -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wparentheses" +#include "WireCellIface/IConfigurable.h" + +#include "WireCellUtil/NamedFactory.h" + +class ClusteringParallelProlong; +WIRECELL_FACTORY(ClusteringParallelProlong, ClusteringParallelProlong, + WireCell::IConfigurable, WireCell::Clus::IEnsembleVisitor) using namespace WireCell; using namespace WireCell::Clus; -using namespace WireCell::Aux; -using namespace WireCell::Aux::TensorDM; -using namespace WireCell::PointCloud::Facade; -using namespace WireCell::PointCloud::Tree; -void WireCell::PointCloud::Facade::clustering_parallel_prolong( - Grouping& live_grouping, - cluster_set_t& cluster_connected_dead, // in/out - const double length_cut // -) -{ - // prepare graph ... - typedef cluster_connectivity_graph_t Graph; - Graph g; - std::unordered_map ilive2desc; // added live index to graph descriptor - std::map map_cluster_index; - const auto& live_clusters = live_grouping.children(); - - - for (size_t ilive = 0; ilive < live_clusters.size(); ++ilive) { - const auto& live = live_clusters[ilive]; - map_cluster_index[live] = ilive; - ilive2desc[ilive] = boost::add_vertex(ilive, g); - } +using namespace WireCell::Clus::Facade; - // original algorithm ... (establish edges ... ) +static void clustering_parallel_prolong( + Grouping& live_clusters, + IDetectorVolumes::pointer dv, + const Tree::Scope& scope, + const double length_cut = 35*units::cm); - for (size_t i=0;i!=live_clusters.size();i++){ - auto cluster_1 = live_clusters.at(i); - for (size_t j=i+1;jget_length(), cluster_2->get_length(), length_cut)){ +class ClusteringParallelProlong : public IConfigurable, public Clus::IEnsembleVisitor, private NeedDV, private NeedScope { +public: + ClusteringParallelProlong() {} + virtual ~ClusteringParallelProlong() {} - // // debug ... 
- // std::cout << cluster_1->get_length()/units::cm << " " << cluster_2->get_length()/units::cm << std::endl; + void configure(const WireCell::Configuration& config) { + NeedDV::configure(config); + NeedScope::configure(config); + + length_cut_ = get(config, "length_cut", 35*units::cm); + } + virtual Configuration default_configuration() const { + Configuration cfg; + return cfg; + } + - //to_be_merged_pairs.insert(std::make_pair(cluster_1,cluster_2)); - boost::add_edge(ilive2desc[map_cluster_index[cluster_1]], - ilive2desc[map_cluster_index[cluster_2]], g); + void visit(Ensemble& ensemble) const { + auto& live = *ensemble.with_name("live").at(0); + clustering_parallel_prolong(live, m_dv, m_scope, length_cut_); + } +private: + double length_cut_{35*units::cm}; +}; - } - } - } - // new function to merge clusters ... - merge_clusters(g, live_grouping, cluster_connected_dead); -} +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wparentheses" +// using namespace WireCell::PointCloud::Tree; -bool WireCell::PointCloud::Facade::Clustering_2nd_round( +static bool Clustering_2nd_round( const Cluster& cluster1, const Cluster& cluster2, double length_1, double length_2, + const std::map > & wpid_U_dir, const std::map > & wpid_V_dir, const std::map > & wpid_W_dir, + const IDetectorVolumes::pointer dv, double length_cut) { - const auto [angle_u,angle_v,angle_w] = cluster1.grouping()->wire_angles(); +// const auto [angle_u,angle_v,angle_w] = cluster1.grouping()->wire_angles(); if (length_1 < 10*units::cm && length_2 < 10*units::cm) return false; geo_point_t p1; geo_point_t p2; -// // debug ... 
-// bool flag_print = false; -// if (fabs(length_1 + length_2 - 1.42425*units::cm - 144.15 * units::cm) < 0.01*units::cm -// && fabs(fabs(length_1-length_2) - fabs(1.42425 - 144.15)*units::cm) < 0.01*units::cm) flag_print =true; - - double dis = WireCell::PointCloud::Facade::Find_Closest_Points(cluster1, cluster2, + double dis = WireCell::Clus::Facade::Find_Closest_Points(cluster1, cluster2, length_1, length_2, length_cut, p1, p2); -// if (flag_print) { -// std::cout << length_1/units::cm << " " << length_2/units::cm << " " << cluster1.npoints() << -// " " << cluster2.npoints() << " " << p1 << " " << p2 << " " << dis/units::cm << std::endl; -// std::cout << (*cluster1.get_first_blob()) << " " << cluster1.get_first_blob()->center_pos() << " " << (*cluster1.get_last_blob()) << " " << cluster1.get_last_blob()->center_pos() << std::endl; -// auto points = cluster1.get_first_blob()->points(); -// for (auto it1 = points.begin();it1!=points.end();it1++){ -// std::cout << (*it1).x() << " " << (*it1).y() << " " << (*it1).z() << std::endl; -// } -// } + auto wpid_p1 = cluster1.wpid(p1); + auto wpid_p2 = cluster2.wpid(p2); + auto wpid_ps = get_wireplaneid(p1, wpid_p1, p2, wpid_p2, dv); + if ((dis < length_cut || (dis < 80*units::cm && length_1 +length_2 > 50*units::cm && length_1>15*units::cm && length_2 > 15*units::cm))){ geo_point_t cluster1_ave_pos = cluster1.calc_ave_pos(p1,10*units::cm); + // auto wpid_ave_p1 = cluster1.wpid(cluster1_ave_pos); geo_point_t cluster2_ave_pos = cluster2.calc_ave_pos(p2,10*units::cm); + // auto wpid_ave_p2 = cluster2.wpid(cluster2_ave_pos); + // auto wpid_ave_ps = get_wireplaneid(cluster1_ave_pos, wpid_ave_p1, cluster2_ave_pos, wpid_ave_p2, dv); bool flag_para = false; - // bool flag_para_U = false; - // bool flag_para_V = false; - geo_point_t drift_dir(1, 0, 0); // assuming the drift direction is along X ... + geo_point_t drift_dir_abs(1, 0, 0); // assuming the drift direction is along X ... - // pronlonged case for U 3 and V 4 ... 
- geo_point_t U_dir(0,cos(angle_u),sin(angle_u)); - geo_point_t V_dir(0,cos(angle_v),sin(angle_v)); - geo_point_t W_dir(0,cos(angle_w),sin(angle_w)); - // deal the parallel case ... if (length_1 > 10*units::cm && length_2 >10*units::cm){ geo_point_t tempV1(p2.x() - p1.x(), p2.y() - p1.y(), p2.z() - p1.z()); geo_point_t tempV2(cluster2_ave_pos.x() - cluster1_ave_pos.x(), cluster2_ave_pos.y() - cluster1_ave_pos.y(), cluster2_ave_pos.z() - cluster1_ave_pos.z()); - double angle1 = tempV1.angle(drift_dir); - double angle4 = tempV2.angle(drift_dir); + double angle1 = tempV1.angle(drift_dir_abs); + double angle4 = tempV2.angle(drift_dir_abs); + // looks like a parallel case if ( (fabs(angle1-3.1415926/2.)<10/180.*3.1415926 && dis > 10*units::cm || @@ -121,23 +108,29 @@ bool WireCell::PointCloud::Facade::Clustering_2nd_round( geo_point_t dir1 = cluster1.vhough_transform(p1,60*units::cm); // cluster 1 direction based on hough geo_point_t dir2 = cluster2.vhough_transform(p2,60*units::cm); // cluster 2 direction based on hough - double angle5 = dir1.angle(drift_dir); - double angle6 = dir2.angle(drift_dir); + double angle5 = dir1.angle(drift_dir_abs); + double angle6 = dir2.angle(drift_dir_abs); if (fabs(angle5-3.1415926/2.)<5/180.*3.1415926 && fabs(angle6-3.1415926/2.)<20/180.*3.1415926 || fabs(angle5-3.1415926/2.)<20/180.*3.1415926 && fabs(angle6-3.1415926/2.)<5/180.*3.1415926){ flag_para = true; + + if (dis >= 3*length_1 && dis >= 3*length_2 && flag_para) return false; - double angle2 = tempV1.angle(U_dir); - double angle3 = tempV1.angle(V_dir); + double angle2 = tempV1.angle(wpid_U_dir.at(wpid_ps).first); + double angle3 = tempV1.angle(wpid_V_dir.at(wpid_ps).first); + + // look at parallel U if ((fabs(angle2-3.1415926/2.)<7.5/180.*3.1415926 || (fabs(angle2-3.1415926/2.)<15/180.*3.1415926)&&dis <6*units::cm) && (dis 100*units::cm)) && length_1 >15*units::cm && length_2 > 15*units::cm){ // flag_para_U = true; + + if ((length_1 < 25*units::cm || length_2 < 25*units::cm) && 
fabs(angle2-3.1415926/2.)<5.0/180.*3.1415926 && dis < 15* units::cm || dis < 3*units::cm){ // for short or small distance one return true; @@ -150,20 +143,17 @@ bool WireCell::PointCloud::Facade::Clustering_2nd_round( }else if (fabs(angle2-3.1415926/2.)<2.5/180.*3.1415926 && fabs(angle5-3.1415926/2.)<5/180.*3.1415926 && fabs(angle6-3.1415926/2.)<5/180.*3.141592 ){ // parallel case, but exclude both very long tracks if (length_1 < 60*units::cm || length_2 < 60*units::cm){ - if (WireCell::PointCloud::Facade::is_angle_consistent(dir1,tempV1,false,15,angle_u,angle_v,angle_w) && WireCell::PointCloud::Facade::is_angle_consistent(dir2,tempV1,true,15,angle_u,angle_v,angle_w)) - return true; - }else if (dis <5*units::cm){ - return true; - }else{ - double angle7 = (3.1415926-dir1.angle(dir2))/3.1415926*180.; - double angle8 = (3.1415926-dir1.angle(tempV1))/3.1415926*180.; // dir1 = -p1, tempV1 = p2 - p1 - double angle9 = dir2.angle(tempV1)/3.1415926*180.; // dir2 = -p2 - if (angle7 < 30 && angle8 < 30 && angle9 < 30) - return true; - if (WireCell::PointCloud::Facade::is_angle_consistent(dir1,tempV1,false,10,angle_u,angle_v,angle_w) && WireCell::PointCloud::Facade::is_angle_consistent(dir2,tempV1,true,10,angle_u,angle_v,angle_w)) - return true; - } - + if (WireCell::Clus::Facade::is_angle_consistent(dir1,tempV1,false,15, wpid_U_dir.at(wpid_ps).second, wpid_V_dir.at(wpid_ps).second, wpid_W_dir.at(wpid_ps).second) && WireCell::Clus::Facade::is_angle_consistent(dir2,tempV1,true,15,wpid_U_dir.at(wpid_ps).second, wpid_V_dir.at(wpid_ps).second, wpid_W_dir.at(wpid_ps).second)) return true; + }else if (dis <5*units::cm){ + return true; + }else{ + double angle7 = (3.1415926-dir1.angle(dir2))/3.1415926*180.; + double angle8 = (3.1415926-dir1.angle(tempV1))/3.1415926*180.; // dir1 = -p1, tempV1 = p2 - p1 + double angle9 = dir2.angle(tempV1)/3.1415926*180.; // dir2 = -p2 + if (angle7 < 30 && angle8 < 30 && angle9 < 30) return true; + if 
(WireCell::Clus::Facade::is_angle_consistent(dir1,tempV1,false,10,wpid_U_dir.at(wpid_ps).second, wpid_V_dir.at(wpid_ps).second, wpid_W_dir.at(wpid_ps).second) && WireCell::Clus::Facade::is_angle_consistent(dir2,tempV1,true,10,wpid_U_dir.at(wpid_ps).second, wpid_V_dir.at(wpid_ps).second, wpid_W_dir.at(wpid_ps).second)) + return true; + } }else{ // general case ... (not sure how useful though ...) double angle7 = (3.1415926-dir1.angle(dir2))/3.1415926*180.; @@ -172,9 +162,9 @@ bool WireCell::PointCloud::Facade::Clustering_2nd_round( if ((angle7 < 30 && angle8 < 30 && angle9 < 30 || fabs(angle5-3.1415926/2.)<5/180.*3.1415926 && fabs(angle6-3.1415926/2.)<5/180.*3.141592 && angle7 < 45 && angle8 < 45 && angle9 < 45) && dis < 20*units::cm) - return true; - if (WireCell::PointCloud::Facade::is_angle_consistent(dir1,tempV1,false,10,angle_u,angle_v,angle_w) && WireCell::PointCloud::Facade::is_angle_consistent(dir2,tempV1,true,10,angle_u,angle_v,angle_w)) - return true; + return true; + if (WireCell::Clus::Facade::is_angle_consistent(dir1,tempV1,false,10,wpid_U_dir.at(wpid_ps).second, wpid_V_dir.at(wpid_ps).second, wpid_W_dir.at(wpid_ps).second) && WireCell::Clus::Facade::is_angle_consistent(dir2,tempV1,true,10,wpid_U_dir.at(wpid_ps).second, wpid_V_dir.at(wpid_ps).second, wpid_W_dir.at(wpid_ps).second)) + return true; } } @@ -190,7 +180,7 @@ bool WireCell::PointCloud::Facade::Clustering_2nd_round( }else if (dis < 15*units::cm && fabs(angle3-3.1415926/2.)<2.5/180.*3.1415926 && (length_1 < 60*units::cm || length_2 < 60*units::cm) ){ return true; }else if (fabs(angle3-3.1415926/2.)<2.5/180.*3.1415926 && fabs(angle5-3.1415926/2.)<5/180.*3.1415926 && fabs(angle6-3.1415926/2.)<5/180.*3.141592){ - if (WireCell::PointCloud::Facade::is_angle_consistent(dir1,tempV1,false,15,angle_u,angle_v,angle_w) && WireCell::PointCloud::Facade::is_angle_consistent(dir2,tempV1,true,15,angle_u,angle_v,angle_w)) + if 
(WireCell::Clus::Facade::is_angle_consistent(dir1,tempV1,false,15,wpid_U_dir.at(wpid_ps).second, wpid_V_dir.at(wpid_ps).second, wpid_W_dir.at(wpid_ps).second) && WireCell::Clus::Facade::is_angle_consistent(dir2,tempV1,true,15,wpid_U_dir.at(wpid_ps).second, wpid_V_dir.at(wpid_ps).second, wpid_W_dir.at(wpid_ps).second)) return true; }else{ double angle7 = (3.1415926-dir1.angle(dir2))/3.1415926*180.; @@ -200,7 +190,7 @@ bool WireCell::PointCloud::Facade::Clustering_2nd_round( fabs(angle5-3.1415926/2.)<5/180.*3.1415926 && fabs(angle6-3.1415926/2.)<5/180.*3.141592 && angle7 < 60 && angle8 < 60 && angle9 < 60) return true; - if (WireCell::PointCloud::Facade::is_angle_consistent(dir1,tempV1,false,10,angle_u,angle_v,angle_w) && WireCell::PointCloud::Facade::is_angle_consistent(dir2,tempV1,true,10,angle_u,angle_v,angle_w)) + if (WireCell::Clus::Facade::is_angle_consistent(dir1,tempV1,false,10,wpid_U_dir.at(wpid_ps).second, wpid_V_dir.at(wpid_ps).second, wpid_W_dir.at(wpid_ps).second) && WireCell::Clus::Facade::is_angle_consistent(dir2,tempV1,true,10,wpid_U_dir.at(wpid_ps).second, wpid_V_dir.at(wpid_ps).second, wpid_W_dir.at(wpid_ps).second)) return true; } } @@ -212,17 +202,17 @@ bool WireCell::PointCloud::Facade::Clustering_2nd_round( { geo_point_t tempV1(0, p2.y() - p1.y(), p2.z() - p1.z()); geo_point_t tempV5; - double angle1 = tempV1.angle(U_dir); + double angle1 = tempV1.angle(wpid_U_dir.at(wpid_ps).first); tempV5.set(fabs(p2.x()-p1.x()),sqrt(pow(p2.y() - p1.y(),2)+pow(p2.z() - p1.z(),2))*sin(angle1),0); - angle1 = tempV5.angle(drift_dir); + angle1 = tempV5.angle(drift_dir_abs); - double angle2 = tempV1.angle(V_dir); + double angle2 = tempV1.angle(wpid_V_dir.at(wpid_ps).first); tempV5.set(fabs(p2.x()-p1.x()),sqrt(pow(p2.y() - p1.y(),2)+pow(p2.z() - p1.z(),2))*sin(angle2),0); - angle2 = tempV5.angle(drift_dir); + angle2 = tempV5.angle(drift_dir_abs); - double angle1p = tempV1.angle(W_dir); + double angle1p = tempV1.angle(wpid_W_dir.at(wpid_ps).first); 
tempV5.set(fabs(p2.x()-p1.x()),sqrt(pow(p2.y() - p1.y(),2)+pow(p2.z() - p1.z(),2))*sin(angle1p),0); - angle1p = tempV5.angle(drift_dir); + angle1p = tempV5.angle(drift_dir_abs); if (angle1<7.5/180.*3.1415926 || angle2<7.5/180.*3.1415926 || @@ -263,4 +253,114 @@ bool WireCell::PointCloud::Facade::Clustering_2nd_round( return false; } +// Expand this function to handle multiple APA/Faces ... +static void clustering_parallel_prolong( + Grouping& live_grouping, + + const IDetectorVolumes::pointer dv, // detector volumes + const Tree::Scope& scope, + const double length_cut // +) +{ + + // Get all the wire plane IDs from the grouping + const auto& wpids = live_grouping.wpids(); + + // Key: pair, Value: drift_dir, angle_u, angle_v, angle_w + std::map> wpid_params; + std::map > wpid_U_dir; + std::map > wpid_V_dir; + std::map > wpid_W_dir; + std::set apas; + // for (const auto& wpid : wpids) { + // int apa = wpid.apa(); + // int face = wpid.face(); + // apas.insert(apa); + + // // Create wpids for all three planes with this APA and face + // WirePlaneId wpid_u(kUlayer, face, apa); + // WirePlaneId wpid_v(kVlayer, face, apa); + // WirePlaneId wpid_w(kWlayer, face, apa); + + // // Get drift direction based on face orientation + // int face_dirx = dv->face_dirx(wpid_u); + // geo_point_t drift_dir(face_dirx, 0, 0); + + // // Get wire directions for all planes + // Vector wire_dir_u = dv->wire_direction(wpid_u); + // Vector wire_dir_v = dv->wire_direction(wpid_v); + // Vector wire_dir_w = dv->wire_direction(wpid_w); + + // // Calculate angles + // double angle_u = std::atan2(wire_dir_u.z(), wire_dir_u.y()); + // double angle_v = std::atan2(wire_dir_v.z(), wire_dir_v.y()); + // double angle_w = std::atan2(wire_dir_w.z(), wire_dir_w.y()); + + // wpid_params[wpid] = std::make_tuple(drift_dir, angle_u, angle_v, angle_w); + // wpid_U_dir[wpid] = std::make_pair(geo_point_t(0, cos(angle_u), sin(angle_u)), angle_u); + // wpid_V_dir[wpid] = std::make_pair(geo_point_t(0, cos(angle_v), 
sin(angle_v)), angle_v); + // wpid_W_dir[wpid] = std::make_pair(geo_point_t(0, cos(angle_w), sin(angle_w)), angle_w); + // } + compute_wireplane_params( + wpids, dv, wpid_params, wpid_U_dir, wpid_V_dir, wpid_W_dir, apas); + + + + // prepare graph ... + typedef cluster_connectivity_graph_t Graph; + Graph g; + std::unordered_map ilive2desc; // added live index to graph descriptor + std::map map_cluster_index; + const auto& live_clusters = live_grouping.children(); + + for (size_t ilive = 0; ilive < live_clusters.size(); ++ilive) { + auto& live = live_clusters[ilive]; + map_cluster_index[live] = ilive; + ilive2desc[ilive] = boost::add_vertex(ilive, g); + if (live->get_default_scope().hash() != scope.hash()) { + live->set_default_scope(scope); + // std::cout << "Test: Set default scope: " << pc_name << " " << coords[0] << " " << coords[1] << " " << coords[2] << " " << cluster->get_default_scope().hash() << " " << scope.hash() << std::endl; + } + } + + // original algorithm ... (establish edges ... ) + + + for (size_t i=0;i!=live_clusters.size();i++){ + auto cluster_1 = live_clusters.at(i); + if (!cluster_1->get_scope_filter(scope)) continue; + for (size_t j=i+1;jget_scope_filter(scope)) continue; + if (Clustering_2nd_round(*cluster_1,*cluster_2, cluster_1->get_length(), cluster_2->get_length(), wpid_U_dir, wpid_V_dir, wpid_W_dir, dv, length_cut)){ + boost::add_edge(ilive2desc[map_cluster_index[cluster_1]], + ilive2desc[map_cluster_index[cluster_2]], g); + + + } + } + } + + // new function to merge clusters ... 
+ merge_clusters(g, live_grouping); + + // { + // auto live_clusters = live_grouping.children(); // copy + // // Process each cluster + // for (size_t iclus = 0; iclus < live_clusters.size(); ++iclus) { + // Cluster* cluster = live_clusters.at(iclus); + // auto& scope = cluster->get_default_scope(); + // std::cout << "Test: " << iclus << " " << cluster->nchildren() << " " << scope.pcname << " " << scope.coords[0] << " " << scope.coords[1] << " " << scope.coords[2] << " " << cluster->get_scope_filter(scope)<< " " << cluster->get_pca().center) << std::endl; + // } + // } + + + + + + +} + + + #pragma GCC diagnostic pop diff --git a/clus/src/clustering_pointed.cxx b/clus/src/clustering_pointed.cxx new file mode 100644 index 000000000..e3e092737 --- /dev/null +++ b/clus/src/clustering_pointed.cxx @@ -0,0 +1,82 @@ +#include "WireCellClus/Facade_Grouping.h" +#include "WireCellClus/Facade_Cluster.h" +#include "WireCellClus/Facade_Blob.h" +#include "WireCellClus/IEnsembleVisitor.h" + +#include "WireCellIface/IConfigurable.h" + +#include "WireCellUtil/NamedFactory.h" +#include "WireCellUtil/PointTree.h" + +#include + +class ClusteringPointed; +WIRECELL_FACTORY(ClusteringPointed, ClusteringPointed, + WireCell::IConfigurable, WireCell::Clus::IEnsembleVisitor) + +using namespace WireCell; +using namespace WireCell::Clus; +using namespace WireCell::Clus::Facade; +using namespace WireCell::PointCloud::Tree; + +class ClusteringPointed : public IConfigurable, public Clus::IEnsembleVisitor { +public: + ClusteringPointed() {}; + virtual ~ClusteringPointed() {}; + + void configure(const WireCell::Configuration& cfg) { + auto jgroupings = cfg["groupings"]; + if (! 
jgroupings.isArray()) { + return; + } + m_groupings.clear(); + for (const auto& one : jgroupings) { + m_groupings.push_back(one.asString()); + } + } + virtual Configuration default_configuration() const { + Configuration cfg; + cfg["groupings"][0] = "live"; + return cfg; + } + + void visit(Ensemble& ensemble) const { + + for (const auto& name : m_groupings) { + auto got = ensemble.with_name(name); + if (got.empty()) { continue; } + auto* grouping = got[0]; + + std::vector doomed_clusters; + + for (auto* cluster : grouping->children()) { + + std::vector doomed_blobs; + + for (auto* blob : cluster->children()) { + if (! blob->npoints()) { + doomed_blobs.push_back(blob); + } + } + + for (auto* dead : doomed_blobs) { + cluster->destroy_child(dead); + } + + if (! cluster->nchildren()) { + doomed_clusters.push_back(cluster); + } + } + + for (auto* dead : doomed_clusters) { + grouping->destroy_child(dead); + } + + } + } + +private: + + std::vector m_groupings = {"live"}; + +}; diff --git a/clus/src/clustering_protect_overclustering.cxx b/clus/src/clustering_protect_overclustering.cxx index a63ffcfdc..60550e340 100644 --- a/clus/src/clustering_protect_overclustering.cxx +++ b/clus/src/clustering_protect_overclustering.cxx @@ -1,50 +1,79 @@ -#include -#include -#include +#include "WireCellClus/IEnsembleVisitor.h" +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellClus/ClusteringFuncsMixins.h" + +#include "WireCellIface/IConfigurable.h" + +#include "WireCellUtil/NamedFactory.h" + +#include "WireCellUtil/Graph.h" + +class ClusteringProtectOverclustering; +WIRECELL_FACTORY(ClusteringProtectOverclustering, ClusteringProtectOverclustering, + WireCell::IConfigurable, WireCell::Clus::IEnsembleVisitor) -// The original developers do not care. 
-#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wparentheses" using namespace WireCell; using namespace WireCell::Clus; -using namespace WireCell::Aux; -using namespace WireCell::Aux::TensorDM; -using namespace WireCell::PointCloud::Facade; +using namespace WireCell::Clus::Graphs; +using namespace WireCell::Clus::Facade; using namespace WireCell::PointCloud::Tree; +static void clustering_protect_overclustering( + Grouping &live_grouping, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts, + const Tree::Scope& scope + ); + +class ClusteringProtectOverclustering : public IConfigurable, public Clus::IEnsembleVisitor, private NeedDV, private NeedPCTS, private NeedScope { +public: + ClusteringProtectOverclustering() {} + virtual ~ClusteringProtectOverclustering() {} + + void configure(const WireCell::Configuration& config) { + NeedDV::configure(config); + NeedPCTS::configure(config); + NeedScope::configure(config); + } -static -std::map Separate_overclustering(Cluster *cluster) + void visit(Ensemble& ensemble) const { + auto& live = *ensemble.with_name("live").at(0); + clustering_protect_overclustering(live, m_dv, m_pcts, m_scope); + } + +}; + +// The original developers do not care. +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wparentheses" + +static std::map Separate_overclustering( + Cluster *cluster, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts, + const Scope& scope) { // can follow ToyClustering_separate to add clusters ... auto* grouping = cluster->grouping(); - const auto &tp = grouping->get_params(); - // copy the create_graph from the PR3D Cluster ... 
+ auto wpids = grouping->wpids(); + std::map map_wpid_nticks_live; + for (const auto& wpid : wpids) { + map_wpid_nticks_live[wpid] = dv->metadata(wpid)["nticks_live_slice"].asDouble(); + } - // PR3DClusterSelection new_clusters; // cluster->Create_point_cloud(); const int N = cluster->npoints(); - std::shared_ptr graph = std::make_shared(N); + auto graph = std::make_shared(N); // ToyPointCloud *point_cloud = cluster->get_point_cloud(); std::vector mcells = cluster->children(); // plane -> point -> wire index const auto& winds = cluster->wire_indices(); - const Cluster::time_blob_map_t &time_cells_set_map = cluster->time_blob_map(); - - // WCP::WCPointCloud &cloud = point_cloud->get_cloud(); - // WCP::WC2DPointCloud &cloud_u = point_cloud->get_cloud_u(); - // WCP::WC2DPointCloud &cloud_v = point_cloud->get_cloud_v(); - // WCP::WC2DPointCloud &cloud_w = point_cloud->get_cloud_w(); - - // blob -> wind -> points - // std::map>> map_mcell_uindex_wcps; - // std::map>> map_mcell_vindex_wcps; - // std::map>> map_mcell_windex_wcps; + std::map>> map_mcell_wind_wcps[3]; for (auto it = mcells.begin(); it != mcells.end(); it++) { @@ -55,16 +84,9 @@ std::map Separate_overclustering(Cluster *cluster) std::map> map_wind_wcps[3]; const std::vector &wcps = cluster->get_blob_indices(mcell); for (const int point_index : wcps) { - auto v = vertex(point_index, *graph); // retrieve vertex descriptor - (*graph)[v].index = point_index; - // if (map_windex_wcps.find(wcp.index_w) == map_windex_wcps.end()) { - // std::set wcps; - // wcps.insert(wcp.index); - // map_windex_wcps[wcp.index_w] = wcps; - // } - // else { - // map_windex_wcps[wcp.index_w].insert(wcp.index); - // } + // auto v = vertex(point_index, *graph); // retrieve vertex descriptor + // (*graph)[v].ident = point_index; + for (size_t plane_ind=0; plane_ind!=3; ++plane_ind) { const int wind = winds[plane_ind][point_index]; if (map_wind_wcps[plane_ind].find(wind) == map_wind_wcps[plane_ind].end()) { @@ -96,24 +118,7 @@ std::map 
Separate_overclustering(Cluster *cluster) int min_wire_interval = mcell->get_min_wire_interval(); std::map> *map_max_index_wcps; std::map> *map_min_index_wcps; - // if (mcell->get_max_wire_type() == WirePlaneType_t(0)) { - // map_max_index_wcps = &map_mcell_uindex_wcps[mcell]; - // } - // else if (mcell->get_max_wire_type() == WirePlaneType_t(1)) { - // map_max_index_wcps = &map_mcell_wind_wcps[plane_ind][mcell]; - // } - // else { - // map_max_index_wcps = &map_mcell_windex_wcps[mcell]; - // } - // if (mcell->get_min_wire_type() == WirePlaneType_t(0)) { - // map_min_index_wcps = &map_mcell_uindex_wcps[mcell]; - // } - // else if (mcell->get_min_wire_type() == WirePlaneType_t(1)) { - // map_min_index_wcps = &map_mcell_vindex_wcps[mcell]; - // } - // else { - // map_min_index_wcps = &map_mcell_windex_wcps[mcell]; - // } + const int max_wire_type = mcell->get_max_wire_type(); const int min_wire_type = mcell->get_min_wire_type(); map_max_index_wcps = &map_mcell_wind_wcps[max_wire_type][mcell]; @@ -122,39 +127,23 @@ std::map Separate_overclustering(Cluster *cluster) for (const int index1 : wcps) { // WCPointCloud::WCPoint &wcp1 = cloud.pts[*it1]; // int index1 = wcp1.index; + // std::cout << winds.size() << " " << max_wire_type << " " << min_wire_type << " " << winds[max_wire_type].size() << winds[min_wire_type].size() << std::endl; int index_max_wire = winds[max_wire_type][index1]; int index_min_wire = winds[min_wire_type][index1]; - // if (mcell->get_max_wire_type() == WirePlaneType_t(0)) { - // index_max_wire = wcp1.index_u; - // } - // else if (mcell->get_max_wire_type() == WirePlaneType_t(1)) { - // index_max_wire = wcp1.index_v; - // } - // else { - // index_max_wire = wcp1.index_w; - // } - // if (mcell->get_min_wire_type() == WirePlaneType_t(0)) { - // index_min_wire = wcp1.index_u; - // } - // else if (mcell->get_min_wire_type() == WirePlaneType_t(1)) { - // index_min_wire = wcp1.index_v; - // } - // else { - // index_min_wire = wcp1.index_w; - // } + 
std::vector *> max_wcps_set; std::vector *> min_wcps_set; // go through the first map and find the ones satisfying the condition for (auto it2 = map_max_index_wcps->begin(); it2 != map_max_index_wcps->end(); it2++) { - if (fabs(it2->first - index_max_wire) <= max_wire_interval) { + if (std::abs(it2->first - index_max_wire) <= max_wire_interval) { max_wcps_set.push_back(&(it2->second)); } } // go through the second map and find the ones satisfying the condition for (auto it2 = map_min_index_wcps->begin(); it2 != map_min_index_wcps->end(); it2++) { - if (fabs(it2->first - index_min_wire) <= min_wire_interval) { + if (std::abs(it2->first - index_min_wire) <= min_wire_interval) { min_wcps_set.push_back(&(it2->second)); } } @@ -169,9 +158,7 @@ std::map Separate_overclustering(Cluster *cluster) wcps_set2.insert((*it3)->begin(), (*it3)->end()); } - // std::cout << max_wcps_set.size() << " " << min_wcps_set.size() << std::endl; - // for (auto it2 = max_wcps_set.begin(); it2!=max_wcps_set.end(); it2++){ - // for (auto it3 = min_wcps_set.begin(); it3!=min_wcps_set.end(); it3++){ + { std::set common_set; set_intersection(wcps_set1.begin(), wcps_set1.end(), wcps_set2.begin(), wcps_set2.end(), @@ -182,22 +169,12 @@ std::map Separate_overclustering(Cluster *cluster) for (const int index2 : common_set) { // WCPointCloud::WCPoint &wcp2 = cloud.pts[*it4]; if (index2 != index1) { - // int index2 = wcp2.index; - // std::cout << index1 << " " << index2 << std::endl; - // add edge ... 
- //auto edge = add_edge(index1, index2, *graph); + const geo_point_t wcp1 = cluster->point3d(index1); const geo_point_t wcp2 = cluster->point3d(index2); double dis = sqrt(pow(wcp1.x() - wcp2.x(), 2) + pow(wcp1.y() - wcp2.y(), 2) + pow(wcp1.z() - wcp2.z(), 2)); - // if (edge.second) { - - // (*graph)[edge.first].dist = dis; - // num_edges++; - // // std::cout << wcp1.x << " " << wcp1.y << " " << wcp1.z << " " << wcp1.index_u << " " << - // // wcp1.index_v << " " << wcp1.index_w << " " << wcp2.index_u << " " << wcp2.index_v << " " - // // << wcp2.index_w << std::endl; - // } - auto edge = add_edge(index1, index2, WireCell::PointCloud::Facade::EdgeProp(dis),*graph); + + auto edge = add_edge(index1, index2, dis,*graph); if (edge.second) { num_edges++; } @@ -208,57 +185,80 @@ std::map Separate_overclustering(Cluster *cluster) } } + (void)num_edges; // std::cout << "Xin: " << num_edges << " " << N << std::endl; - std::vector time_slices; - for (auto it1 = time_cells_set_map.begin(); it1 != time_cells_set_map.end(); it1++) { - time_slices.push_back((*it1).first); + const auto &time_cells_set_map = cluster->time_blob_map(); + + // std::vector time_slices; + // for (auto it1 = time_cells_set_map.begin(); it1 != time_cells_set_map.end(); it1++) { + // time_slices.push_back((*it1).first); + // } + + std::map > > af_time_slices; // apa,face --> time slices + for (auto it = cluster->time_blob_map().begin(); it != cluster->time_blob_map().end(); it++) { + int apa = it->first; + for (auto it1 = it->second.begin(); it1 != it->second.end(); it1++) { + int face = it1->first; + std::vector time_slices_vec; + for (auto it2 = it1->second.begin(); it2 != it1->second.end(); it2++) { + time_slices_vec.push_back(it2->first); + } + af_time_slices[apa][face] = time_slices_vec; + } } - std::vector> connected_mcells; - for (size_t i = 0; i != time_slices.size(); i++) { - const std::set &mcells_set = time_cells_set_map.at(time_slices.at(i)); - - // create graph for points in mcell inside the 
same time slice - if (mcells_set.size() >= 2) { - for (auto it2 = mcells_set.begin(); it2 != mcells_set.end(); it2++) { - const Blob *mcell1 = *it2; - auto it2p = it2; - if (it2p != mcells_set.end()) { - it2p++; - for (auto it3 = it2p; it3 != mcells_set.end(); it3++) { - const Blob *mcell2 = *(it3); - // std::cout << mcell1 << " " << mcell2 << " " << mcell1->Overlap_fast(mcell2,2) << std::endl; - if (mcell1->overlap_fast(*mcell2, 2)) connected_mcells.push_back(std::make_pair(mcell1, mcell2)); + std::vector> connected_mcells; + for (auto it = af_time_slices.begin(); it != af_time_slices.end(); it++) { + int apa = it->first; + for (auto it1 = it->second.begin(); it1 != it->second.end(); it1++) { + int face = it1->first; + std::vector& time_slices = it1->second; + for (size_t i = 0; i != time_slices.size(); i++) { + const BlobSet &mcells_set = time_cells_set_map.at(apa).at(face).at(time_slices.at(i)); + + // create graph for points in mcell inside the same time slice + if (mcells_set.size() >= 2) { + for (auto it2 = mcells_set.begin(); it2 != mcells_set.end(); it2++) { + const Blob *mcell1 = *it2; + auto it2p = it2; + if (it2p != mcells_set.end()) { + it2p++; + for (auto it3 = it2p; it3 != mcells_set.end(); it3++) { + const Blob *mcell2 = *(it3); + // std::cout << mcell1 << " " << mcell2 << " " << mcell1->Overlap_fast(mcell2,2) << std::endl; + if (mcell1->overlap_fast(*mcell2, 2)) connected_mcells.push_back(std::make_pair(mcell1, mcell2)); + } + } } } - } - } - // create graph for points between connected mcells in adjacent time slices + 1, if not, + 2 - std::vector> vec_mcells_set; - if (i + 1 < time_slices.size()) { - if (time_slices.at(i + 1) - time_slices.at(i) == 1*tp.nticks_live_slice) { - vec_mcells_set.push_back(time_cells_set_map.at(time_slices.at(i + 1))); - if (i + 2 < time_slices.size()) - if (time_slices.at(i + 2) - time_slices.at(i) == 2*tp.nticks_live_slice) - vec_mcells_set.push_back(time_cells_set_map.at(time_slices.at(i + 2))); - } - else if 
(time_slices.at(i + 1) - time_slices.at(i) == 2*tp.nticks_live_slice) { - vec_mcells_set.push_back(time_cells_set_map.at(time_slices.at(i + 1))); - } - } - // bool flag = false; - for (size_t j = 0; j != vec_mcells_set.size(); j++) { - // if (flag) break; - std::set &next_mcells_set = vec_mcells_set.at(j); - for (auto it1 = mcells_set.begin(); it1 != mcells_set.end(); it1++) { - const Blob *mcell1 = (*it1); - for (auto it2 = next_mcells_set.begin(); it2 != next_mcells_set.end(); it2++) { - const Blob *mcell2 = (*it2); - if (mcell1->overlap_fast(*mcell2, 2)) { - // flag = true; // correct??? - connected_mcells.push_back(std::make_pair(mcell1, mcell2)); + // create graph for points between connected mcells in adjacent time slices + 1, if not, + 2 + std::vector vec_mcells_set; + if (i + 1 < time_slices.size()) { + if (time_slices.at(i + 1) - time_slices.at(i) == 1*grouping->get_nticks_per_slice().at(apa).at(face)) { + vec_mcells_set.push_back(time_cells_set_map.at(apa).at(face).at(time_slices.at(i + 1))); + if (i + 2 < time_slices.size()) + if (time_slices.at(i + 2) - time_slices.at(i) == 2*grouping->get_nticks_per_slice().at(apa).at(face)) + vec_mcells_set.push_back(time_cells_set_map.at(apa).at(face).at(time_slices.at(i + 2))); + } + else if (time_slices.at(i + 1) - time_slices.at(i) == 2*grouping->get_nticks_per_slice().at(apa).at(face)) { + vec_mcells_set.push_back(time_cells_set_map.at(apa).at(face).at(time_slices.at(i + 1))); + } + } + // bool flag = false; + for (size_t j = 0; j != vec_mcells_set.size(); j++) { + // if (flag) break; + BlobSet &next_mcells_set = vec_mcells_set.at(j); + for (auto it1 = mcells_set.begin(); it1 != mcells_set.end(); it1++) { + const Blob *mcell1 = (*it1); + for (auto it2 = next_mcells_set.begin(); it2 != next_mcells_set.end(); it2++) { + const Blob *mcell2 = (*it2); + if (mcell1->overlap_fast(*mcell2, 2)) { + // flag = true; // correct??? 
+ connected_mcells.push_back(std::make_pair(mcell1, mcell2)); + } + } } } } @@ -283,24 +283,7 @@ std::map Separate_overclustering(Cluster *cluster) std::map> *map_max_index_wcps; std::map> *map_min_index_wcps; - // if (mcell1->get_max_wire_type() == WirePlaneType_t(0)) { - // map_max_index_wcps = &map_mcell_uindex_wcps[mcell2]; - // } - // else if (mcell1->get_max_wire_type() == WirePlaneType_t(1)) { - // map_max_index_wcps = &map_mcell_vindex_wcps[mcell2]; - // } - // else { - // map_max_index_wcps = &map_mcell_windex_wcps[mcell2]; - // } - // if (mcell1->get_min_wire_type() == WirePlaneType_t(0)) { - // map_min_index_wcps = &map_mcell_uindex_wcps[mcell2]; - // } - // else if (mcell1->get_min_wire_type() == WirePlaneType_t(1)) { - // map_min_index_wcps = &map_mcell_vindex_wcps[mcell2]; - // } - // else { - // map_min_index_wcps = &map_mcell_windex_wcps[mcell2]; - // } + map_max_index_wcps = &map_mcell_wind_wcps[mcell1->get_max_wire_type()][mcell2]; map_min_index_wcps = &map_mcell_wind_wcps[mcell1->get_min_wire_type()][mcell2]; @@ -309,35 +292,18 @@ std::map Separate_overclustering(Cluster *cluster) // int index1 = wcp1.index; int index_max_wire = winds[mcell1->get_max_wire_type()][index1]; int index_min_wire = winds[mcell1->get_min_wire_type()][index1]; - // if (mcell1->get_max_wire_type() == WirePlaneType_t(0)) { - // index_max_wire = wcp1.index_u; - // } - // else if (mcell1->get_max_wire_type() == WirePlaneType_t(1)) { - // index_max_wire = wcp1.index_v; - // } - // else { - // index_max_wire = wcp1.index_w; - // } - // if (mcell1->get_min_wire_type() == WirePlaneType_t(0)) { - // index_min_wire = wcp1.index_u; - // } - // else if (mcell1->get_min_wire_type() == WirePlaneType_t(1)) { - // index_min_wire = wcp1.index_v; - // } - // else { - // index_min_wire = wcp1.index_w; - // } + std::vector *> max_wcps_set; std::vector *> min_wcps_set; // go through the first map and find the ones satisfying the condition for (auto it2 = map_max_index_wcps->begin(); it2 != 
map_max_index_wcps->end(); it2++) { - if (fabs(it2->first - index_max_wire) <= max_wire_interval) { + if (std::abs(it2->first - index_max_wire) <= max_wire_interval) { max_wcps_set.push_back(&(it2->second)); } } // go through the second map and find the ones satisfying the condition for (auto it2 = map_min_index_wcps->begin(); it2 != map_min_index_wcps->end(); it2++) { - if (fabs(it2->first - index_min_wire) <= min_wire_interval) { + if (std::abs(it2->first - index_min_wire) <= min_wire_interval) { min_wcps_set.push_back(&(it2->second)); } } @@ -352,19 +318,12 @@ std::map Separate_overclustering(Cluster *cluster) wcps_set2.insert((*it3)->begin(), (*it3)->end()); } - // for (auto it2 = max_wcps_set.begin(); it2!=max_wcps_set.end(); it2++){ - // for (auto it3 = min_wcps_set.begin(); it3!=min_wcps_set.end(); it3++){ + { std::set common_set; set_intersection(wcps_set1.begin(), wcps_set1.end(), wcps_set2.begin(), wcps_set2.end(), std::inserter(common_set, common_set.begin())); - // std::cout << "S1: " << common_set.size() << std::endl; - // std::cout << common_set.size() << std::endl; - - // std::map > closest_index; - - // for (auto it4 = common_set.begin(); it4 != common_set.end(); it4++) { for (const int index2 : common_set) { // WCPointCloud::WCPoint &wcp2 = cloud.pts[*it4]; if (index2 != index1) { @@ -389,57 +348,19 @@ std::map Separate_overclustering(Cluster *cluster) } closest_index[key].erase(it5,closest_index[key].end()); } - //if (dis < closest_index[key].second || (std::abs(dis - closest_index[key].second) < 1e-10 && pind2 < closest_index[key].first)) closest_index[key] = std::make_pair(pind2, dis); } - - // if (closest_index.find(std::make_pair(index1, time2)) == - // closest_index.end()) { - // closest_index[std::make_pair(index1, time2)] = - // std::make_pair(index2, dis); - // } - // else { - // if (dis < closest_index[std::make_pair(index1, time2)].second) - // closest_index[std::make_pair(index1, time2)] = - // std::make_pair(index2, dis); - // } + } } 
- // std::cout << closest_index.size() << std::endl; - // for (auto it4 = closest_index.begin(); it4!=closest_index.end(); it4++){ - // int index2 = it4->second.first; - // double dis = it4->second.second; - // auto edge = add_edge(index1,index2,*graph); - // if (edge.second){ - // (*graph)[edge.first].dist = dis; - // num_edges ++; - // } - // } + } - //} } // test 1 against 2 ... max_wire_interval = mcell2->get_max_wire_interval(); min_wire_interval = mcell2->get_min_wire_interval(); - // if (mcell2->get_max_wire_type() == WirePlaneType_t(0)) { - // map_max_index_wcps = &map_mcell_uindex_wcps[mcell1]; - // } - // else if (mcell2->get_max_wire_type() == WirePlaneType_t(1)) { - // map_max_index_wcps = &map_mcell_vindex_wcps[mcell1]; - // } - // else { - // map_max_index_wcps = &map_mcell_windex_wcps[mcell1]; - // } - // if (mcell2->get_min_wire_type() == WirePlaneType_t(0)) { - // map_min_index_wcps = &map_mcell_uindex_wcps[mcell1]; - // } - // else if (mcell2->get_min_wire_type() == WirePlaneType_t(1)) { - // map_min_index_wcps = &map_mcell_vindex_wcps[mcell1]; - // } - // else { - // map_min_index_wcps = &map_mcell_windex_wcps[mcell1]; - // } + map_max_index_wcps = &map_mcell_wind_wcps[mcell2->get_max_wire_type()][mcell1]; map_min_index_wcps = &map_mcell_wind_wcps[mcell2->get_min_wire_type()][mcell1]; @@ -449,35 +370,18 @@ std::map Separate_overclustering(Cluster *cluster) // int index1 = wcp1.index; int index_max_wire = winds[mcell2->get_max_wire_type()][index1]; int index_min_wire = winds[mcell2->get_min_wire_type()][index1]; - // if (mcell2->get_max_wire_type() == WirePlaneType_t(0)) { - // index_max_wire = wcp1.index_u; - // } - // else if (mcell2->get_max_wire_type() == WirePlaneType_t(1)) { - // index_max_wire = wcp1.index_v; - // } - // else { - // index_max_wire = wcp1.index_w; - // } - // if (mcell2->get_min_wire_type() == WirePlaneType_t(0)) { - // index_min_wire = wcp1.index_u; - // } - // else if (mcell2->get_min_wire_type() == WirePlaneType_t(1)) { - 
// index_min_wire = wcp1.index_v; - // } - // else { - // index_min_wire = wcp1.index_w; - // } + std::vector *> max_wcps_set; std::vector *> min_wcps_set; // go through the first map and find the ones satisfying the condition for (auto it2 = map_max_index_wcps->begin(); it2 != map_max_index_wcps->end(); it2++) { - if (fabs(it2->first - index_max_wire) <= max_wire_interval) { + if (std::abs(it2->first - index_max_wire) <= max_wire_interval) { max_wcps_set.push_back(&(it2->second)); } } // go through the second map and find the ones satisfying the condition for (auto it2 = map_min_index_wcps->begin(); it2 != map_min_index_wcps->end(); it2++) { - if (fabs(it2->first - index_min_wire) <= min_wire_interval) { + if (std::abs(it2->first - index_min_wire) <= min_wire_interval) { min_wcps_set.push_back(&(it2->second)); } } @@ -531,44 +435,9 @@ std::map Separate_overclustering(Cluster *cluster) } //if (dis < closest_index[key].second || (std::abs(dis - closest_index[key].second) < 1e-10 && pind2 < closest_index[key].first)) closest_index[key] = std::make_pair(pind2, dis); } - - // if (closest_index.find(std::make_pair(index1, time2)) == - // closest_index.end()) { - // closest_index[std::make_pair(index1, time2)] = - // std::make_pair(index2, dis); - // } - // else { - // if (dis < closest_index[std::make_pair(index1, time2)].second) - // closest_index[std::make_pair(index1, time2)] = - // std::make_pair(index2, dis); - // } } } - - // std::cout << closest_index.size() << std::endl; - // for (auto it4 = closest_index.begin(); it4!=closest_index.end(); it4++){ - // int index2 = it4->second.first; - // double dis = it4->second.second; - // auto edge = add_edge(index1,index2,*graph); - // if (edge.second){ - // (*graph)[edge.first].dist = dis; - // num_edges ++; - // } - // } - - // for (auto it4 = common_set.begin(); it4!=common_set.end(); it4++){ - // WCPointCloud::WCPoint& wcp2 = cloud.pts[*it4]; - // if (wcp2.index != wcp1.index){ - // int index2 = wcp2.index; - // auto 
edge = add_edge(index1,index2,*graph); - // if (edge.second){ - // (*graph)[edge.first].dist = - // sqrt(pow(wcp1.x-wcp2.x,2)+pow(wcp1.y-wcp2.y,2)+pow(wcp1.z-wcp2.z,2)); num_edges ++; - // } - // } - // } } - // } } } @@ -577,7 +446,7 @@ std::map Separate_overclustering(Cluster *cluster) for (auto it5 = it4->second.begin(); it5!=it4->second.end(); it5++){ int index2 = (*it5).second; double dis = (*it5).first; - auto edge = add_edge(index1,index2,WireCell::PointCloud::Facade::EdgeProp(dis),*graph); + auto edge = add_edge(index1,index2,dis,*graph); if (edge.second){ // (*graph)[edge.first].dist = dis; num_edges ++; @@ -588,17 +457,7 @@ std::map Separate_overclustering(Cluster *cluster) break; } - // int index2 = it4->second.first; - // double dis = it4->second.second; - // // auto edge = add_edge(index1, index2, *graph); - // // if (edge.second) { - // // (*graph)[edge.first].dist = dis; - // // num_edges++; - // // } - // auto edge = add_edge(index1, index2, WireCell::PointCloud::Facade::EdgeProp(dis),*graph); - // if (edge.second) { - // num_edges++; - // } + } // end of copying ... 
@@ -646,27 +505,6 @@ std::map Separate_overclustering(Cluster *cluster) pt_clouds_global_indices.push_back(global_indices); } - // for (int j = 0; j != num; j++) { - // pt_clouds.push_back(std::make_shared()); - // } - - // // std::vector::size_type i; - // // for (i = 0; i != component.size(); ++i) { - // // pt_clouds.at(component[i])->AddPoint(cloud.pts[i], cloud_u.pts[i], cloud_v.pts[i], cloud_w.pts[i]); - // // // std::cout << "Vertex " << i << " " << cloud.pts[i].x << " " << cloud.pts[i].y << " " << cloud.pts[i].z - // // // << " " << cloud.pts[i].index_u << " " << cloud.pts[i].index_v << " " << cloud.pts[i].index_w << " " << - // // // cloud.pts[i].mcell << " " << cloud.pts[i].mcell->GetTimeSlice() << " is in component " << component[i] - // // // << std::endl; - // // } - // // for (int j = 0; j != num; j++) { - // // pt_clouds.at(j)->build_kdtree_index(); - // // } - // std::vector::size_type i; - // for (i = 0; i != component.size(); ++i) { - // geo_point_t pt = cluster->point3d(i); - // pt_clouds.at(component[i])->add({pt.x(), pt.y(), pt.z()}); - // pt_clouds_global_indices.at(component[i]).push_back(i); - // } std::vector>> index_index_dis( num, std::vector>(num)); @@ -736,7 +574,9 @@ std::map Separate_overclustering(Cluster *cluster) // Now check the path ... 
{ geo_point_t p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis[j][k])); + auto wpid_p1 = cluster->wire_plane_id(pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis[j][k]))); geo_point_t p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis[j][k])); + auto wpid_p2 = cluster->wire_plane_id(pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis[j][k]))); double dis = sqrt(pow(p1.x() - p2.x(), 2) + pow(p1.y() - p2.y(), 2) + pow(p1.z() - p2.z(), 2)); double step_dis = 1.0 * units::cm; @@ -747,9 +587,16 @@ std::map Separate_overclustering(Cluster *cluster) test_p.set(p1.x() + (p2.x() - p1.x()) / num_steps * (ii + 1), p1.y() + (p2.y() - p1.y()) / num_steps * (ii + 1), p1.z() + (p2.z() - p1.z()) / num_steps * (ii + 1)); - if (true) { - /// FIXME: how to add face information? - const bool good_point = cluster->grouping()->is_good_point(test_p, tp.face); + auto test_wpid = get_wireplaneid(test_p, wpid_p1, wpid_p2, dv); + if (test_wpid.apa()!=-1){ + geo_point_t test_p_raw = test_p; + // std::cout <<"Test: " << cluster->get_flash().time() << std::endl; + if (cluster->get_default_scope().hash() != cluster->get_raw_scope().hash()){ + const auto transform = pcts->pc_transform(cluster->get_scope_transform(cluster->get_default_scope())); + double cluster_t0 = cluster->get_cluster_t0(); + test_p_raw = transform->backward(test_p, cluster_t0, test_wpid.face(), test_wpid.apa()); + } + const bool good_point = cluster->grouping()->is_good_point(test_p_raw, test_wpid.apa(), test_wpid.face()); if (!good_point) num_bad++; } } @@ -762,7 +609,9 @@ std::map Separate_overclustering(Cluster *cluster) // Now check the path ... 
if (std::get<0>(index_index_dis_dir1[j][k]) >= 0) { geo_point_t p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis_dir1[j][k])); + auto wpid_p1 = cluster->wire_plane_id(pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_dir1[j][k]))); geo_point_t p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis_dir1[j][k])); + auto wpid_p2 = cluster->wire_plane_id(pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_dir1[j][k]))); double dis = sqrt(pow(p1.x() - p2.x(), 2) + pow(p1.y() - p2.y(), 2) + pow(p1.z() - p2.z(), 2)); double step_dis = 1.0 * units::cm; @@ -773,10 +622,16 @@ std::map Separate_overclustering(Cluster *cluster) test_p.set(p1.x() + (p2.x() - p1.x()) / num_steps * (ii + 1), p1.y() + (p2.y() - p1.y()) / num_steps * (ii + 1), p1.z() + (p2.z() - p1.z()) / num_steps * (ii + 1)); - // if (!ct_point_cloud.is_good_point(test_p)) num_bad++; - if (true) { - /// FIXME: how to add face information? - const bool good_point = cluster->grouping()->is_good_point(test_p, tp.face); + auto test_wpid = get_wireplaneid(test_p, wpid_p1, wpid_p2, dv); + if (test_wpid.apa()!=-1){ + geo_point_t test_p_raw = test_p; + // std::cout <<"Test: " << cluster->get_flash().time() << std::endl; + if (cluster->get_default_scope().hash() != cluster->get_raw_scope().hash()){ + const auto transform = pcts->pc_transform(cluster->get_scope_transform(cluster->get_default_scope())); + double cluster_t0 = cluster->get_cluster_t0(); + test_p_raw = transform->backward(test_p, cluster_t0, test_wpid.face(), test_wpid.apa()); + } + const bool good_point = cluster->grouping()->is_good_point(test_p_raw, test_wpid.apa(), test_wpid.face()); if (!good_point) num_bad++; } } @@ -789,7 +644,9 @@ std::map Separate_overclustering(Cluster *cluster) // Now check the path ... 
if (std::get<0>(index_index_dis_dir2[j][k]) >= 0) { geo_point_t p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis_dir2[j][k])); + auto wpid_p1 = cluster->wire_plane_id(pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_dir2[j][k]))); geo_point_t p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis_dir2[j][k])); + auto wpid_p2 = cluster->wire_plane_id(pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_dir2[j][k]))); double dis = sqrt(pow(p1.x() - p2.x(), 2) + pow(p1.y() - p2.y(), 2) + pow(p1.z() - p2.z(), 2)); double step_dis = 1.0 * units::cm; @@ -800,10 +657,16 @@ std::map Separate_overclustering(Cluster *cluster) test_p.set(p1.x() + (p2.x() - p1.x()) / num_steps * (ii + 1), p1.y() + (p2.y() - p1.y()) / num_steps * (ii + 1), p1.z() + (p2.z() - p1.z()) / num_steps * (ii + 1)); - // if (!ct_point_cloud.is_good_point(test_p)) num_bad++; - if (true) { - /// FIXME: how to add face information? - const bool good_point = cluster->grouping()->is_good_point(test_p, tp.face); + auto test_wpid = get_wireplaneid(test_p, wpid_p1, wpid_p2, dv); + if (test_wpid.apa()!=-1){ + geo_point_t test_p_raw = test_p; + // std::cout <<"Test: " << cluster->get_flash().time() << std::endl; + if (cluster->get_default_scope().hash() != cluster->get_raw_scope().hash()){ + const auto transform = pcts->pc_transform(cluster->get_scope_transform(cluster->get_default_scope())); + double cluster_t0 = cluster->get_cluster_t0(); + test_p_raw = transform->backward(test_p, cluster_t0, test_wpid.face(), test_wpid.apa()); + } + const bool good_point = cluster->grouping()->is_good_point(test_p_raw, test_wpid.apa(), test_wpid.face()); if (!good_point) num_bad++; } } @@ -818,9 +681,7 @@ std::map Separate_overclustering(Cluster *cluster) // deal with MST { const int N = num; - boost::adjacency_list> - temp_graph(N); + Weighted::Graph temp_graph(N); for (int j = 0; j != num; j++) { for (int k = j + 1; k != num; k++) { @@ -834,47 +695,12 @@ std::map Separate_overclustering(Cluster 
*cluster) // Process MST process_mst_deterministically(temp_graph, index_index_dis, index_index_dis_mst); - // { - // std::vector possible_root_vertex; - // std::vector component(num_vertices(temp_graph)); - // const int num1 = connected_components(temp_graph, &component[0]); - // possible_root_vertex.resize(num1); - // std::vector::size_type i; - // for (i = 0; i != component.size(); ++i) { - // possible_root_vertex.at(component[i]) = i; - // } - - // for (size_t i = 0; i != possible_root_vertex.size(); i++) { - // std::vector::vertex_descriptor> predecessors( - // num_vertices(temp_graph)); - - // prim_minimum_spanning_tree(temp_graph, &predecessors[0], - // boost::root_vertex(possible_root_vertex.at(i))); - - // for (size_t j = 0; j != predecessors.size(); ++j) { - // if (predecessors[j] != j) { - // if (j < predecessors[j]) { - // index_index_dis_mst[j][predecessors[j]] = index_index_dis[j][predecessors[j]]; - // } - // else { - // index_index_dis_mst[predecessors[j]][j] = index_index_dis[predecessors[j]][j]; - // } - // // std::cout << j << " " << predecessors[j] << " " << std::endl; - // } - // else { - // // std::cout << j << " " << std::endl; - // } - // } - // } - // } } // deal with MST for directionality { const int N = num; - boost::adjacency_list> - temp_graph(N); + Weighted::Graph temp_graph(N); for (int j = 0; j != num; j++) { for (int k = j + 1; k != num; k++) { @@ -889,37 +715,7 @@ std::map Separate_overclustering(Cluster *cluster) } process_mst_deterministically(temp_graph, index_index_dis, index_index_dis_dir_mst); - // { - // std::vector possible_root_vertex; - // std::vector component(num_vertices(temp_graph)); - // const int num1 = connected_components(temp_graph, &component[0]); - // possible_root_vertex.resize(num1); - // std::vector::size_type i; - // for (i = 0; i != component.size(); ++i) { - // possible_root_vertex.at(component[i]) = i; - // } - - // for (size_t i = 0; i != possible_root_vertex.size(); i++) { - // 
std::vector::vertex_descriptor> predecessors( - // num_vertices(temp_graph)); - // prim_minimum_spanning_tree(temp_graph, &predecessors[0], - // boost::root_vertex(possible_root_vertex.at(i))); - // for (size_t j = 0; j != predecessors.size(); ++j) { - // if (predecessors[j] != j) { - // if (j < predecessors[j]) { - // index_index_dis_dir_mst[j][predecessors[j]] = index_index_dis[j][predecessors[j]]; - // } - // else { - // index_index_dis_dir_mst[predecessors[j]][j] = index_index_dis[predecessors[j]][j]; - // } - // // std::cout << j << " " << predecessors[j] << " " << std::endl; - // } - // else { - // // std::cout << j << " " << std::endl; - // } - // } - // } - // } + } for (int j = 0; j != num; j++) { @@ -944,7 +740,7 @@ std::map Separate_overclustering(Cluster *cluster) } // } - /*auto edge =*/ add_edge(gind1, gind2, WireCell::PointCloud::Facade::EdgeProp(dis),*graph); + /*auto edge =*/ add_edge(gind1, gind2, dis,*graph); } if (std::get<0>(index_index_dis_dir_mst[j][k]) >= 0) { @@ -962,7 +758,7 @@ std::map Separate_overclustering(Cluster *cluster) dis = std::get<2>(index_index_dis_dir1[j][k]); } // } - /*auto edge =*/ add_edge(gind1, gind2, WireCell::PointCloud::Facade::EdgeProp(dis),*graph); + /*auto edge =*/ add_edge(gind1, gind2, dis,*graph); } if (std::get<0>(index_index_dis_dir2[j][k]) >= 0) { // auto edge = add_edge(std::get<0>(index_index_dis_dir2[j][k]), @@ -978,14 +774,13 @@ std::map Separate_overclustering(Cluster *cluster) dis = std::get<2>(index_index_dis_dir2[j][k]); } // } - /*auto edge =*/ add_edge(gind1, gind2, WireCell::PointCloud::Facade::EdgeProp(dis), *graph); + /*auto edge =*/ add_edge(gind1, gind2, dis, *graph); } } // end check ... } } - // std::cout << "Check: " << cluster->nchildren() << " " << num << std::endl; // study the independent component again ... 
{ // point -> component @@ -994,58 +789,55 @@ std::map Separate_overclustering(Cluster *cluster) if (num1 > 1) { - // std::cout << "Check1: " << cluster->nchildren() << " " << num << " " << num1 << std::endl; - - // form new clusters ... - // WCP logic ... - // std::vector> vec_mcells_set; - // for (int ii = 0; ii != num1; ii++) { - // std::set mcells_set; - // vec_mcells_set.push_back(mcells_set); - // } - // std::vector::size_type i; - // for (i = 0; i != component1.size(); ++i) { - // vec_mcells_set.at(component1[i]).insert(cloud.pts[i].mcell); - // } - // for (int ii = 0; ii != num1; ii++) { - // Cluster *cluster1 = new Cluster(1); - // for (auto it = vec_mcells_set.at(ii).begin(); it != vec_mcells_set.at(ii).end(); it++) { - // Blob *mcell = (*it); - // cluster1->AddCell(mcell, mcell->GetTimeSlice()); - // } - // new_clusters.push_back(cluster1); - // } + std::vector b2groupid(cluster->nchildren(), -1); std::vector::size_type i; for (i = 0; i != component1.size(); ++i) { const int bind = cluster->kd3d().major_index(i); b2groupid.at(bind) = component1[i]; } - return grouping->separate(cluster, b2groupid, true); + auto scope_name = cluster->get_scope_transform(scope); + auto id2clusters = grouping->separate(cluster, b2groupid, true); + return id2clusters; } } - // for (int i = 0; i != num; i++) { - // delete pt_clouds.at(i); - // } - - // } // if (num > 1) - - // delete graph; - // return new_clusters; + return {}; } -void WireCell::PointCloud::Facade::clustering_protect_overclustering(Grouping& live_grouping) +static void clustering_protect_overclustering( + Grouping &live_grouping, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts, + const Tree::Scope& scope) { std::vector live_clusters = live_grouping.children(); // copy - // sort the clusters by length using a lambda function - // std::sort(live_clusters.begin(), live_clusters.end(), [](const Cluster *cluster1, const Cluster *cluster2) { - // return cluster1->get_length() > 
cluster2->get_length(); - // }); + for (size_t i = 0; i != live_clusters.size(); i++) { Cluster *cluster = live_clusters.at(i); + if (!cluster->get_scope_filter(scope)) continue; + if (cluster->get_default_scope().hash() != scope.hash()) { + cluster->set_default_scope(scope); + // std::cout << "Test: Set default scope: " << pc_name << " " << coords[0] << " " << coords[1] << " " << coords[2] << " " << cluster->get_default_scope().hash() << " " << scope.hash() << std::endl; + } // std::cout << "Cluster: " << i << " " << cluster->npoints() << std::endl; - Separate_overclustering(cluster); + Separate_overclustering(cluster, dv, pcts, scope); } + + // { + // auto live_clusters = live_grouping.children(); // copy + // // Process each cluster + // for (size_t iclus = 0; iclus < live_clusters.size(); ++iclus) { + // Cluster* cluster = live_clusters.at(iclus); + // auto& scope = cluster->get_default_scope(); + // std::cout << "Test: " << iclus << " " << cluster->nchildren() << " " << scope.pcname << " " << scope.coords[0] << " " << scope.coords[1] << " " << scope.coords[2] << " " << cluster->get_scope_filter(scope)<< " " << cluster->get_pca().center) << std::endl; + // } + // } + + + + + + } diff --git a/clus/src/clustering_regular.cxx b/clus/src/clustering_regular.cxx index d8a152d33..b70693fdd 100644 --- a/clus/src/clustering_regular.cxx +++ b/clus/src/clustering_regular.cxx @@ -1,75 +1,66 @@ -#include +#include "WireCellClus/IEnsembleVisitor.h" +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellClus/ClusteringFuncsMixins.h" -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wparentheses" +#include "WireCellIface/IConfigurable.h" -using namespace WireCell; -using namespace WireCell::Clus; -using namespace WireCell::Aux; -using namespace WireCell::Aux::TensorDM; -using namespace WireCell::PointCloud::Facade; -using namespace WireCell::PointCloud::Tree; -void WireCell::PointCloud::Facade::clustering_regular( - Grouping& live_grouping, - cluster_set_t& 
cluster_connected_dead, // in/out - const double length_cut, // - bool flag_enable_extend // -) -{ - double internal_length_cut = 10 *units::cm; - if (flag_enable_extend) { - internal_length_cut = 15 *units::cm; - } +#include "WireCellUtil/NamedFactory.h" - // prepare graph ... - typedef cluster_connectivity_graph_t Graph; - Graph g; - std::unordered_map ilive2desc; // added live index to graph descriptor - std::map map_cluster_index; - const auto& live_clusters = live_grouping.children(); - +class ClusteringRegular; +WIRECELL_FACTORY(ClusteringRegular, ClusteringRegular, + WireCell::IConfigurable, WireCell::Clus::IEnsembleVisitor) - for (size_t ilive = 0; ilive < live_clusters.size(); ++ilive) { - const auto& live = live_clusters.at(ilive); - map_cluster_index[live] = ilive; - ilive2desc[ilive] = boost::add_vertex(ilive, g); - } - // original algorithm ... (establish edges ... ) +using namespace WireCell; +using namespace WireCell::Clus; +using namespace WireCell::Clus::Facade; +static void clustering_regular(Grouping& live_clusters, - for (size_t i=0;i!=live_clusters.size();i++){ - auto cluster_1 = live_clusters.at(i); - if (cluster_1->get_length() < internal_length_cut) continue; - for (size_t j=i+1;jget_length() < internal_length_cut) continue; + IDetectorVolumes::pointer dv, + const Tree::Scope& scope, + const double length_cut = 45*units::cm, + bool flag_enable_extend = true); - if (Clustering_1st_round(*cluster_1,*cluster_2, cluster_1->get_length(), cluster_2->get_length(), length_cut, flag_enable_extend)){ +class ClusteringRegular : public IConfigurable, public Clus::IEnsembleVisitor, private NeedDV, private NeedScope { + double m_length_cut{45*units::cm}; + bool m_flag_enable_extend{true}; +public: + ClusteringRegular() {} + virtual ~ClusteringRegular() {} - // debug ... 
- //std::cout << cluster_1->get_length()/units::cm << " " << cluster_2->get_length()/units::cm << std::endl; + void configure(const WireCell::Configuration& config) { + NeedDV::configure(config); + NeedScope::configure(config); - // to_be_merged_pairs.insert(std::make_pair(cluster_1,cluster_2)); - boost::add_edge(ilive2desc[map_cluster_index[cluster_1]], - ilive2desc[map_cluster_index[cluster_2]], g); - } + m_length_cut = get(config, "length_cut", 45*units::cm); + m_flag_enable_extend = get(config, "flag_enable_extend", true); + } + virtual Configuration default_configuration() const { + Configuration cfg; + return cfg; } - } - // new function to merge clusters ... - merge_clusters(g, live_grouping, cluster_connected_dead); -} + void visit(Ensemble& ensemble) const { + auto& live = *ensemble.with_name("live").at(0); + clustering_regular(live, m_dv, m_scope, m_length_cut, m_flag_enable_extend); + } +}; + -bool WireCell::PointCloud::Facade::Clustering_1st_round( +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wparentheses" + +static bool Clustering_1st_round( const Cluster& cluster1, const Cluster& cluster2, double length_1, double length_2, + const std::map& wpid_U_dir, const std::map& wpid_V_dir, const std::map& wpid_W_dir, + const IDetectorVolumes::pointer dv, double length_cut, bool flag_enable_extend) { - const auto [angle_u,angle_v,angle_w] = cluster1.grouping()->wire_angles(); geo_point_t p1; geo_point_t p2; @@ -79,19 +70,13 @@ bool WireCell::PointCloud::Facade::Clustering_1st_round( // if (fabs(length_1 + length_2 - 12.9601*units::cm - 83.8829*units::cm) < 0.3*units::cm // && fabs(fabs(length_1-length_2) - fabs(12.9601*units::cm - 83.8829*units::cm)) < 0.3*units::cm) flag_print =true; - double dis = WireCell::PointCloud::Facade::Find_Closest_Points(cluster1, cluster2, + double dis = WireCell::Clus::Facade::Find_Closest_Points(cluster1, cluster2, length_1, length_2, length_cut, p1,p2); - // if (flag_print) { - // std::cout << length_1/units::cm 
<< " " << length_2/units::cm << " " << cluster1.npoints() << - // " " << cluster2.npoints() << " " << p1 << " " << p2 << " " << dis/units::cm << std::endl; - // std::cout << (*cluster1.get_first_blob()) << " " << cluster1.get_first_blob()->center_pos() << " " << (*cluster1.get_last_blob()) << " " << cluster1.get_last_blob()->center_pos() << std::endl; - // auto points = cluster1.get_first_blob()->points(); - // for (auto it1 = points.begin();it1!=points.end();it1++){ - // std::cout << (*it1).x() << " " << (*it1).y() << " " << (*it1).z() << std::endl; - // } - // } + auto wpid_p1 = cluster1.wpid(p1); + auto wpid_p2 = cluster2.wpid(p2); + auto wpid_ps = get_wireplaneid(p1, wpid_p1, p2, wpid_p2, dv); if (dis < length_cut){ @@ -106,18 +91,15 @@ bool WireCell::PointCloud::Facade::Clustering_1st_round( bool flag_force_extend = false; - geo_point_t drift_dir(1, 0, 0); // assuming the drift direction is along X ... + geo_point_t drift_dir_abs(1, 0, 0); // assuming the drift direction is along X ... - // pronlonged case for U 3 and V 4 ... - geo_point_t U_dir(0,cos(angle_u),sin(angle_u)); - geo_point_t V_dir(0,cos(angle_v),sin(angle_v)); - geo_point_t W_dir(0,cos(angle_w),sin(angle_w)); - // calculate average distance ... 
geo_point_t cluster1_ave_pos = cluster1.calc_ave_pos(p1,5*units::cm); + auto wpid_ave_p1 = cluster1.wpid(cluster1_ave_pos); geo_point_t cluster2_ave_pos = cluster2.calc_ave_pos(p2,5*units::cm); + auto wpid_ave_p2 = cluster2.wpid(cluster2_ave_pos); + auto wpid_ave_ps = get_wireplaneid(cluster1_ave_pos, wpid_ave_p1, cluster2_ave_pos, wpid_ave_p2, dv); - geo_point_t dir2_1(p2.x() - p1.x()+1e-9, p2.y() - p1.y()+1e-9, p2.z() - p1.z()+1e-9); // 2-1 geo_point_t dir2(cluster2_ave_pos.x() - cluster1_ave_pos.x()+1e-9, cluster2_ave_pos.y() - cluster1_ave_pos.y()+1e-9, @@ -125,36 +107,35 @@ bool WireCell::PointCloud::Facade::Clustering_1st_round( dir2_1 = dir2_1/dir2_1.magnitude(); dir2 = dir2/dir2.magnitude(); - // parallle case - double angle1 = dir2_1.angle(drift_dir); - double angle2 = dir2.angle(drift_dir); + double angle1 = dir2_1.angle(drift_dir_abs); + double angle2 = dir2.angle(drift_dir_abs); double angle3{0}, angle4{0}; double angle3_1{0}, angle4_1{0}; if ((fabs(angle1-3.1415926/2.)<7.5/180.*3.1415926 || - fabs(angle2-3.1415926/2.)<7.5/180.*3.1415926) && dis < 45*units::cm && - length_1 > 12*units::cm && length_2 > 12*units::cm){ + fabs(angle2-3.1415926/2.)<7.5/180.*3.1415926) && dis < 45*units::cm && + length_1 > 12*units::cm && length_2 > 12*units::cm){ flag_para = true; if (dis >=3*length_1 && dis >= 3*length_2 && flag_para) return false; - angle3 = dir2_1.angle(U_dir); - angle4 = dir2_1.angle(V_dir); + angle3 = dir2_1.angle(wpid_U_dir.at(wpid_ps)); // betwen points p ... + angle4 = dir2_1.angle(wpid_V_dir.at(wpid_ps)); - angle3_1 = dir2.angle(U_dir); - angle4_1 = dir2.angle(V_dir); + angle3_1 = dir2.angle(wpid_U_dir.at(wpid_ave_ps)); // between ave points ... 
+ angle4_1 = dir2.angle(wpid_V_dir.at(wpid_ave_ps)); if (fabs(angle3-3.1415926/2.)<7.5/180.*3.1415926 || fabs(angle3_1-3.1415926/2.)<7.5/180.*3.1415926 || - ((fabs(angle3-3.1415926/2.)<15/180.*3.1415926 || fabs(angle3_1-3.1415926/2.)<15/180.*3.1415926) - &&dis < 6*units::cm)) - flag_para_U = true; + ((fabs(angle3-3.1415926/2.)<15/180.*3.1415926 || fabs(angle3_1-3.1415926/2.)<15/180.*3.1415926) + &&dis < 6*units::cm)) + flag_para_U = true; if (fabs(angle4-3.1415926/2.)<7.5/180.*3.1415926 || fabs(angle4_1-3.1415926/2.)<7.5/180.*3.1415926 || - ((fabs(angle4-3.1415926/2.)<15/180.*3.1415926 || fabs(angle4_1-3.1415926/2.)<15/180.*3.1415926)&& - dis < 6*units::cm)) - flag_para_V = true; + ((fabs(angle4-3.1415926/2.)<15/180.*3.1415926 || fabs(angle4_1-3.1415926/2.)<15/180.*3.1415926)&& + dis < 6*units::cm)) + flag_para_V = true; } if (!flag_para){ @@ -164,29 +145,29 @@ bool WireCell::PointCloud::Facade::Clustering_1st_round( cluster2_ave_pos.z() - cluster1_ave_pos.z()); geo_point_t tempV5; - double angle6 = tempV3.angle(U_dir); + double angle6 = tempV3.angle(wpid_U_dir.at(wpid_ps)); tempV5.set(fabs(p2.x()-p1.x()),sqrt(pow(p2.y() - p1.y(),2)+pow(p2.z() - p1.z(),2))*sin(angle6),0); - angle6 = tempV5.angle(drift_dir); + angle6 = tempV5.angle(drift_dir_abs); - double angle7 = tempV3.angle(V_dir); + double angle7 = tempV3.angle(wpid_V_dir.at(wpid_ps)); tempV5.set(fabs(p2.x()-p1.x()),sqrt(pow(p2.y() - p1.y(),2)+pow(p2.z() - p1.z(),2))*sin(angle7),0); - angle7 = tempV5.angle(drift_dir); + angle7 = tempV5.angle(drift_dir_abs); - double angle8 = tempV3.angle(W_dir); + double angle8 = tempV3.angle(wpid_W_dir.at(wpid_ps)); tempV5.set(fabs(p2.x()-p1.x()),sqrt(pow(p2.y() - p1.y(),2)+pow(p2.z() - p1.z(),2))*sin(angle8),0); - angle8 = tempV5.angle(drift_dir); + angle8 = tempV5.angle(drift_dir_abs); - double angle6_1 = tempV4.angle(U_dir); + double angle6_1 = tempV4.angle(wpid_U_dir.at(wpid_ave_ps)); 
tempV5.set(fabs(cluster2_ave_pos.x()-cluster1_ave_pos.x()),sqrt(pow(cluster2_ave_pos.y()-cluster1_ave_pos.y(),2)+pow(cluster2_ave_pos.z()-cluster1_ave_pos.z(),2))*sin(angle6_1),0); - angle6_1 = tempV5.angle(drift_dir); + angle6_1 = tempV5.angle(drift_dir_abs); - double angle7_1 = tempV4.angle(V_dir); + double angle7_1 = tempV4.angle(wpid_V_dir.at(wpid_ave_ps)); tempV5.set(fabs(cluster2_ave_pos.x()-cluster1_ave_pos.x()),sqrt(pow(cluster2_ave_pos.y()-cluster1_ave_pos.y(),2)+pow(cluster2_ave_pos.z()-cluster1_ave_pos.z(),2))*sin(angle7_1),0); - angle7_1 = tempV5.angle(drift_dir); + angle7_1 = tempV5.angle(drift_dir_abs); - double angle8_1 = tempV4.angle(W_dir); + double angle8_1 = tempV4.angle(wpid_W_dir.at(wpid_ave_ps)); tempV5.set(fabs(cluster2_ave_pos.x()-cluster1_ave_pos.x()),sqrt(pow(cluster2_ave_pos.y()-cluster1_ave_pos.y(),2)+pow(cluster2_ave_pos.z()-cluster1_ave_pos.z(),2))*sin(angle8_1),0); - angle8_1 = tempV5.angle(drift_dir); + angle8_1 = tempV5.angle(drift_dir_abs); if (angle6<15/180.*3.1415926 || angle6_1<15/180.*3.1415926 ) @@ -206,7 +187,7 @@ bool WireCell::PointCloud::Facade::Clustering_1st_round( flag_regular = true; }else if ( length_1 > 30*units::cm && length_2 > 30*units::cm) { if (dis <= 25*units::cm) - flag_regular = true; + flag_regular = true; } if ((flag_para_U || flag_para_V ) || @@ -298,15 +279,21 @@ bool WireCell::PointCloud::Facade::Clustering_1st_round( if (flag_para && (flag_para_U || flag_para_V || flag_regular)){ + + // add a special fix for very long tracks PDHD + if (flag_para && (wpid_p1.apa() != wpid_p2.apa() || wpid_p1.face() != wpid_p2.face())){ + if (length_1 > 100*units::cm && length_2 > 100*units::cm && dis < 5*units::cm ) return true; + } + // if (length_1 > 250*units::cm && length_2 > 250*units::cm) std::cout << "Test: " << length_1/units::cm << " " << length_2/units::cm << " " << flag_para << " " << dis/units::cm << " " << length_cut/units::cm << std::endl; - double dangle1 = 
(dir1.angle(drift_dir)-3.1415926/2.)/3.1415926*180.; - double dangle1_1 = (dir1_1.angle(drift_dir)-3.1415926/2.)/3.1415926*180.; + double dangle1 = (dir1.angle(drift_dir_abs)-3.1415926/2.)/3.1415926*180.; + double dangle1_1 = (dir1_1.angle(drift_dir_abs)-3.1415926/2.)/3.1415926*180.; - double dangle2 = (dir2.angle(drift_dir)-3.1415926/2.)/3.1415926*180.; - double dangle2_1 = (dir2_1.angle(drift_dir)-3.1415926/2.)/3.1415926*180.; + double dangle2 = (dir2.angle(drift_dir_abs)-3.1415926/2.)/3.1415926*180.; + double dangle2_1 = (dir2_1.angle(drift_dir_abs)-3.1415926/2.)/3.1415926*180.; - double dangle3 = (dir3.angle(drift_dir)-3.1415926/2.)/3.1415926*180.; - double dangle3_1 = (dir3_1.angle(drift_dir)-3.1415926/2.)/3.1415926*180.; + double dangle3 = (dir3.angle(drift_dir_abs)-3.1415926/2.)/3.1415926*180.; + double dangle3_1 = (dir3_1.angle(drift_dir_abs)-3.1415926/2.)/3.1415926*180.; double dangle4 = dangle1 + dangle2; double dangle4_1 = dangle1_1 + dangle2_1; @@ -427,4 +414,117 @@ bool WireCell::PointCloud::Facade::Clustering_1st_round( return false; } + +// Expand this function to handle multiple APA/Faces ... 
+static void clustering_regular( + Grouping& live_grouping, + + IDetectorVolumes::pointer dv, + const Tree::Scope& scope, + const double length_cut, + bool flag_enable_extend) +{ + // Get all the wire plane IDs from the grouping + const auto& wpids = live_grouping.wpids(); + + // Key: pair, Value: drift_dir, angle_u, angle_v, angle_w + std::map> wpid_params; + std::map wpid_U_dir; + std::map wpid_V_dir; + std::map wpid_W_dir; + std::set apas; + for (const auto& wpid : wpids) { + int apa = wpid.apa(); + int face = wpid.face(); + apas.insert(apa); + + // Create wpids for all three planes with this APA and face + WirePlaneId wpid_u(kUlayer, face, apa); + WirePlaneId wpid_v(kVlayer, face, apa); + WirePlaneId wpid_w(kWlayer, face, apa); + + // Get drift direction based on face orientation + int face_dirx = dv->face_dirx(wpid_u); + geo_point_t drift_dir(face_dirx, 0, 0); + + // Get wire directions for all planes + Vector wire_dir_u = dv->wire_direction(wpid_u); + Vector wire_dir_v = dv->wire_direction(wpid_v); + Vector wire_dir_w = dv->wire_direction(wpid_w); + + // Calculate angles + double angle_u = std::atan2(wire_dir_u.z(), wire_dir_u.y()); + double angle_v = std::atan2(wire_dir_v.z(), wire_dir_v.y()); + double angle_w = std::atan2(wire_dir_w.z(), wire_dir_w.y()); + + wpid_params[wpid] = std::make_tuple(drift_dir, angle_u, angle_v, angle_w); + wpid_U_dir[wpid] = geo_point_t(0, cos(angle_u), sin(angle_u)); + wpid_V_dir[wpid] = geo_point_t(0, cos(angle_v), sin(angle_v)); + wpid_W_dir[wpid] = geo_point_t(0, cos(angle_w), sin(angle_w)); + } + + + + + double internal_length_cut = 10 *units::cm; + if (flag_enable_extend) { + internal_length_cut = 15 *units::cm; + } + + // prepare graph ... 
+ typedef cluster_connectivity_graph_t Graph; + Graph g; + std::unordered_map ilive2desc; // added live index to graph descriptor + std::map map_cluster_index; + const auto& live_clusters = live_grouping.children(); + + for (size_t ilive = 0; ilive < live_clusters.size(); ++ilive) { + auto& live = live_clusters.at(ilive); + map_cluster_index[live] = ilive; + ilive2desc[ilive] = boost::add_vertex(ilive, g); + if (live->get_default_scope().hash() != scope.hash()) { + live->set_default_scope(scope); + // std::cout << "Test: Set default scope: " << pc_name << " " << coords[0] << " " << coords[1] << " " << coords[2] << " " << cluster->get_default_scope().hash() << " " << scope.hash() << std::endl; + } + } + + // original algorithm ... (establish edges ... ) + + for (size_t i=0;i!=live_clusters.size();i++){ + auto cluster_1 = live_clusters.at(i); + if (!cluster_1->get_scope_filter(scope)) continue; + if (cluster_1->get_length() < internal_length_cut) continue; + for (size_t j=i+1;jget_scope_filter(scope)) continue; + if (cluster_2->get_length() < internal_length_cut) continue; + if (Clustering_1st_round(*cluster_1,*cluster_2, cluster_1->get_length(), cluster_2->get_length(), wpid_U_dir, wpid_V_dir, wpid_W_dir, dv, length_cut, flag_enable_extend)){ + // to_be_merged_pairs.insert(std::make_pair(cluster_1,cluster_2)); + boost::add_edge(ilive2desc[map_cluster_index[cluster_1]], + ilive2desc[map_cluster_index[cluster_2]], g); + } + } + } + + // new function to merge clusters ... 
+ merge_clusters(g, live_grouping); + + + // { + // auto live_clusters = live_grouping.children(); // copy + // // Process each cluster + // for (size_t iclus = 0; iclus < live_clusters.size(); ++iclus) { + // Cluster* cluster = live_clusters.at(iclus); + // auto& scope = cluster->get_default_scope(); + // std::cout << "Test: " << iclus << " " << cluster->nchildren() << " " << scope.pcname << " " << scope.coords[0] << " " << scope.coords[1] << " " << scope.coords[2] << " " << cluster->get_scope_filter(scope)<< " " << cluster->get_pca().center) << std::endl; + // } + // } + + + + + + +} + #pragma GCC diagnostic pop diff --git a/clus/src/clustering_retile.cxx b/clus/src/clustering_retile.cxx new file mode 100644 index 000000000..2c0a06046 --- /dev/null +++ b/clus/src/clustering_retile.cxx @@ -0,0 +1,120 @@ +// This defines the component ClusteringRetile, a "clustering function" aka +// "ensemble visitor" which delegates to an IPCTreeTransform to produce a new +// set of clusters from a subset of an input set. See retile_cluster.cxx. 
+ + +#include "WireCellUtil/RayTiling.h" +#include "WireCellUtil/RayHelpers.h" + +#include "WireCellIface/IBlob.h" +#include "WireCellIface/IBlobSampler.h" +#include "WireCellIface/IAnodeFace.h" +#include "WireCellIface/IDetectorVolumes.h" +#include "WireCellIface/IPCTreeMutate.h" + +#include "WireCellAux/PlaneTools.h" + +#include "WireCellClus/Facade_Grouping.h" +#include "WireCellClus/Facade_Cluster.h" +#include "WireCellClus/ClusteringFuncsMixins.h" + + +#include "WireCellClus/IEnsembleVisitor.h" +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellClus/ClusteringFuncsMixins.h" + +#include "WireCellIface/IConfigurable.h" + +#include "WireCellUtil/NamedFactory.h" + +#include "WireCellAux/SimpleBlob.h" +#include "WireCellAux/SamplingHelpers.h" + +#include "WireCellUtil/PointTree.h" + +#include "WireCellAux/SimpleSlice.h" +#include "WireCellClus/GroupingHelper.h" + + + +#include + +class ClusteringRetile; +WIRECELL_FACTORY(ClusteringRetile, ClusteringRetile, + WireCell::IConfigurable, WireCell::Clus::IEnsembleVisitor) + +using namespace WireCell; +using namespace WireCell::Clus; +using namespace WireCell::Clus::Facade; +using namespace WireCell::PointCloud::Tree; + + + +class ClusteringRetile : public IConfigurable, public Clus::IEnsembleVisitor, private Clus::NeedScope { +public: + ClusteringRetile() {}; + virtual ~ClusteringRetile() {}; + + void configure(const WireCell::Configuration& cfg) { + NeedScope::configure(cfg); + m_retiler = Factory::find_tn(get(cfg, "retiler", "RetileCluster")); + } + virtual Configuration default_configuration() const { + Configuration cfg; + cfg["retiler"] = "RetileCluster"; + return cfg; + } + + void visit(Ensemble& ensemble) const; + +private: + IPCTreeMutate::pointer m_retiler; + + // std::map> process_groupings( + // Grouping& original, + // Grouping& shadow, + // const std::string& aname = "isolated", + // const std::string& pname = "perblob") const; + + +}; + +void ClusteringRetile::visit(Ensemble& ensemble) const +{ + 
// fixme: make grouping names configurable + + auto& orig_grouping = *ensemble.with_name("live").at(0); + auto& shad_grouping = ensemble.make_grouping("shadow"); + shad_grouping.from(orig_grouping); + auto* shad_root = shad_grouping.node(); + + for (auto* orig_cluster : orig_grouping.children()) { + + if (!orig_cluster->get_scope_filter(m_scope)) { + // move on if the cluster is not in the scope filter ... + continue; + } + + if (orig_cluster->get_default_scope().hash() != m_scope.hash()) { + orig_cluster->set_default_scope(m_scope); + } + + auto shad_node = m_retiler->mutate(*orig_cluster->node()); + if (! shad_node) { + continue; + } + shad_root->insert(std::move(shad_node)); + } +} + + +// std::map> +// ClusteringRetile::process_groupings( +// WCF::Grouping& original, +// WCF::Grouping& shadow, +// const std::string& aname, +// const std::string& pname) const +// { +// return process_groupings_helper(original, shadow, aname, pname); +// } + diff --git a/clus/src/clustering_separate.cxx b/clus/src/clustering_separate.cxx index ab98c9c75..eb94e88f9 100644 --- a/clus/src/clustering_separate.cxx +++ b/clus/src/clustering_separate.cxx @@ -1,102 +1,136 @@ -#include +#include "WireCellClus/IEnsembleVisitor.h" +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellClus/ClusteringFuncsMixins.h" + +#include "WireCellIface/IConfigurable.h" + +#include "WireCellUtil/NamedFactory.h" + +class ClusteringSeparate; +WIRECELL_FACTORY(ClusteringSeparate, ClusteringSeparate, + WireCell::IConfigurable, WireCell::Clus::IEnsembleVisitor) -// The original developers do not care. 
-#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wparentheses" using namespace WireCell; using namespace WireCell::Clus; -using namespace WireCell::Aux; -using namespace WireCell::Aux::TensorDM; -using namespace WireCell::PointCloud::Facade; +using namespace WireCell::Clus::Graphs; +using namespace WireCell::Clus::Facade; using namespace WireCell::PointCloud::Tree; -// bool flag_debug_porting = false; -void WireCell::PointCloud::Facade::clustering_separate(Grouping& live_grouping, - const bool use_ctpc) +static void clustering_separate(Grouping& live_grouping, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts, + const Tree::Scope& scope, + const bool use_ctpc); + +class ClusteringSeparate : public IConfigurable, public Clus::IEnsembleVisitor, private NeedDV, private NeedPCTS, private NeedScope { +public: + ClusteringSeparate() {} + virtual ~ClusteringSeparate() {} + + void configure(const WireCell::Configuration& config) { + NeedDV::configure(config); + NeedPCTS::configure(config); + NeedScope::configure(config); + + use_ctpc_ = get(config, "use_ctpc", true); + } + + void visit(Ensemble& ensemble) const { + auto& live = *ensemble.with_name("live").at(0); + clustering_separate(live, m_dv, m_pcts, m_scope, use_ctpc_); + } + +private: + double use_ctpc_{true}; +}; + + +// The original developers do not care. +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wparentheses" + +// this algorithm should be able to handle multiple APA/face now .. 
+static void clustering_separate( + Grouping& live_grouping, + const IDetectorVolumes::pointer dv, // detector volumes + const IPCTransformSet::pointer pcts, + const Tree::Scope& scope, + const bool use_ctpc) { - std::map>& dead_u_index = live_grouping.get_dead_winds(0, 0); - std::map>& dead_v_index = live_grouping.get_dead_winds(0, 1); - std::map>& dead_w_index = live_grouping.get_dead_winds(0, 2); - // std::cout << "dead_u_index size: " << dead_u_index.size() << std::endl; - // std::cout << "dead_v_index size: " << dead_v_index.size() << std::endl; - // std::cout << "dead_w_index size: " << dead_w_index.size() << std::endl; + // Check that live_grouping has exactly one wpid + // if (live_grouping.wpids().size() != 1 ) { + // throw std::runtime_error("Live or Dead grouping must have exactly one wpid"); + // } + geo_point_t drift_dir_abs(1,0,0); + std::vector live_clusters = live_grouping.children(); // copy // sort the clusters by length using a lambda function std::sort(live_clusters.begin(), live_clusters.end(), [](const Cluster *cluster1, const Cluster *cluster2) { return cluster1->get_length() > cluster2->get_length(); }); - geo_point_t drift_dir(1, 0, 0); - geo_point_t beam_dir(0, 0, 1); - geo_point_t vertical_dir(0, 1, 0); + auto wpids = live_grouping.wpids(); - // ExecMon em("sep starting"); + WirePlaneId wpid_all(0); + // double det_FV_ymin = dv->metadata(wpid_all)["FV_ymin"].asDouble(); + double det_FV_ymax = dv->metadata(wpid_all)["FV_ymax"].asDouble(); - const auto &mp = live_grouping.get_params(); - // this is for 4 time slices - double live_time_slice_width = mp.nticks_live_slice * mp.tick_drift; + geo_point_t beam_dir(0, 0, 1); + geo_point_t vertical_dir(0, 1, 0); + + // Get vertical_dir from metadata + Json::Value vertical_dir_json = dv->metadata(wpid_all)["vertical_dir"]; + Json::Value beam_dir_json = dv->metadata(wpid_all)["beam_dir"]; + + if (!vertical_dir_json.isNull() && vertical_dir_json.isArray() && vertical_dir_json.size() >= 3) { + 
vertical_dir = geo_point_t( + vertical_dir_json[0].asDouble(), + vertical_dir_json[1].asDouble(), + vertical_dir_json[2].asDouble() + ); + } + if (!beam_dir_json.isNull() && beam_dir_json.isArray() && beam_dir_json.size() >= 3) { + beam_dir = geo_point_t( + beam_dir_json[0].asDouble(), + beam_dir_json[1].asDouble(), + beam_dir_json[2].asDouble() + ); + } std::vector new_clusters; std::vector del_clusters; for (size_t i = 0; i != live_clusters.size(); i++) { Cluster *cluster = live_clusters.at(i); - // FIXME: remove this after debugging - // std::cout << "Cluster #b " << cluster->nchildren() << std::endl; - // std::set debug_nblobs = {19, 62, 612, 37}; - // if (debug_nblobs.find(cluster->nchildren()) != debug_nblobs.end()) { - // if (false) { - // const size_t orig_nchildren = cluster->nchildren(); - // // cluster->Create_graph(); - // // std::cout << " dump_graph: " << cluster->dump_graph() << std::endl; - // const auto debug_clusters = Separate_2(cluster, 5 * units::cm); - // std::cout << " #b " << orig_nchildren << " debug_clusters.size() " << debug_clusters.size() << std::endl; - // continue; - // } - // flag_debug_porting = false; - // if (cluster->nchildren() == 612) { - // flag_debug_porting = true; - // std::cout << " cluster->dump() " << cluster->dump() << std::endl; - // } + if (!cluster->get_scope_filter(scope)) continue; + if (cluster->get_default_scope().hash() != scope.hash()) { + cluster->set_default_scope(scope); + // std::cout << "Test: Set default scope: " << pc_name << " " << coords[0] << " " << coords[1] << " " << coords[2] << " " << cluster->get_default_scope().hash() << " " << scope.hash() << std::endl; + } if (cluster->get_length() > 100 * units::cm) { std::vector boundary_points; std::vector independent_points; bool flag_proceed = - JudgeSeparateDec_2(cluster, drift_dir, boundary_points, independent_points, cluster->get_length()); - // if (flag_debug_porting) { - // std::cout - // << " flag_proceed " << flag_proceed - // << " 
boundary_points.size() " << boundary_points.size() - // << " independent_points.size() " << independent_points.size() - // << std::endl; - // } - - // std::cout << "Info: " << cluster->nchildren() << " " << cluster->get_length() << std::endl; - - + JudgeSeparateDec_2(cluster, dv, drift_dir_abs, boundary_points, independent_points, cluster->get_length()); + if (!flag_proceed && cluster->get_length() > 100 * units::cm && - JudgeSeparateDec_1(cluster, drift_dir, cluster->get_length(), live_time_slice_width) && + JudgeSeparateDec_1(cluster, drift_dir_abs, cluster->get_length()) && independent_points.size() > 0) { bool flag_top = false; for (size_t j = 0; j != independent_points.size(); j++) { - if (independent_points.at(j).y() > mp.FV_ymax) { + if (independent_points.at(j).y() > det_FV_ymax) { flag_top = true; break; } } - // if (flag_debug_porting) { - // std::cout << "flag_top " << flag_top << std::endl; - // } - - // cluster->Calc_PCA(); - geo_point_t main_dir(cluster->get_pca_axis(0).x(), cluster->get_pca_axis(0).y(), - cluster->get_pca_axis(0).z()); + geo_point_t main_dir(cluster->get_pca().axis.at(0).x(), cluster->get_pca().axis.at(0).y(), + cluster->get_pca().axis.at(0).z()); if (flag_top) { if (fabs(main_dir.angle(beam_dir) - 3.1415926 / 2.) / 3.1415926 * 180. < 16 || @@ -107,26 +141,22 @@ void WireCell::PointCloud::Facade::clustering_separate(Grouping& live_grouping, fabs(main_dir.angle(beam_dir) - 3.1415926 / 2.) / 3.1415926 * 180. < 65 && cluster->get_length() > 360 * units::cm || fabs(main_dir.angle(beam_dir) - 3.1415926 / 2.) / 3.1415926 * 180. < 45 && - cluster->get_pca_value(1) > 0.75 * cluster->get_pca_value(0) || + cluster->get_pca().values.at(1) > 0.75 * cluster->get_pca().values.at(0) || fabs(main_dir.angle(beam_dir) - 3.1415926 / 2.) / 3.1415926 * 180. 
< 40 && - cluster->get_pca_value(1) > 0.55 * cluster->get_pca_value(0)) { + cluster->get_pca().values.at(1) > 0.55 * cluster->get_pca().values.at(0)) { flag_proceed = true; } else { if (fabs(main_dir.angle(beam_dir) - 3.1415926 / 2.) / 3.1415926 * 180. < 40 && - cluster->get_pca_value(1) > 0.2 * cluster->get_pca_value(0)) { + cluster->get_pca().values.at(1) > 0.2 * cluster->get_pca().values.at(0)) { // std::vector temp_sep_clusters = Separate_2(cluster, 10 * units::cm); - const auto b2id = Separate_2(cluster, 10 * units::cm); + const auto b2id = Separate_2(cluster, scope, 10 * units::cm); std::set ids; for (const auto& id : b2id) { ids.insert(id); } int num_clusters = 0; - // for (size_t k = 0; k != temp_sep_clusters.size(); k++) { - // double length_1 = temp_sep_clusters.at(k)->get_length(); - // if (length_1 > 60 * units::cm) num_clusters++; - // // delete temp_sep_clusters.at(k); - // } + for (const auto id : ids) { double length_1 = get_length(cluster, b2id, id); if (length_1 > 60 * units::cm) num_clusters++; @@ -145,27 +175,25 @@ void WireCell::PointCloud::Facade::clustering_separate(Grouping& live_grouping, fabs(main_dir.angle(beam_dir) - 3.1415926 / 2.) / 3.1415926 * 180. < 35 && cluster->get_length() > 330 * units::cm || fabs(main_dir.angle(beam_dir) - 3.1415926 / 2.) / 3.1415926 * 180. 
< 30 && - cluster->get_pca_value(1) > 0.55 * cluster->get_pca_value(0)) { + cluster->get_pca().values.at(1) > 0.55 * cluster->get_pca().values.at(0)) { flag_proceed = true; } } // std::cout << flag_top << " " << flag_proceed << std::endl; } - // if (flag_debug_porting) { - // std::cout << "flag_proceed " << flag_proceed << std::endl; - // } + if (flag_proceed) { - if (JudgeSeparateDec_1(cluster, drift_dir, cluster->get_length(), live_time_slice_width)) { + if (JudgeSeparateDec_1(cluster, drift_dir_abs, cluster->get_length())) { // std::cerr << em("sep prepare sep") << std::endl; - // const size_t orig_nchildren = cluster->nchildren(); + const size_t orig_nchildren = cluster->nchildren(); //std::cout << "Separate Cluster with " << orig_nchildren << " blobs (ctpc) length " << cluster->get_length() << std::endl; std::vector sep_clusters = - Separate_1(use_ctpc, cluster, boundary_points, independent_points, dead_u_index, - dead_v_index, dead_w_index, cluster->get_length()); + Separate_1(use_ctpc, cluster, boundary_points, independent_points, cluster->get_length(), vertical_dir, beam_dir, dv, pcts, scope); - //std::cout << "Separate Separate_1 for " << orig_nchildren << " " << " returned " << sep_clusters.size() << " clusters" << std::endl; + std::cout << "Separate Separate_1 for " << orig_nchildren << " " << " returned " << sep_clusters.size() << " clusters" << std::endl; + Cluster *cluster1 = sep_clusters.at(0); new_clusters.push_back(cluster1); @@ -188,14 +216,13 @@ void WireCell::PointCloud::Facade::clustering_separate(Grouping& live_grouping, boundary_points.clear(); independent_points.clear(); - if (JudgeSeparateDec_1(cluster2, drift_dir, length_1, live_time_slice_width) && - JudgeSeparateDec_2(cluster2, drift_dir, boundary_points, independent_points, + if (JudgeSeparateDec_1(cluster2, drift_dir_abs, length_1) && + JudgeSeparateDec_2(cluster2, dv, drift_dir_abs, boundary_points, independent_points, length_1)) { std::vector sep_clusters = - Separate_1(use_ctpc, 
cluster2, boundary_points, independent_points, - dead_u_index, dead_v_index, dead_w_index, length_1); + Separate_1(use_ctpc, cluster2, boundary_points, independent_points, length_1, vertical_dir, beam_dir, dv, pcts, scope); - //std::cout << "Separate Separate_1 1 for " << orig_nchildren << " " << " returned " << sep_clusters.size() << " clusters" << std::endl; + // std::cout << "Separate Separate_1 1 for " << orig_nchildren << " " << " returned " << sep_clusters.size() << " clusters" << std::endl; Cluster *cluster3 = sep_clusters.at(0); new_clusters.push_back(cluster3); @@ -215,14 +242,14 @@ void WireCell::PointCloud::Facade::clustering_separate(Grouping& live_grouping, if (length_1 > 100 * units::cm) { boundary_points.clear(); independent_points.clear(); - if (JudgeSeparateDec_1(cluster4, drift_dir, length_1, live_time_slice_width) && - JudgeSeparateDec_2(cluster4, drift_dir, boundary_points, independent_points, + if (JudgeSeparateDec_1(cluster4, drift_dir_abs, length_1) && + JudgeSeparateDec_2(cluster4, dv, drift_dir_abs, boundary_points, independent_points, length_1)) { - // std::cout << "Separate 3rd level" << std::endl; + + // std::cout << "Separate 3rd level" << std::endl; std::vector sep_clusters = Separate_1( - use_ctpc, cluster4, boundary_points, independent_points, - dead_u_index, dead_v_index, dead_w_index, length_1); + use_ctpc, cluster4, boundary_points, independent_points, length_1, vertical_dir, beam_dir, dv, pcts, scope); // std::cerr << em("sep sep3") << std::endl; @@ -259,15 +286,15 @@ void WireCell::PointCloud::Facade::clustering_separate(Grouping& live_grouping, if (length_1 > 60 * units::cm) { boundary_points.clear(); independent_points.clear(); - JudgeSeparateDec_1(final_sep_cluster, drift_dir, length_1, live_time_slice_width); - JudgeSeparateDec_2(final_sep_cluster, drift_dir, boundary_points, independent_points, + JudgeSeparateDec_1(final_sep_cluster, drift_dir_abs, length_1); + JudgeSeparateDec_2(final_sep_cluster, dv, drift_dir_abs, 
boundary_points, independent_points, length_1); if (independent_points.size() > 0) { + // std::cout << "Separate final one" << std::endl; std::vector sep_clusters = Separate_1( - use_ctpc, final_sep_cluster, boundary_points, independent_points, - dead_u_index, dead_v_index, dead_w_index, length_1); + use_ctpc, final_sep_cluster, boundary_points, independent_points, length_1, vertical_dir, beam_dir, dv, pcts, scope); //std::cout << "Separate Separate_1 2 for " << orig_nchildren << " " << " returned " << sep_clusters.size() << " clusters" << std::endl; // std::cerr << em("sep sep4") << std::endl; @@ -293,21 +320,15 @@ void WireCell::PointCloud::Facade::clustering_separate(Grouping& live_grouping, if (final_sep_cluster != 0) { // 2 // std::vector final_sep_clusters = Separate_2(final_sep_cluster); - const auto b2id = Separate_2(final_sep_cluster); + const auto b2id = Separate_2(final_sep_cluster, scope); + auto scope_transform = final_sep_cluster->get_scope_transform(scope); auto final_sep_clusters = live_grouping.separate(final_sep_cluster,b2id,true); + assert(final_sep_cluster == nullptr); - // for (auto it = final_sep_clusters.begin(); it != final_sep_clusters.end(); it++) { - // new_clusters.push_back(*it); - // } - - //temp_del_clusters.push_back(final_sep_cluster); + } } - // for (auto it = temp_del_clusters.begin(); it != temp_del_clusters.end(); it++) { - // delete *it; - // } - // std::cerr << em("sep del sep1") << std::endl; } } else if (cluster->get_length() < 6 * units::m) { @@ -315,9 +336,9 @@ void WireCell::PointCloud::Facade::clustering_separate(Grouping& live_grouping, //std::cout << "Stripping Cluster with " << orig_nchildren << " blobs (ctpc) length " << cluster->get_length() << std::endl; // std::cout << boundary_points.size() << " " << independent_points.size() << std::endl; std::vector sep_clusters = - Separate_1(use_ctpc, cluster, boundary_points, independent_points, dead_u_index, - dead_v_index, dead_w_index, cluster->get_length()); - // 
std::cout << "Stripping Separate_1 for " << orig_nchildren << " returned " << sep_clusters.size() << " clusters" << std::endl; + Separate_1(use_ctpc, cluster, boundary_points, independent_points, cluster->get_length(), vertical_dir, beam_dir, dv, pcts, scope); + + // std::cout << "Stripping Separate_1 for " << orig_nchildren << " returned " << sep_clusters.size() << " clusters" << std::endl; Cluster *cluster1 = sep_clusters.at(0); new_clusters.push_back(cluster1); @@ -336,75 +357,98 @@ void WireCell::PointCloud::Facade::clustering_separate(Grouping& live_grouping, Cluster *final_sep_cluster = cluster2; // std::vector final_sep_clusters = Separate_2(final_sep_cluster); - const auto b2id = Separate_2(final_sep_cluster); + const auto b2id = Separate_2(final_sep_cluster, scope); + auto scope_transform = final_sep_cluster->get_scope_transform(scope); auto final_sep_clusters = live_grouping.separate(final_sep_cluster, b2id, true); assert(final_sep_cluster == nullptr); cluster2 = final_sep_cluster = sep_clusters[1] = nullptr; - // for (auto it = final_sep_clusters.begin(); it != final_sep_clusters.end(); it++) { - // new_clusters.push_back(*it); - // } - //temp_del_clusters.push_back(final_sep_cluster); - // delete final_sep_cluster; - - // for (auto it = temp_del_clusters.begin(); it != temp_del_clusters.end(); it++) { - // delete *it; - // } } } // else ... } } } - // std::cout << "Separate clusters: " << new_clusters.size() << std::endl; - // /// FIXME: remove these? 
since the live_clusters is just a copy of raw pointers - // for (auto it = new_clusters.begin(); it != new_clusters.end(); it++) { - // Cluster *ncluster = (*it); - // live_clusters.push_back(ncluster); - // } - // std::cout << "Delete clusters: " << del_clusters.size() << std::endl; - // for (auto it = del_clusters.begin(); it != del_clusters.end(); it++) { - // Cluster *ocluster = (*it); - // live_clusters.erase(find(live_clusters.begin(), live_clusters.end(), ocluster)); - // // delete ocluster; - // } + +// { +// auto live_clusters = live_grouping.children(); // copy +// // Process each cluster +// for (size_t iclus = 0; iclus < live_clusters.size(); ++iclus) { +// Cluster* cluster = live_clusters.at(iclus); +// auto& scope = cluster->get_default_scope(); +// std::cout << "Test: " << iclus << " " << cluster->nchildren() << " " << scope.pcname << " " << scope.coords[0] << " " << scope.coords[1] << " " << scope.coords[2] << " " << cluster->get_scope_filter(scope)<< " " << cluster->get_pca().center << std::endl; +// } +// } + + + + + + + } -/// @brief PCA based -bool WireCell::PointCloud::Facade::JudgeSeparateDec_1(const Cluster* cluster, const geo_point_t& drift_dir, const double length, const double time_slice_length) +/// @brief PCA based, drift_dir +x, -x the same ... 
+bool WireCell::Clus::Facade::JudgeSeparateDec_1(const Cluster* cluster, const geo_point_t& drift_dir_abs, const double length) { // get the main axis - geo_point_t dir1(cluster->get_pca_axis(0).x(), cluster->get_pca_axis(0).y(), cluster->get_pca_axis(0).z()); - geo_point_t dir2(cluster->get_pca_axis(1).x(), cluster->get_pca_axis(1).y(), cluster->get_pca_axis(1).z()); - geo_point_t dir3(cluster->get_pca_axis(2).x(), cluster->get_pca_axis(2).y(), cluster->get_pca_axis(2).z()); + geo_point_t dir1(cluster->get_pca().axis.at(0).x(), cluster->get_pca().axis.at(0).y(), cluster->get_pca().axis.at(0).z()); + geo_point_t dir2(cluster->get_pca().axis.at(1).x(), cluster->get_pca().axis.at(1).y(), cluster->get_pca().axis.at(1).z()); + geo_point_t dir3(cluster->get_pca().axis.at(2).x(), cluster->get_pca().axis.at(2).y(), cluster->get_pca().axis.at(2).z()); - double angle1 = fabs(dir2.angle(drift_dir) - 3.1415926 / 2.) / 3.1415926 * 180.; + double angle1 = fabs(dir2.angle(drift_dir_abs) - 3.1415926 / 2.) / 3.1415926 * 180.; /// CHECKME: is "time_slice_length" drift_speed * tick? - double temp_angle1 = asin(cluster->get_num_time_slices() * time_slice_length / length) / 3.1415926 * 180.; + auto points = cluster->get_earliest_latest_points(); + double temp_angle1 = asin(fabs(points.first.x() - points.second.x()) / length) / 3.1415926 * 180.; - double angle2 = fabs(dir3.angle(drift_dir) - 3.1415926 / 2.) / 3.1415926 * 180.; - double ratio1 = cluster->get_pca_value(1) / cluster->get_pca_value(0); - double ratio2 = cluster->get_pca_value(2) / cluster->get_pca_value(0); + double angle2 = fabs(dir3.angle(drift_dir_abs) - 3.1415926 / 2.) 
/ 3.1415926 * 180.; + double ratio1 = cluster->get_pca().values.at(1) / cluster->get_pca().values.at(0); + double ratio2 = cluster->get_pca().values.at(2) / cluster->get_pca().values.at(0); - // if (flag_debug_porting) - // { - // std::cout << "JudgeSeparateDec_1: angle1 " << angle1 << " temp_angle1 " << temp_angle1 << " angle2 " << angle2 - // << " ratio1 " << ratio1 << " ratio2 " << ratio2 << std::endl; - // } + // std::cout << ratio1 << " " << pow(10, exp(1.38115 - 1.19312 * pow(angle1, 1. / 3.)) - 2.2) << " " << ratio1 << " " << pow(10, exp(1.38115 - 1.19312 * pow(temp_angle1, 1. / 3.)) - 2.2) << " " << ratio2 << " " << pow(10, exp(1.38115 - 1.19312 * pow(angle2, 1. / 3.)) - 2.2) << " " << ratio1 << " " << angle1 << " " << angle2 << std::endl; + + // add some additional check ... PDHD + if (angle1 < 5 && ratio2 < 0.05 && cluster->get_length() > 300*units::cm) return false; if (ratio1 > pow(10, exp(1.38115 - 1.19312 * pow(angle1, 1. / 3.)) - 2.2) || ratio1 > pow(10, exp(1.38115 - 1.19312 * pow(temp_angle1, 1. / 3.)) - 2.2) || - ratio2 > pow(10, exp(1.38115 - 1.19312 * pow(angle2, 1. / 3.)) - 2.2) || ratio1 > 0.75) + ratio2 > pow(10, exp(1.38115 - 1.19312 * pow(angle2, 1. 
/ 3.)) - 2.2) || + ratio1 > 0.75) return true; return false; } -bool WireCell::PointCloud::Facade::JudgeSeparateDec_2(const Cluster* cluster, const geo_point_t& drift_dir, +bool WireCell::Clus::Facade::JudgeSeparateDec_2(const Cluster* cluster, const IDetectorVolumes::pointer dv, const geo_point_t& drift_dir_abs, std::vector& boundary_points, std::vector& independent_points, const double cluster_length) { - const auto &mp = cluster->grouping()->get_params(); + // const auto &mp = cluster->grouping()->get_params(); + + auto wpids = cluster->grouping()->wpids(); + std::map map_FV_xmin; + std::map map_FV_xmax; + std::map map_FV_xmin_margin; + std::map map_FV_xmax_margin; + for (const auto& wpid : wpids) { + map_FV_xmin[wpid] = dv->metadata(wpid)["FV_xmin"].asDouble() ; + map_FV_xmax[wpid] = dv->metadata(wpid)["FV_xmax"].asDouble() ; + map_FV_xmin_margin[wpid] = dv->metadata(wpid)["FV_xmin_margin"].asDouble() ; + map_FV_xmax_margin[wpid] = dv->metadata(wpid)["FV_xmax_margin"].asDouble() ; + } + WirePlaneId wpid_all(0); + double det_FV_xmin = dv->metadata(wpid_all)["FV_xmin"].asDouble(); + double det_FV_xmax = dv->metadata(wpid_all)["FV_xmax"].asDouble(); + double det_FV_ymin = dv->metadata(wpid_all)["FV_ymin"].asDouble(); + double det_FV_ymax = dv->metadata(wpid_all)["FV_ymax"].asDouble(); + double det_FV_zmin = dv->metadata(wpid_all)["FV_zmin"].asDouble(); + double det_FV_zmax = dv->metadata(wpid_all)["FV_zmax"].asDouble(); + double det_FV_xmin_margin = dv->metadata(wpid_all)["FV_xmin_margin"].asDouble(); + double det_FV_xmax_margin = dv->metadata(wpid_all)["FV_xmax_margin"].asDouble(); + // double det_FV_ymin_margin = dv->metadata(wpid_all)["FV_ymin_margin"].asDouble(); + double det_FV_ymax_margin = dv->metadata(wpid_all)["FV_ymax_margin"].asDouble(); + double det_FV_zmin_margin = dv->metadata(wpid_all)["FV_zmin_margin"].asDouble(); + double det_FV_zmax_margin = dv->metadata(wpid_all)["FV_zmax_margin"].asDouble(); boundary_points = cluster->get_hull(); std::vector 
hy_points; @@ -439,12 +483,11 @@ bool WireCell::PointCloud::Facade::JudgeSeparateDec_2(const Cluster* cluster, co } bool flag_outx = false; - /// FIXME: hard-coded fiducial volume boundaries, needs to be passed in - if (hx_points.at(0).x() > mp.FV_xmax + mp.FV_xmax_margin || lx_points.at(0).x() < mp.FV_xmin - mp.FV_xmin_margin) flag_outx = true; + if (hx_points.at(0).x() > det_FV_xmax + det_FV_xmax_margin || lx_points.at(0).x() < det_FV_xmin - det_FV_xmin_margin) flag_outx = true; - if (hy_points.at(0).y() > mp.FV_ymax) { + if (hy_points.at(0).y() > det_FV_ymax) { for (size_t j = 0; j != boundary_points.size(); j++) { - if (boundary_points.at(j).y() > mp.FV_ymax) { + if (boundary_points.at(j).y() > det_FV_ymax) { bool flag_save = true; for (size_t k = 0; k != hy_points.size(); k++) { double dis = sqrt(pow(hy_points.at(k).x() - boundary_points.at(j).x(), 2) + @@ -460,9 +503,9 @@ bool WireCell::PointCloud::Facade::JudgeSeparateDec_2(const Cluster* cluster, co } } - if (ly_points.at(0).y() < mp.FV_ymin) { + if (ly_points.at(0).y() < det_FV_ymin) { for (size_t j = 0; j != boundary_points.size(); j++) { - if (boundary_points.at(j).y() < mp.FV_ymin) { + if (boundary_points.at(j).y() < det_FV_ymin) { bool flag_save = true; for (size_t k = 0; k != ly_points.size(); k++) { double dis = sqrt(pow(ly_points.at(k).x() - boundary_points.at(j).x(), 2) + @@ -477,9 +520,9 @@ bool WireCell::PointCloud::Facade::JudgeSeparateDec_2(const Cluster* cluster, co } } } - if (hz_points.at(0).z() > mp.FV_zmax) { + if (hz_points.at(0).z() > det_FV_zmax) { for (size_t j = 0; j != boundary_points.size(); j++) { - if (boundary_points.at(j).z() > mp.FV_zmax) { + if (boundary_points.at(j).z() > det_FV_zmax) { bool flag_save = true; for (size_t k = 0; k != hz_points.size(); k++) { double dis = sqrt(pow(hz_points.at(k).x() - boundary_points.at(j).x(), 2) + @@ -494,9 +537,9 @@ bool WireCell::PointCloud::Facade::JudgeSeparateDec_2(const Cluster* cluster, co } } } - if (lz_points.at(0).z() < mp.FV_zmin) 
{ + if (lz_points.at(0).z() < det_FV_zmin) { for (size_t j = 0; j != boundary_points.size(); j++) { - if (boundary_points.at(j).z() < mp.FV_zmin) { + if (boundary_points.at(j).z() < det_FV_zmin) { bool flag_save = true; for (size_t k = 0; k != lz_points.size(); k++) { double dis = sqrt(pow(lz_points.at(k).x() - boundary_points.at(j).x(), 2) + @@ -516,9 +559,9 @@ bool WireCell::PointCloud::Facade::JudgeSeparateDec_2(const Cluster* cluster, co int num_outx_points = 0; for (size_t j = 0; j != hy_points.size(); j++) { - if (hy_points.at(j).x() >= mp.FV_xmin && hy_points.at(j).x() <= mp.FV_xmax && - hy_points.at(j).y() >= mp.FV_ymin && hy_points.at(j).y() <= mp.FV_ymax && - hy_points.at(j).z() >= mp.FV_zmin && hy_points.at(j).z() <= mp.FV_zmax && (!flag_outx)) + if (hy_points.at(j).x() >= det_FV_xmin && hy_points.at(j).x() <= det_FV_xmax && + hy_points.at(j).y() >= det_FV_ymin && hy_points.at(j).y() <= det_FV_ymax && + hy_points.at(j).z() >= det_FV_zmin && hy_points.at(j).z() <= det_FV_zmax && (!flag_outx)) continue; bool flag_save = true; @@ -530,36 +573,36 @@ bool WireCell::PointCloud::Facade::JudgeSeparateDec_2(const Cluster* cluster, co } if (flag_save) { independent_points.push_back(hy_points.at(j)); - if (hy_points.at(j).y() > mp.FV_ymax + mp.FV_ymax_margin) { + if (hy_points.at(j).y() > det_FV_ymax + det_FV_ymax_margin) { independent_surfaces.insert(0); } - else if (hy_points.at(j).y() < mp.FV_ymin) { + else if (hy_points.at(j).y() < det_FV_ymin) { independent_surfaces.insert(1); } - else if (hy_points.at(j).z() > mp.FV_zmax + mp.FV_zmax_margin) { + else if (hy_points.at(j).z() > det_FV_zmax + det_FV_zmax_margin) { independent_surfaces.insert(2); } - else if (hy_points.at(j).z() < mp.FV_zmin - mp.FV_zmin_margin) { + else if (hy_points.at(j).z() < det_FV_zmin - det_FV_zmin_margin) { independent_surfaces.insert(3); } - else if (hy_points.at(j).x() > mp.FV_xmax) { + else if (hy_points.at(j).x() > det_FV_xmax) { independent_surfaces.insert(4); } - else if 
(hy_points.at(j).x() < mp.FV_xmin) { + else if (hy_points.at(j).x() < det_FV_xmin) { independent_surfaces.insert(5); } - if (hy_points.at(j).y() > mp.FV_ymax + mp.FV_ymax_margin || hy_points.at(j).y() < mp.FV_ymin || - hy_points.at(j).z() < mp.FV_zmin - mp.FV_zmin_margin || hy_points.at(j).z() > mp.FV_zmax + mp.FV_zmax_margin || - hy_points.at(j).x() < mp.FV_xmin || hy_points.at(j).x() > mp.FV_xmax) + if (hy_points.at(j).y() > det_FV_ymax + det_FV_ymax_margin || hy_points.at(j).y() < det_FV_ymin || + hy_points.at(j).z() < det_FV_zmin - det_FV_zmin_margin || hy_points.at(j).z() > det_FV_zmax + det_FV_zmax_margin || + hy_points.at(j).x() < det_FV_xmin || hy_points.at(j).x() > det_FV_xmax) num_outside_points++; - if (hy_points.at(j).x() < mp.FV_xmin - mp.FV_xmin_margin || hy_points.at(j).x() > mp.FV_xmax - mp.FV_xmax_margin) num_outx_points++; + if (hy_points.at(j).x() < det_FV_xmin - det_FV_xmin_margin || hy_points.at(j).x() > det_FV_xmax - det_FV_xmax_margin) num_outx_points++; } } for (size_t j = 0; j != ly_points.size(); j++) { - if (ly_points.at(j).x() >= mp.FV_xmin && ly_points.at(j).x() <= mp.FV_xmax && - ly_points.at(j).y() >= mp.FV_ymin && ly_points.at(j).y() <= mp.FV_ymax && - ly_points.at(j).z() >= mp.FV_zmin && ly_points.at(j).z() <= mp.FV_zmax && (!flag_outx)) + if (ly_points.at(j).x() >= det_FV_xmin && ly_points.at(j).x() <= det_FV_xmax && + ly_points.at(j).y() >= det_FV_ymin && ly_points.at(j).y() <= det_FV_ymax && + ly_points.at(j).z() >= det_FV_zmin && ly_points.at(j).z() <= det_FV_zmax && (!flag_outx)) continue; bool flag_save = true; @@ -572,36 +615,36 @@ bool WireCell::PointCloud::Facade::JudgeSeparateDec_2(const Cluster* cluster, co if (flag_save) { independent_points.push_back(ly_points.at(j)); - if (ly_points.at(j).y() < mp.FV_ymin) { + if (ly_points.at(j).y() < det_FV_ymin) { independent_surfaces.insert(1); } - else if (ly_points.at(j).y() > mp.FV_ymax + mp.FV_ymax_margin) { + else if (ly_points.at(j).y() > det_FV_ymax + det_FV_ymax_margin) { 
independent_surfaces.insert(0); } - else if (ly_points.at(j).z() > mp.FV_zmax + mp.FV_zmax_margin) { + else if (ly_points.at(j).z() > det_FV_zmax + det_FV_zmax_margin) { independent_surfaces.insert(2); } - else if (ly_points.at(j).z() < mp.FV_zmin - mp.FV_zmin_margin) { + else if (ly_points.at(j).z() < det_FV_zmin - det_FV_zmin_margin) { independent_surfaces.insert(3); } - else if (ly_points.at(j).x() > mp.FV_xmax) { + else if (ly_points.at(j).x() > det_FV_xmax) { independent_surfaces.insert(4); } - else if (ly_points.at(j).x() < mp.FV_xmin) { + else if (ly_points.at(j).x() < det_FV_xmin) { independent_surfaces.insert(5); } - if (ly_points.at(j).y() > mp.FV_ymax + mp.FV_ymax_margin || ly_points.at(j).y() < mp.FV_ymin || - ly_points.at(j).z() < mp.FV_zmin - mp.FV_zmin_margin || ly_points.at(j).z() > mp.FV_zmax + mp.FV_zmax_margin || - ly_points.at(j).x() < mp.FV_xmin || ly_points.at(j).x() > mp.FV_xmax) + if (ly_points.at(j).y() > det_FV_ymax + det_FV_ymax_margin || ly_points.at(j).y() < det_FV_ymin || + ly_points.at(j).z() < det_FV_zmin - det_FV_zmin_margin || ly_points.at(j).z() > det_FV_zmax + det_FV_zmax_margin || + ly_points.at(j).x() < det_FV_xmin || ly_points.at(j).x() > det_FV_xmax) num_outside_points++; - if (ly_points.at(j).x() < mp.FV_xmin - mp.FV_xmin_margin || ly_points.at(j).x() > mp.FV_xmax - mp.FV_xmax_margin) num_outx_points++; + if (ly_points.at(j).x() < det_FV_xmin - det_FV_xmin_margin || ly_points.at(j).x() > det_FV_xmax - det_FV_xmax_margin) num_outx_points++; } } for (size_t j = 0; j != hz_points.size(); j++) { - if (hz_points.at(j).x() >= mp.FV_xmin && hz_points.at(j).x() <= mp.FV_xmax && - hz_points.at(j).y() >= mp.FV_ymin && hz_points.at(j).y() <= mp.FV_ymax && - hz_points.at(j).z() >= mp.FV_zmin && hz_points.at(j).z() <= mp.FV_zmax && (!flag_outx)) + if (hz_points.at(j).x() >= det_FV_xmin && hz_points.at(j).x() <= det_FV_xmax && + hz_points.at(j).y() >= det_FV_ymin && hz_points.at(j).y() <= det_FV_ymax && + hz_points.at(j).z() >= 
det_FV_zmin && hz_points.at(j).z() <= det_FV_zmax && (!flag_outx)) continue; bool flag_save = true; @@ -614,36 +657,36 @@ bool WireCell::PointCloud::Facade::JudgeSeparateDec_2(const Cluster* cluster, co if (flag_save) { independent_points.push_back(hz_points.at(j)); - if (hz_points.at(j).z() > mp.FV_zmax + mp.FV_zmax_margin) { + if (hz_points.at(j).z() > det_FV_zmax + det_FV_zmax_margin) { independent_surfaces.insert(2); } - else if (hz_points.at(j).z() < mp.FV_zmin - mp.FV_zmin_margin) { + else if (hz_points.at(j).z() < det_FV_zmin - det_FV_zmin_margin) { independent_surfaces.insert(3); } - else if (hz_points.at(j).y() > mp.FV_ymax + mp.FV_ymax_margin) { + else if (hz_points.at(j).y() > det_FV_ymax + det_FV_ymax_margin) { independent_surfaces.insert(0); } - else if (hz_points.at(j).y() < mp.FV_ymin) { + else if (hz_points.at(j).y() < det_FV_ymin) { independent_surfaces.insert(1); } - else if (hz_points.at(j).x() > mp.FV_xmax) { + else if (hz_points.at(j).x() > det_FV_xmax) { independent_surfaces.insert(4); } - else if (hz_points.at(j).x() < mp.FV_xmin) { + else if (hz_points.at(j).x() < det_FV_xmin) { independent_surfaces.insert(5); } - if (hz_points.at(j).y() > mp.FV_ymax + mp.FV_ymax_margin || hz_points.at(j).y() < mp.FV_ymin || - hz_points.at(j).z() < mp.FV_zmin - mp.FV_zmin_margin || hz_points.at(j).z() > mp.FV_zmax + mp.FV_zmax_margin || - hz_points.at(j).x() < mp.FV_xmin || hz_points.at(j).x() > mp.FV_xmax) + if (hz_points.at(j).y() > det_FV_ymax + det_FV_ymax_margin || hz_points.at(j).y() < det_FV_ymin || + hz_points.at(j).z() < det_FV_zmin - det_FV_zmin_margin || hz_points.at(j).z() > det_FV_zmax + det_FV_zmax_margin || + hz_points.at(j).x() < det_FV_xmin || hz_points.at(j).x() > det_FV_xmax) num_outside_points++; - if (hz_points.at(j).x() < mp.FV_xmin - mp.FV_xmin_margin || hz_points.at(j).x() > mp.FV_xmax - mp.FV_xmax_margin) num_outx_points++; + if (hz_points.at(j).x() < det_FV_xmin - det_FV_xmin_margin || hz_points.at(j).x() > det_FV_xmax - 
det_FV_xmax_margin) num_outx_points++; } } for (size_t j = 0; j != lz_points.size(); j++) { - if (lz_points.at(j).x() >= mp.FV_xmin && lz_points.at(j).x() <= mp.FV_xmax && - lz_points.at(j).y() >= mp.FV_ymin && lz_points.at(j).y() <= mp.FV_ymax && - lz_points.at(j).z() >= mp.FV_zmin && lz_points.at(j).z() <= mp.FV_zmax && (!flag_outx)) + if (lz_points.at(j).x() >= det_FV_xmin && lz_points.at(j).x() <= det_FV_xmax && + lz_points.at(j).y() >= det_FV_ymin && lz_points.at(j).y() <= det_FV_ymax && + lz_points.at(j).z() >= det_FV_zmin && lz_points.at(j).z() <= det_FV_zmax && (!flag_outx)) continue; bool flag_save = true; @@ -656,36 +699,36 @@ bool WireCell::PointCloud::Facade::JudgeSeparateDec_2(const Cluster* cluster, co if (flag_save) { independent_points.push_back(lz_points.at(j)); - if (lz_points.at(j).z() < mp.FV_zmin - mp.FV_zmin_margin) { + if (lz_points.at(j).z() < det_FV_zmin - det_FV_zmin_margin) { independent_surfaces.insert(3); } - else if (lz_points.at(j).z() > mp.FV_zmax + mp.FV_zmax_margin) { + else if (lz_points.at(j).z() > det_FV_zmax + det_FV_zmax_margin) { independent_surfaces.insert(2); } - else if (lz_points.at(j).y() > mp.FV_ymax + mp.FV_ymax_margin) { + else if (lz_points.at(j).y() > det_FV_ymax + det_FV_ymax_margin) { independent_surfaces.insert(0); } - else if (lz_points.at(j).y() < mp.FV_ymin) { + else if (lz_points.at(j).y() < det_FV_ymin) { independent_surfaces.insert(1); } - else if (lz_points.at(j).x() > mp.FV_xmax) { + else if (lz_points.at(j).x() > det_FV_xmax) { independent_surfaces.insert(4); } - else if (lz_points.at(j).x() < mp.FV_xmin) { + else if (lz_points.at(j).x() < det_FV_xmin) { independent_surfaces.insert(5); } - if (lz_points.at(j).y() > mp.FV_ymax + mp.FV_ymax_margin || lz_points.at(j).y() < mp.FV_ymin || - lz_points.at(j).z() < mp.FV_zmin - mp.FV_zmin_margin || lz_points.at(j).z() > mp.FV_zmax + mp.FV_zmax_margin || - lz_points.at(j).x() < mp.FV_xmin || lz_points.at(j).x() > mp.FV_xmax) + if (lz_points.at(j).y() > 
det_FV_ymax + det_FV_ymax_margin || lz_points.at(j).y() < det_FV_ymin || + lz_points.at(j).z() < det_FV_zmin - det_FV_zmin_margin || lz_points.at(j).z() > det_FV_zmax + det_FV_zmax_margin || + lz_points.at(j).x() < det_FV_xmin || lz_points.at(j).x() > det_FV_xmax) num_outside_points++; - if (lz_points.at(j).x() < mp.FV_xmin - mp.FV_xmin_margin || lz_points.at(j).x() > mp.FV_xmax - mp.FV_xmax_margin) num_outx_points++; + if (lz_points.at(j).x() < det_FV_xmin - det_FV_xmin_margin || lz_points.at(j).x() > det_FV_xmax - det_FV_xmax_margin) num_outx_points++; } } for (size_t j = 0; j != hx_points.size(); j++) { - if (hx_points.at(j).x() >= mp.FV_xmin && hx_points.at(j).x() <= mp.FV_xmax && - hx_points.at(j).y() >= mp.FV_ymin && hx_points.at(j).y() <= mp.FV_ymax && - hx_points.at(j).z() >= mp.FV_zmin && hx_points.at(j).z() <= mp.FV_zmax && (!flag_outx)) + if (hx_points.at(j).x() >= det_FV_xmin && hx_points.at(j).x() <= det_FV_xmax && + hx_points.at(j).y() >= det_FV_ymin && hx_points.at(j).y() <= det_FV_ymax && + hx_points.at(j).z() >= det_FV_zmin && hx_points.at(j).z() <= det_FV_zmax && (!flag_outx)) continue; bool flag_save = true; @@ -698,38 +741,38 @@ bool WireCell::PointCloud::Facade::JudgeSeparateDec_2(const Cluster* cluster, co if (flag_save) { independent_points.push_back(hx_points.at(j)); - if (hx_points.at(j).y() > mp.FV_ymax + mp.FV_ymax_margin || hx_points.at(j).y() < mp.FV_ymin || - hx_points.at(j).z() < mp.FV_zmin - mp.FV_zmin_margin || hx_points.at(j).z() > mp.FV_zmax + mp.FV_zmax_margin || - hx_points.at(j).x() < mp.FV_xmin || hx_points.at(j).x() > mp.FV_xmax) + if (hx_points.at(j).y() > det_FV_ymax + det_FV_ymax_margin || hx_points.at(j).y() < det_FV_ymin || + hx_points.at(j).z() < det_FV_zmin - det_FV_zmin_margin || hx_points.at(j).z() > det_FV_zmax + det_FV_zmax_margin || + hx_points.at(j).x() < det_FV_xmin || hx_points.at(j).x() > det_FV_xmax) num_outside_points++; - if (hx_points.at(j).x() < mp.FV_xmin - mp.FV_xmin_margin || hx_points.at(j).x() > 
mp.FV_xmax - mp.FV_xmax_margin) { + if (hx_points.at(j).x() < det_FV_xmin - det_FV_xmin_margin || hx_points.at(j).x() > det_FV_xmax - det_FV_xmax_margin) { num_outx_points++; } - if (lx_points.at(j).x() > mp.FV_xmax) { + if (lx_points.at(j).x() > det_FV_xmax) { independent_surfaces.insert(4); } - else if (lx_points.at(j).x() < mp.FV_xmin) { + else if (lx_points.at(j).x() < det_FV_xmin) { independent_surfaces.insert(5); } - else if (lx_points.at(j).y() > mp.FV_ymax + mp.FV_ymax_margin) { + else if (lx_points.at(j).y() > det_FV_ymax + det_FV_ymax_margin) { independent_surfaces.insert(0); } - else if (lx_points.at(j).y() < mp.FV_ymin) { + else if (lx_points.at(j).y() < det_FV_ymin) { independent_surfaces.insert(1); } - else if (lx_points.at(j).z() > mp.FV_zmax + mp.FV_zmax_margin) { + else if (lx_points.at(j).z() > det_FV_zmax + det_FV_zmax_margin) { independent_surfaces.insert(2); } - else if (lx_points.at(j).z() < mp.FV_zmin - mp.FV_zmin_margin) { + else if (lx_points.at(j).z() < det_FV_zmin - det_FV_zmin_margin) { independent_surfaces.insert(3); } } } for (size_t j = 0; j != lx_points.size(); j++) { - if (lx_points.at(j).x() >= mp.FV_xmin && lx_points.at(j).x() <= mp.FV_xmax && - lx_points.at(j).y() >= mp.FV_ymin && lx_points.at(j).y() <= mp.FV_ymax && - lx_points.at(j).z() >= mp.FV_zmin && lx_points.at(j).z() <= mp.FV_zmax && (!flag_outx)) + if (lx_points.at(j).x() >= det_FV_xmin && lx_points.at(j).x() <= det_FV_xmax && + lx_points.at(j).y() >= det_FV_ymin && lx_points.at(j).y() <= det_FV_ymax && + lx_points.at(j).z() >= det_FV_zmin && lx_points.at(j).z() <= det_FV_zmax && (!flag_outx)) continue; bool flag_save = true; @@ -742,30 +785,30 @@ bool WireCell::PointCloud::Facade::JudgeSeparateDec_2(const Cluster* cluster, co if (flag_save) { independent_points.push_back(lx_points.at(j)); - if (lx_points.at(j).y() > mp.FV_ymax + mp.FV_ymax_margin || lx_points.at(j).y() < mp.FV_ymin || - lx_points.at(j).z() < mp.FV_zmin - mp.FV_zmin_margin || lx_points.at(j).z() > 
mp.FV_zmax + mp.FV_zmax_margin || - lx_points.at(j).x() < mp.FV_xmin || lx_points.at(j).x() > mp.FV_xmax) + if (lx_points.at(j).y() > det_FV_ymax + det_FV_ymax_margin || lx_points.at(j).y() < det_FV_ymin || + lx_points.at(j).z() < det_FV_zmin - det_FV_zmin_margin || lx_points.at(j).z() > det_FV_zmax + det_FV_zmax_margin || + lx_points.at(j).x() < det_FV_xmin || lx_points.at(j).x() > det_FV_xmax) num_outside_points++; - if (lx_points.at(j).x() < mp.FV_xmin - mp.FV_xmin_margin || lx_points.at(j).x() > mp.FV_xmax - mp.FV_xmax_margin) { + if (lx_points.at(j).x() < det_FV_xmin - det_FV_xmin_margin || lx_points.at(j).x() > det_FV_xmax - det_FV_xmax_margin) { num_outx_points++; } - if (lx_points.at(j).x() < mp.FV_xmin) { + if (lx_points.at(j).x() < det_FV_xmin) { independent_surfaces.insert(5); } - else if (lx_points.at(j).x() > mp.FV_xmax) { + else if (lx_points.at(j).x() > det_FV_xmax) { independent_surfaces.insert(4); } - else if (lx_points.at(j).y() > mp.FV_ymax + mp.FV_ymax_margin) { + else if (lx_points.at(j).y() > det_FV_ymax + det_FV_ymax_margin) { independent_surfaces.insert(0); } - else if (lx_points.at(j).y() < mp.FV_ymin) { + else if (lx_points.at(j).y() < det_FV_ymin) { independent_surfaces.insert(1); } - else if (lx_points.at(j).z() > mp.FV_zmax + mp.FV_zmax_margin) { + else if (lx_points.at(j).z() > det_FV_zmax + det_FV_zmax_margin) { independent_surfaces.insert(2); } - else if (lx_points.at(j).z() < mp.FV_zmin - mp.FV_zmin_margin) { + else if (lx_points.at(j).z() < det_FV_zmin - det_FV_zmin_margin) { independent_surfaces.insert(3); } } @@ -784,12 +827,12 @@ bool WireCell::PointCloud::Facade::JudgeSeparateDec_2(const Cluster* cluster, co boundary_points.at(j).z() - independent_points.at(0).z()); double angle_12 = dir_1.angle(dir_2); geo_vector_t dir_3 = dir_2 - dir_1 * dir_2.magnitude() * cos(angle_12); - double angle_3 = dir_3.angle(drift_dir); + double angle_3 = dir_3.angle(drift_dir_abs); // std::cout << dir_3.Mag()/units::cm << " " << 
fabs(angle_3-3.1415926/2.)/3.1415926*180. << " " << // fabs(dir_3.X()/units::cm) << std::endl; if (fabs(angle_3 - 3.1415926 / 2.) / 3.1415926 * 180. < 7.5) { if (fabs(dir_3.x() / units::cm) > 14 * units::cm) num_far_points++; - if (fabs(dir_1.angle(drift_dir) - 3.1415926 / 2.) / 3.1415926 * 180. > 15) { + if (fabs(dir_1.angle(drift_dir_abs) - 3.1415926 / 2.) / 3.1415926 * 180. > 15) { if (dir_3.magnitude() > 20 * units::cm) num_far_points++; } } @@ -918,74 +961,59 @@ bool WireCell::PointCloud::Facade::JudgeSeparateDec_2(const Cluster* cluster, co #define _INDEV_ #ifdef _INDEV_ -std::vector WireCell::PointCloud::Facade::Separate_1(const bool use_ctpc, Cluster *cluster, +std::vector WireCell::Clus::Facade::Separate_1(const bool use_ctpc, Cluster *cluster, std::vector &boundary_points, std::vector &independent_points, - std::map> &dead_u_index, - std::map> &dead_v_index, - std::map> &dead_w_index, - double length) + double length, geo_point_t dir_cosmic, geo_point_t dir_beam, const IDetectorVolumes::pointer dv, const IPCTransformSet::pointer pcts, const Tree::Scope& scope) { - /// FIXME:REMOVE THIS AFTER DEBUGGING - // bool flag_debug_porting = false; - // if (cluster->nchildren() == 612) { - // flag_debug_porting = true; - // } - // std::cout << "Separate_1 with use_ctpc: start " << std::endl; + const std::string graph_flavor = use_ctpc ? "ctpc" : "basic"; - // translate all the points at the beginning - // TODO: is this the best way to do this? 
- // std::vector independent_points(boundary_points_idxs.size()); - // for(auto idx : independent_point_idxs) { - // independent_points[idx] = point3d(independent_point_idxs.at(idx)); - // } - // std::vector boundary_points(boundary_points_idxs.size()); - // for(auto idx : boundary_points_idxs) { - // boundary_points[idx] = point3d(boundary_points_idxs.at(idx)); - // } auto* grouping = cluster->grouping(); - const auto& tp = grouping->get_params(); - // TPCParams &mp = Singleton::Instance(); - // double pitch_u = mp.get_pitch_u(); - // double pitch_v = mp.get_pitch_v(); - // double pitch_w = mp.get_pitch_w(); - // double angle_u = mp.get_angle_u(); - // double angle_v = mp.get_angle_v(); - // double angle_w = mp.get_angle_w(); - // double time_slice_width = mp.get_ts_width(); - - geo_point_t dir_drift(1, 0, 0); - geo_point_t dir_cosmic(0, 1, 0); - geo_point_t dir_beam(0, 0, 1); - - // ToyPointCloud *temp_cloud = new ToyPointCloud(angle_u, angle_v, angle_w); - auto temp_cloud = std::make_shared(tp.angle_u, tp.angle_v, tp.angle_w); - - // ToyPointCloud *cloud = cluster->get_point_cloud(); - - geo_point_t cluster_center = cluster->get_center(); + auto gwpids = grouping->wpids(); + + std::map>>> af_dead_u_index ; + std::map>>> af_dead_v_index ; + std::map>>> af_dead_w_index ; + std::map>> af_temp_cloud; + for (auto wpid : gwpids) { + int apa = wpid.apa(); + int face = wpid.face(); + af_dead_u_index[apa][face] = grouping->get_dead_winds(apa, face, 0); // raw + af_dead_v_index[apa][face] = grouping->get_dead_winds(apa, face, 1); // raw + af_dead_w_index[apa][face] = grouping->get_dead_winds(apa, face, 2); // raw + + // Create wpids for all three planes with this APA and face + WirePlaneId wpid_u(kUlayer, face, apa); + WirePlaneId wpid_v(kVlayer, face, apa); + WirePlaneId wpid_w(kWlayer, face, apa); + + // Get wire directions for all planes + Vector wire_dir_u = dv->wire_direction(wpid_u); + Vector wire_dir_v = dv->wire_direction(wpid_v); + Vector wire_dir_w = 
dv->wire_direction(wpid_w); + + // Calculate angles + double angle_u = std::atan2(wire_dir_u.z(), wire_dir_u.y()); + double angle_v = std::atan2(wire_dir_v.z(), wire_dir_v.y()); + double angle_w = std::atan2(wire_dir_w.z(), wire_dir_w.y()); + + af_temp_cloud[apa][face] = std::make_shared(angle_u, angle_v, angle_w); // 2D Dynamic Point Cloud + } - // std::cout << cluster->get_PCA_value(0) << " " << cluster->get_PCA_value(1) << " " << cluster->get_PCA_value(2) << - // " " << cluster->get_PCA_axis(0) << " " << cluster->get_PCA_axis(1) << std::endl; + // std::cout << "Test: " << pc_name << " " << coords[0] << " " << coords[1] << " " << coords[2] << std::endl; - // geo_point_t main_dir, second_dir; - // main_dir.SetXYZ(cluster->get_PCA_axis(0).x(), cluster->get_PCA_axis(0).y(), cluster->get_PCA_axis(0).z()); - // second_dir.SetXYZ(cluster->get_PCA_axis(1).x(), cluster->get_PCA_axis(1).y(), cluster->get_PCA_axis(1).z()); - geo_point_t main_dir = cluster->get_pca_axis(0); - geo_point_t second_dir = cluster->get_pca_axis(1); - // if (flag_debug_porting) { - // std::cout << "main_dir" << main_dir << " second_dir" << second_dir << std::endl; - // } + geo_point_t cluster_center = cluster->get_pca().center; + geo_point_t main_dir = cluster->get_pca().axis.at(0); + geo_point_t second_dir = cluster->get_pca().axis.at(1); + // special case, if one of the cosmic is very close to the beam direction - if (cluster->get_pca_value(1) > 0.08 * cluster->get_pca_value(0) && + if (cluster->get_pca().values.at(1) > 0.08 * cluster->get_pca().values.at(0) && fabs(main_dir.angle(dir_beam) - 3.1415926 / 2.) > 75 / 180. * 3.1415926 && fabs(second_dir.angle(dir_cosmic) - 3.1415926 / 2.) > 60 / 180. * 3.1415926) { main_dir = second_dir; } - // std::cout << main_dir.angle(dir_beam)/3.1415926*180. << " " << second_dir.angle(dir_cosmic)/3.1415926*180. 
<< " - // " << independent_points.size() << " " << std::endl; main_dir = main_dir.norm(); if (main_dir.y() > 0) @@ -993,7 +1021,7 @@ std::vector WireCell::PointCloud::Facade::Separate_1(const bool use_c geo_point_t start_wcpoint; geo_point_t end_wcpoint; - geo_point_t drift_dir(1, 0, 0); + geo_point_t drift_dir_abs(1, 0, 0); geo_point_t dir; double min_dis = 1e9; @@ -1036,11 +1064,7 @@ std::vector WireCell::PointCloud::Facade::Separate_1(const bool use_c // max_pca_dis = dis_to_pca; } } - // if (flag_debug_porting) { - // std::cout << "min_dis: " << min_dis << " max_dis: " << max_dis << std::endl; - // std::cout << "min_index: " << min_index << " max_index: " << max_index << std::endl; - // std::cout << "min_pca_dis: " << min_pca_dis << " max_pca_dis: " << max_pca_dis << std::endl; - // } + size_t start_wcpoint_idx = 0; @@ -1074,16 +1098,16 @@ std::vector WireCell::PointCloud::Facade::Separate_1(const bool use_c } if ((!flag_change) && - fabs(temp_dir1.angle(drift_dir) - 3.1415926 / 2.) > fabs(temp_dir2.angle(drift_dir) - 3.1415926 / 2.) && - fabs(temp_dir2.angle(drift_dir) - 3.1415926 / 2.) / 3.1415926 * 180. < 10 && + fabs(temp_dir1.angle(drift_dir_abs) - 3.1415926 / 2.) > fabs(temp_dir2.angle(drift_dir_abs) - 3.1415926 / 2.) && + fabs(temp_dir2.angle(drift_dir_abs) - 3.1415926 / 2.) / 3.1415926 * 180. < 10 && fabs(temp_dir2.angle(main_dir) - 3.1415926 / 2.) / 3.1415926 * 180. < 80) { start_wcpoint = independent_points.at(max_index); main_dir = main_dir * -1; max_index = min_index; } - if ((!flag_change) && fabs(temp_dir2.angle(drift_dir) - 3.1415926 / 2.) < 1. / 180. * 3.1415926 && - fabs(temp_dir1.angle(drift_dir) - 3.1415926 / 2.) > 3. / 180. * 3.1415926 && + if ((!flag_change) && fabs(temp_dir2.angle(drift_dir_abs) - 3.1415926 / 2.) < 1. / 180. * 3.1415926 && + fabs(temp_dir1.angle(drift_dir_abs) - 3.1415926 / 2.) > 3. / 180. * 3.1415926 && fabs(temp_dir1.angle(main_dir) - 3.1415926 / 2.) / 3.1415926 * 180. 
> 70) { start_wcpoint = independent_points.at(max_index); main_dir = main_dir * -1; @@ -1093,12 +1117,12 @@ std::vector WireCell::PointCloud::Facade::Separate_1(const bool use_c geo_point_t start_point(start_wcpoint.x(), start_wcpoint.y(), start_wcpoint.z()); { - geo_point_t drift_dir(1, 0, 0); + // geo_point_t drift_dir_abs(1, 0, 0); dir = cluster->vhough_transform(start_point, 100 * units::cm); geo_point_t dir1 = cluster->vhough_transform(start_point, 30 * units::cm); if (dir.angle(dir1) > 20 * 3.1415926 / 180.) { - if (fabs(dir.angle(drift_dir) - 3.1415926 / 2.) < 5 * 3.1415926 / 180. || - fabs(dir1.angle(drift_dir) - 3.1415926 / 2.) < 5 * 3.1415926 / 180.) { + if (fabs(dir.angle(drift_dir_abs) - 3.1415926 / 2.) < 5 * 3.1415926 / 180. || + fabs(dir1.angle(drift_dir_abs) - 3.1415926 / 2.) < 5 * 3.1415926 / 180.) { dir = cluster->vhough_transform(start_point, 200 * units::cm); } else { @@ -1112,24 +1136,16 @@ std::vector WireCell::PointCloud::Facade::Separate_1(const bool use_c start_wcpoint = cluster->get_furthest_wcpoint(start_wcpoint, inv_dir, 1 * units::cm, 0); end_wcpoint = cluster->get_furthest_wcpoint(start_wcpoint, dir); - // if (flag_debug_porting) { - // std::cout << "before adjust_wcpoints_parallel" << std::endl; - // std::cout << "start_wcpoint: " << start_wcpoint << std::endl; - // std::cout << "end_wcpoint: " << end_wcpoint << std::endl; - // } + geo_point_t test_dir(end_wcpoint.x() - start_wcpoint.x(), end_wcpoint.y() - start_wcpoint.y(), end_wcpoint.z() - start_wcpoint.z()); start_wcpoint_idx = cluster->get_closest_point_index(start_wcpoint); end_wcpoint_idx = cluster->get_closest_point_index(end_wcpoint); - if (fabs(test_dir.angle(drift_dir) - 3.1415926 / 2.) < 2.5 * 3.1415926 / 180.) { + if (fabs(test_dir.angle(drift_dir_abs) - 3.1415926 / 2.) < 2.5 * 3.1415926 / 180.) 
{ cluster->adjust_wcpoints_parallel(start_wcpoint_idx, end_wcpoint_idx); start_wcpoint = cluster->point3d(start_wcpoint_idx); end_wcpoint = cluster->point3d(end_wcpoint_idx); - // if (flag_debug_porting) { - // std::cout << "after adjust_wcpoints_parallel" << std::endl; - // std::cout << "start_wcpoint: " << start_wcpoint << std::endl; - // std::cout << "end_wcpoint: " << end_wcpoint << std::endl; - // } + } } if (sqrt(pow(start_wcpoint.x() - end_wcpoint.x(), 2) + pow(start_wcpoint.y() - end_wcpoint.y(), 2) + @@ -1138,12 +1154,12 @@ std::vector WireCell::PointCloud::Facade::Separate_1(const bool use_c start_wcpoint = independent_points.at(max_index); geo_point_t start_point(start_wcpoint.x(), start_wcpoint.y(), start_wcpoint.z()); { - geo_point_t drift_dir(1, 0, 0); + // geo_point_t drift_dir_abs(1, 0, 0); dir = cluster->vhough_transform(start_point, 100 * units::cm); geo_point_t dir1 = cluster->vhough_transform(start_point, 30 * units::cm); if (dir.angle(dir1) > 20 * 3.1415926 / 180.) { - if (fabs(dir.angle(drift_dir) - 3.1415926 / 2.) < 5 * 3.1415926 / 180. || - fabs(dir1.angle(drift_dir) - 3.1415926 / 2.) < 5 * 3.1415926 / 180.) { + if (fabs(dir.angle(drift_dir_abs) - 3.1415926 / 2.) < 5 * 3.1415926 / 180. || + fabs(dir1.angle(drift_dir_abs) - 3.1415926 / 2.) < 5 * 3.1415926 / 180.) { dir = cluster->vhough_transform(start_point, 200 * units::cm); } else { @@ -1166,8 +1182,8 @@ std::vector WireCell::PointCloud::Facade::Separate_1(const bool use_c dir = cluster->vhough_transform(start_point, 100 * units::cm); geo_point_t dir1 = cluster->vhough_transform(start_point, 30 * units::cm); if (dir.angle(dir1) > 20 * 3.1415926 / 180.) { - if (fabs(dir.angle(drift_dir) - 3.1415926 / 2.) < 5 * 3.1415926 / 180. || - fabs(dir1.angle(drift_dir) - 3.1415926 / 2.) < 5 * 3.1415926 / 180.) { + if (fabs(dir.angle(drift_dir_abs) - 3.1415926 / 2.) < 5 * 3.1415926 / 180. || + fabs(dir1.angle(drift_dir_abs) - 3.1415926 / 2.) < 5 * 3.1415926 / 180.) 
{ dir = cluster->vhough_transform(start_point, 200 * units::cm); } else { @@ -1181,44 +1197,23 @@ std::vector WireCell::PointCloud::Facade::Separate_1(const bool use_c end_wcpoint = cluster->get_furthest_wcpoint(start_wcpoint, dir); } - // if (flag_debug_porting) { - // std::cout << "before adjust_wcpoints_parallel" << std::endl; - // std::cout << "start_wcpoint: " << start_wcpoint << std::endl; - // std::cout << "end_wcpoint: " << end_wcpoint << std::endl; - // } + + geo_point_t test_dir(end_wcpoint.x() - start_wcpoint.x(), end_wcpoint.y() - start_wcpoint.y(), end_wcpoint.z() - start_wcpoint.z()); start_wcpoint_idx = cluster->get_closest_point_index(start_wcpoint); end_wcpoint_idx = cluster->get_closest_point_index(end_wcpoint); - if (fabs(test_dir.angle(drift_dir) - 3.1415926 / 2.) < 2.5 * 3.1415926 / 180.) { + if (fabs(test_dir.angle(drift_dir_abs) - 3.1415926 / 2.) < 2.5 * 3.1415926 / 180.) { cluster->adjust_wcpoints_parallel(start_wcpoint_idx, end_wcpoint_idx); start_wcpoint = cluster->point3d(start_wcpoint_idx); end_wcpoint = cluster->point3d(end_wcpoint_idx); - // if (flag_debug_porting) { - // std::cout << "after adjust_wcpoints_parallel" << std::endl; - // std::cout << "start_wcpoint: " << start_wcpoint << std::endl; - // std::cout << "end_wcpoint: " << end_wcpoint << std::endl; - // } + } } - // if (flag_debug_porting) { - // std::cout << "dijkstra adjust_wcpoints_parallel" << std::endl; - // std::cout << "start_wcpoint: " << start_wcpoint << std::endl; - // std::cout << "end_wcpoint: " << end_wcpoint << std::endl; - // } + + const auto& path_wcps = cluster->graph_algorithms(graph_flavor, dv, pcts).shortest_path(start_wcpoint_idx, end_wcpoint_idx); - // std::cout << "Start Point: " << start_wcpoint.x() << " " << start_wcpoint.y() << " " << start_wcpoint.z() << std::endl; - // std::cout << "End Point: " << end_wcpoint.x() << " " << end_wcpoint.y() << " " << end_wcpoint.z() << std::endl; - // std::cout << "dijkstra_shortest_paths, face: " << tp.face << 
std::endl; - cluster->dijkstra_shortest_paths(start_wcpoint_idx, use_ctpc); - cluster->cal_shortest_path(end_wcpoint_idx); - - const auto& path_wcps = cluster->get_path_wcps(); - // if (flag_debug_porting) { - // // std::cout << " graph: " << cluster->dump_graph() << std::endl; - // std::cout << cluster->nchildren() << " " << "path_wcps.size()" << path_wcps.size() << " " << start_wcpoint << " " << end_wcpoint << std::endl; - // } std::vector flag_u_pts, flag_v_pts, flag_w_pts; std::vector flag1_u_pts, flag1_v_pts, flag1_w_pts; std::vector flag2_u_pts, flag2_v_pts, flag2_w_pts; @@ -1264,7 +1259,10 @@ std::vector WireCell::PointCloud::Facade::Separate_1(const bool use_c prev_wcp_idx = (*it); } for (const auto &pt : pts) { - temp_cloud->add(pt); + auto test_wpid = cluster->wpid(pt); + if (test_wpid.apa()!=-1){ + af_temp_cloud.at(test_wpid.apa()).at(test_wpid.face())->add(pt); + } } // if (flag_debug_porting) { // std::cout << "temp_cloud->get_num_points() " << temp_cloud->get_num_points() << std::endl; @@ -1274,10 +1272,8 @@ std::vector WireCell::PointCloud::Facade::Separate_1(const bool use_c for (size_t j = 0; j != flag_u_pts.size(); j++) { geo_point_t test_p = cluster->point3d(j); - // test_p.x() = cluster->point3d(j).x(); - // test_p.y() = cluster->point3d(j).y(); - // test_p.z() = cluster->point3d(j).z(); - std::pair temp_results = temp_cloud->get_closest_2d_dis(test_p, 0); + auto test_wpid = cluster->wire_plane_id(j) ; + std::pair temp_results = af_temp_cloud.at(test_wpid.apa()).at(test_wpid.face())->get_closest_2d_dis(test_p, 0); double dis = temp_results.second; // if (flag_debug_porting && cluster->blob_with_point(j)->slice_index_min() == 8060) { // std::cout << "get_closest_2d_dis(test_p, 0) " << test_p << " " << dis / units::cm << " cm " << (dis <= 2.4 * units::cm) << std::endl; @@ -1292,15 +1288,17 @@ std::vector WireCell::PointCloud::Facade::Separate_1(const bool use_c flag1_u_pts.at(j) = true; } else { + auto& dead_u_index = 
af_dead_u_index.at(test_wpid.apa()).at(test_wpid.face()); if (dead_u_index.find(winds[0][j]) != dead_u_index.end()) { - if (cluster->point3d(j).x() >= dead_u_index[winds[0][j]].first && - cluster->point3d(j).x() <= dead_u_index[winds[0][j]].second) { + // dead channels are corresponding to raw points + if (cluster->point3d_raw(j).x() >= dead_u_index[winds[0][j]].first && + cluster->point3d_raw(j).x() <= dead_u_index[winds[0][j]].second) { if (dis < 10 * units::cm) flag1_u_pts.at(j) = true; flag2_u_pts.at(j) = true; } } } - temp_results = temp_cloud->get_closest_2d_dis(test_p, 1); + temp_results = af_temp_cloud.at(test_wpid.apa()).at(test_wpid.face())->get_closest_2d_dis(test_p, 1); dis = temp_results.second; if (dis <= 1.5 * units::cm) { flag_v_pts.at(j) = true; @@ -1309,15 +1307,17 @@ std::vector WireCell::PointCloud::Facade::Separate_1(const bool use_c flag1_v_pts.at(j) = true; } else { + auto& dead_v_index = af_dead_v_index.at(test_wpid.apa()).at(test_wpid.face()); + // dead channels are corresponding to raw points if (dead_v_index.find(winds[1][j]) != dead_v_index.end()) { - if (cluster->point3d(j).x() >= dead_v_index[winds[1][j]].first && - cluster->point3d(j).x() <= dead_v_index[winds[1][j]].second) { + if (cluster->point3d_raw(j).x() >= dead_v_index[winds[1][j]].first && + cluster->point3d_raw(j).x() <= dead_v_index[winds[1][j]].second) { if (dis < 10.0 * units::cm) flag1_v_pts.at(j) = true; flag2_v_pts.at(j) = true; } } } - temp_results = temp_cloud->get_closest_2d_dis(test_p, 2); + temp_results = af_temp_cloud.at(test_wpid.apa()).at(test_wpid.face())->get_closest_2d_dis(test_p, 2); dis = temp_results.second; if (dis <= 1.5 * units::cm) { flag_w_pts.at(j) = true; @@ -1326,9 +1326,11 @@ std::vector WireCell::PointCloud::Facade::Separate_1(const bool use_c flag1_w_pts.at(j) = true; } else { + auto& dead_w_index = af_dead_w_index.at(test_wpid.apa()).at(test_wpid.face()); + // dead channels are corresponding to raw points if (dead_w_index.find(winds[2][j]) != 
dead_w_index.end()) { - if (cluster->point3d(j).x() >= dead_w_index[winds[2][j]].first && - cluster->point3d(j).x() <= dead_w_index[winds[2][j]].second) { + if (cluster->point3d_raw(j).x() >= dead_w_index[winds[2][j]].first && + cluster->point3d_raw(j).x() <= dead_w_index[winds[2][j]].second) { if (dis < 10 * units::cm) flag1_w_pts.at(j) = true; flag2_w_pts.at(j) = true; } @@ -1338,36 +1340,34 @@ std::vector WireCell::PointCloud::Facade::Separate_1(const bool use_c // special treatment of first and last point { - std::vector indices = cluster->get_closest_2d_index(pts.front(), 2.1 * units::cm, 0); - // if (flag_debug_porting) { - // std::cout << *cluster << std::endl; - // std::cout << "pts.front()" << pts.front() << " indices.size() " << indices.size() << std::endl; - // std::cout << "pts.back()" << pts.back() << std::endl; - // const auto&[ifront, bfront] = cluster->get_closest_point_blob(pts.front()); - // const auto&[iback, bback] = cluster->get_closest_point_blob(pts.back()); - // std::cout << "bfront " << *bfront << std::endl; - // std::cout << "bback " << *bback << std::endl; - // } + auto wpid_front = cluster->wpid(pts.front()); + auto idx_front = cluster->get_closest_point_index(pts.front()); + + + auto wpid_back = cluster->wpid(pts.back()); + auto idx_back = cluster->get_closest_point_index(pts.back()); + + std::vector indices = cluster->get_closest_2d_index(cluster->point3d_raw(idx_front), 2.1 * units::cm, wpid_front.apa(), wpid_front.face(), 0); for (size_t k = 0; k != indices.size(); k++) { flag_u_pts.at(indices.at(k)) = true; } - indices = cluster->get_closest_2d_index(pts.front(), 2.1 * units::cm, 1); + indices = cluster->get_closest_2d_index(cluster->point3d_raw(idx_front), 2.1 * units::cm, wpid_front.apa(), wpid_front.face(), 1); for (size_t k = 0; k != indices.size(); k++) { flag_v_pts.at(indices.at(k)) = true; } - indices = cluster->get_closest_2d_index(pts.front(), 2.1 * units::cm, 2); + indices = 
cluster->get_closest_2d_index(cluster->point3d_raw(idx_front), 2.1 * units::cm, wpid_front.apa(), wpid_front.face(), 2); for (size_t k = 0; k != indices.size(); k++) { flag_w_pts.at(indices.at(k)) = true; } - indices = cluster->get_closest_2d_index(pts.back(), 2.1 * units::cm, 0); + indices = cluster->get_closest_2d_index(cluster->point3d_raw(idx_back), 2.1 * units::cm, wpid_back.apa(), wpid_back.face(), 0); for (size_t k = 0; k != indices.size(); k++) { flag_u_pts.at(indices.at(k)) = true; } - indices = cluster->get_closest_2d_index(pts.back(), 2.1 * units::cm, 1); + indices = cluster->get_closest_2d_index(cluster->point3d_raw(idx_back), 2.1 * units::cm, wpid_back.apa(), wpid_back.face(), 1); for (size_t k = 0; k != indices.size(); k++) { flag_v_pts.at(indices.at(k)) = true; } - indices = cluster->get_closest_2d_index(pts.back(), 2.1 * units::cm, 2); + indices = cluster->get_closest_2d_index(cluster->point3d_raw(idx_back), 2.1 * units::cm, wpid_back.apa(), wpid_back.face(), 2); for (size_t k = 0; k != indices.size(); k++) { flag_w_pts.at(indices.at(k)) = true; } @@ -1382,12 +1382,7 @@ std::vector WireCell::PointCloud::Facade::Separate_1(const bool use_c } for (size_t j = 0; j != flag_u_pts.size(); j++) { const Blob* mcell = cluster->blob_with_point(j); - // if (flag_debug_porting && mcell->slice_index_min() == 8060) { - // std::cout << "flags " - // << flag_u_pts.at(j) << " " << flag_v_pts.at(j) << " " << flag_w_pts.at(j) << " " - // << flag1_u_pts.at(j) << " " << flag1_v_pts.at(j) << " " << flag1_w_pts.at(j) << " " - // << flag2_u_pts.at(j) << " " << flag2_v_pts.at(j) << " " << flag2_w_pts.at(j) << std::endl; - // } + if (flag_u_pts.at(j) && flag_v_pts.at(j) && flag1_w_pts.at(j) || flag_u_pts.at(j) && flag_w_pts.at(j) && flag1_v_pts.at(j) || flag_w_pts.at(j) && flag_v_pts.at(j) && flag1_u_pts.at(j)) { @@ -1400,15 +1395,10 @@ std::vector WireCell::PointCloud::Facade::Separate_1(const bool use_c mcell_np_map1[mcell]++; } } - // std::cout << "mcell_np_map.size() " 
<< mcell_np_map.size() << " " << mcell_np_map1.size() << std::endl; std::vector final_clusters; - // Cluster& cluster1 = grouping.make_child(); - // Cluster& cluster2 = grouping.make_child(); - - // blob (index) -> cluster_id (0 or 1) std::vector b2groupid(cluster->nchildren(), 0); std::set groupids; @@ -1418,14 +1408,6 @@ std::vector WireCell::PointCloud::Facade::Separate_1(const bool use_c const size_t total_wires = mcell->u_wire_index_max() - mcell->u_wire_index_min() + mcell->v_wire_index_max() - mcell->v_wire_index_min() + mcell->w_wire_index_max() - mcell->w_wire_index_min(); - // if (flag_debug_porting) { - // std::cout << "grouping mcell " - // << mcell->slice_index_min() << " " - // << mcell->nbpoints() << " " - // << mcell_np_map[mcell] << " " - // << mcell_np_map1[mcell] << " " - // << total_wires << std::endl; - // } if (mcell_np_map[mcell] > 0.5 * mcell->nbpoints() || (mcell_np_map[mcell] > 0.25 * mcell->nbpoints() && total_wires < 25)) { @@ -1444,23 +1426,16 @@ std::vector WireCell::PointCloud::Facade::Separate_1(const bool use_c groupids.insert(1); } } - // std::cout << "before separate, cluster has " << cluster->nchildren() << " children " << " with " << groupids.size() << " groups" << std::endl; + auto scope_transform = cluster->get_scope_transform(scope); auto clusters_step0 = grouping->separate(cluster, b2groupid, true); assert(cluster == nullptr); - // std::cout << "separated into " << clusters_step0.size() << " clusters" << std::endl; - // for (size_t i=0;i!=clusters_step0.size();i++) { - // std::cout << "cluster " << clusters_step0[i]->nchildren() << " children" << std::endl; - // } - // for (int id : groupids) { - // std::cout << "separated cluster " << id << " has " << clusters_step0[id]->nchildren() << " children" << std::endl; - // } std::vector other_clusters; if (clusters_step0.find(1) != clusters_step0.end()) { // other_clusters = Separate_2(clusters_step0[1], 5 * units::cm); - const auto b2id = Separate_2(clusters_step0[1], 5 * 
units::cm); + const auto b2id = Separate_2(clusters_step0[1], scope, 5 * units::cm); auto other_clusters1 = grouping->separate(clusters_step0[1],b2id, true); // the cluster is now nullptr assert(clusters_step0[1] == nullptr); @@ -1468,13 +1443,6 @@ std::vector WireCell::PointCloud::Facade::Separate_1(const bool use_c other_clusters.push_back(it->second); } } - // delete cluster2; - // if (flag_debug_porting) { - //std::cout << "other_clusters.size() " << other_clusters.size() << std::endl; - // for (size_t i = 0; i != other_clusters.size(); i++) { - // std::cout << "other_cluster " << i << " has " << other_clusters.at(i)->nchildren() << " children" << std::endl; - // } - // } // if (clusters_step0.find(0) != clusters_step0.end()) { @@ -1487,14 +1455,8 @@ std::vector WireCell::PointCloud::Facade::Separate_1(const bool use_c // check against other clusters for (size_t i = 0; i != other_clusters.size(); i++) { // other_clusters.at(i)->Create_point_cloud(); - // ToyPointCloud *temp_cloud1 = other_clusters.at(i)->get_point_cloud(); std::tuple temp_dis = other_clusters.at(i)->get_closest_points(*clusters_step0[0]); if (std::get<2>(temp_dis) < 0.5 * units::cm) { - // std::vector range_v1 = other_clusters.at(i)->get_uvwt_range(); - // double length_1 = sqrt(2. / 3. * - // (pow(pitch_u * range_v1.at(0), 2) + pow(pitch_v * range_v1.at(1), 2) + - // pow(pitch_w * range_v1.at(2), 2)) + - // pow(time_slice_width * range_v1.at(3), 2)); double length_1 = other_clusters.at(i)->get_length(); geo_point_t p1(end_wcpoint.x(), end_wcpoint.y(), end_wcpoint.z()); double close_dis = other_clusters.at(i)->get_closest_dis(p1); @@ -1504,20 +1466,31 @@ std::vector WireCell::PointCloud::Facade::Separate_1(const bool use_c geo_point_t temp_dir2 = other_clusters.at(i)->vhough_transform(p1, 15 * units::cm); if (temp_dir1.angle(temp_dir2) / 3.1415926 * 180. > 145 && length_1 < 30 * units::cm && close_dis < 3 * units::cm || - fabs(temp_dir1.angle(drift_dir) - 3.1415926 / 2.) / 3.1415926 * 180. 
< 3 && - fabs(temp_dir2.angle(drift_dir) - 3.1415926 / 2.) / 3.1415926 * 180. < 3) { + fabs(temp_dir1.angle(drift_dir_abs) - 3.1415926 / 2.) / 3.1415926 * 180. < 3 && + fabs(temp_dir2.angle(drift_dir_abs) - 3.1415926 / 2.) / 3.1415926 * 180. < 3) { temp_merge_clusters.push_back(other_clusters.at(i)); } } } } + auto scope = clusters_step0[0]->get_default_scope(); + auto scope_transform = clusters_step0[0]->get_scope_transform(scope); + + // std::cout << "Xin1: " << clusters_step0[0]->npoints() << " " << clusters_step0[0]->kd3d().npoints() << " " << clusters_step0[0]->sv().nodes().size() << " " << clusters_step0[0]->sv().npoints() << std::endl; + for (auto temp_cluster : temp_merge_clusters) { other_clusters.erase(find(other_clusters.begin(),other_clusters.end(),temp_cluster)); clusters_step0[0]->take_children(*temp_cluster, true); grouping->destroy_child(temp_cluster); assert(temp_cluster == nullptr); } + clusters_step0[0]->set_default_scope(scope); + clusters_step0[0]->set_scope_filter(scope, true); + clusters_step0[0]->set_scope_transform(scope, scope_transform); + + // std::cout << "Xin2: " << clusters_step0[0]->npoints() << " " << clusters_step0[0]->kd3d().npoints() << " " << clusters_step0[0]->sv().nodes().size() << " " << clusters_step0[0]->sv().npoints() << std::endl; + final_clusters.push_back(clusters_step0[0]); } @@ -1528,16 +1501,8 @@ std::vector WireCell::PointCloud::Facade::Separate_1(const bool use_c for (size_t i = 0; i != other_clusters.size(); i++) { // How to write??? bool flag_save = false; - // std::vector range_v1 = other_clusters.at(i)->get_uvwt_range(); - // double length_1 = sqrt(2. / 3. 
* - // (pow(pitch_u * range_v1.at(0), 2) + pow(pitch_v * range_v1.at(1), 2) + - // pow(pitch_w * range_v1.at(2), 2)) + - // pow(time_slice_width * range_v1.at(3), 2)); double length_1 = other_clusters.at(i)->get_length(); - // other_clusters.at(i)->Create_point_cloud(); - // other_clusters.at(i)->Calc_PCA(); - // ToyPointCloud *temp_cloud1 = other_clusters.at(i)->get_point_cloud(); - // std::tuple temp_dis = temp_cloud1->get_closest_points(cluster1_cloud); + std::tuple temp_dis = other_clusters.at(i)->get_closest_points(*clusters_step0[0]); if (length_1 < 30 * units::cm && std::get<2>(temp_dis) < 5 * units::cm) { int temp_total_points = other_clusters.at(i)->npoints(); @@ -1579,14 +1544,14 @@ std::vector WireCell::PointCloud::Facade::Separate_1(const bool use_c Cluster *cluster1 = saved_clusters.at(i); if (cluster1->get_length() < 5 * units::cm) continue; // ToyPointCloud *cloud1 = cluster1->get_point_cloud(); - geo_point_t dir1(cluster1->get_pca_axis(0).x(), cluster1->get_pca_axis(0).y(), - cluster1->get_pca_axis(0).z()); + geo_point_t dir1(cluster1->get_pca().axis.at(0).x(), cluster1->get_pca().axis.at(0).y(), + cluster1->get_pca().axis.at(0).z()); for (size_t j = 0; j != to_be_merged_clusters.size(); j++) { Cluster *cluster2 = to_be_merged_clusters.at(j); if (cluster2->get_length() < 10 * units::cm) continue; // ToyPointCloud *cloud2 = cluster2->get_point_cloud(); - geo_point_t dir2(cluster2->get_pca_axis(0).x(), cluster2->get_pca_axis(0).y(), - cluster2->get_pca_axis(0).z()); + geo_point_t dir2(cluster2->get_pca().axis.at(0).x(), cluster2->get_pca().axis.at(0).y(), + cluster2->get_pca().axis.at(0).z()); std::tuple temp_dis = cluster1->get_closest_points(*cluster2); if (std::get<2>(temp_dis) < 15 * units::cm && fabs(dir1.angle(dir2) - 3.1415926 / 2.) 
/ 3.1415926 * 180 > 75) { @@ -1603,11 +1568,15 @@ std::vector WireCell::PointCloud::Facade::Separate_1(const bool use_c } Cluster& cluster2 = grouping->make_child(); + cluster2.set_default_scope(scope); + cluster2.set_scope_filter(scope, true); + if (to_be_merged_clusters.size() > 0) cluster2.set_scope_transform(scope, to_be_merged_clusters[0]->get_scope_transform(scope)); for (size_t i = 0; i != to_be_merged_clusters.size(); i++) { cluster2.take_children(*to_be_merged_clusters[i], true); grouping->destroy_child(to_be_merged_clusters[i]); assert(to_be_merged_clusters[i] == nullptr); - } + } + to_be_merged_clusters.clear(); final_clusters.push_back(&cluster2); @@ -1621,77 +1590,96 @@ std::vector WireCell::PointCloud::Facade::Separate_1(const bool use_c final_clusters.push_back(other_clusters.at(i)); } } - // if (flag_debug_porting) { - // std::cout << "final_clusters.size() " << final_clusters.size() << std::endl; - // for (size_t i = 0; i != final_clusters.size(); i++) { - // std::cout << "final_cluster " << i << " has " << final_clusters.at(i)->nchildren() << " children" << std::endl; - // } - // } - - // delete temp_cloud; - // std::cout << "Separate_1 with use_ctpc: finished\n"; + return final_clusters; } #endif //_INDEV_ /// blob -> cluster_id -std::vector WireCell::PointCloud::Facade::Separate_2(Cluster *cluster, const double dis_cut, const size_t ticks_per_slice) +std::vector WireCell::Clus::Facade::Separate_2(Cluster *cluster, + const Tree::Scope& scope, + const double dis_cut) { - const auto& time_cells_set_map = cluster->time_blob_map(); + if (cluster->nchildren() == 0) { + return std::vector(); + } + + // std::cout << "Test: cluster has " << cluster->nchildren() << " blobs" << std::endl; + auto& time_cells_set_map = cluster->time_blob_map(); + // Safe access to nested maps + + // std::cout << "Separate_2 nchildren: " << cluster->nchildren() << std::endl; - std::vector& mcells = cluster->children(); + const auto& mcells = cluster->children(); - std::vector 
time_slices; - for (auto it1 = time_cells_set_map.begin(); it1 != time_cells_set_map.end(); it1++) { - time_slices.push_back((*it1).first); + // create graph for points between connected mcells, need to separate apa, face, and then ... + std::map > > af_time_slices; // apa,face --> time slices + for (auto it = cluster->time_blob_map().begin(); it != cluster->time_blob_map().end(); it++) { + int apa = it->first; + for (auto it1 = it->second.begin(); it1 != it->second.end(); it1++) { + int face = it1->first; + std::vector time_slices_vec; + for (auto it2 = it1->second.begin(); it2 != it1->second.end(); it2++) { + time_slices_vec.push_back(it2->first); + } + af_time_slices[apa][face] = time_slices_vec; + } } - using BlobSet = std::set; std::vector> connected_mcells; - for (size_t i = 0; i != time_slices.size(); i++) { - const BlobSet &mcells_set = time_cells_set_map.at(time_slices.at(i)); - // std::cout << "time_slices.at(i)" << time_slices.at(i) << " mcells_set.size() " << mcells_set.size() << std::endl; - - // create graph for points in mcell inside the same time slice - if (mcells_set.size() >= 2) { - for (auto it2 = mcells_set.begin(); it2 != mcells_set.end(); it2++) { - const Blob *mcell1 = *it2; - auto it2p = it2; - if (it2p != mcells_set.end()) { - it2p++; - for (auto it3 = it2p; it3 != mcells_set.end(); it3++) { - const Blob *mcell2 = *(it3); - if (mcell1->overlap_fast(*mcell2, 5)) connected_mcells.push_back(std::make_pair(mcell1, mcell2)); + + for (auto it = af_time_slices.begin(); it != af_time_slices.end(); it++) { + int apa = it->first; + for (auto it1 = it->second.begin(); it1 != it->second.end(); it1++) { + int face = it1->first; + std::vector& time_slices = it1->second; + + for (size_t i = 0; i != time_slices.size(); i++) { + const BlobSet &mcells_set = time_cells_set_map.at(apa).at(face).at(time_slices.at(i)); + // std::cout << "time_slices.at(i)" << time_slices.at(i) << " mcells_set.size() " << mcells_set.size() << std::endl; + + // create graph for 
points in mcell inside the same time slice + if (mcells_set.size() >= 2) { + for (auto it2 = mcells_set.begin(); it2 != mcells_set.end(); it2++) { + const Blob *mcell1 = *it2; + auto it2p = it2; + if (it2p != mcells_set.end()) { + it2p++; + for (auto it3 = it2p; it3 != mcells_set.end(); it3++) { + const Blob *mcell2 = *(it3); + if (mcell1->overlap_fast(*mcell2, 5)) connected_mcells.push_back(std::make_pair(mcell1, mcell2)); + } + } } } - } - } - // create graph for points between connected mcells in adjacent time slices + 1, if not, + 2 - std::vector vec_mcells_set; - if (i + 1 < time_slices.size()) { - if (time_slices.at(i + 1) - time_slices.at(i) == (int)(1*ticks_per_slice)) { - vec_mcells_set.push_back(time_cells_set_map.at(time_slices.at(i + 1))); - if (i + 2 < time_slices.size()) - if (time_slices.at(i + 2) - time_slices.at(i) == (int)(2*ticks_per_slice)) - vec_mcells_set.push_back(time_cells_set_map.at(time_slices.at(i + 2))); - } - else if (time_slices.at(i + 1) - time_slices.at(i) == (int)(2*ticks_per_slice)) { - vec_mcells_set.push_back(time_cells_set_map.at(time_slices.at(i + 1))); - } - } - // std::cout << "time_slices.at(i)" << time_slices.at(i) << " vec_mcells_set.size() " << vec_mcells_set.size() << std::endl; - bool flag = false; - for (size_t j = 0; j != vec_mcells_set.size(); j++) { - if (flag) break; - BlobSet &next_mcells_set = vec_mcells_set.at(j); - for (auto it1 = mcells_set.begin(); it1 != mcells_set.end(); it1++) { - const Blob *mcell1 = (*it1); - for (auto it2 = next_mcells_set.begin(); it2 != next_mcells_set.end(); it2++) { - const Blob *mcell2 = (*it2); - if (mcell1->overlap_fast(*mcell2, 2)) { - flag = true; - connected_mcells.push_back(std::make_pair(mcell1, mcell2)); + // create graph for points between connected mcells in adjacent time slices + 1, if not, + 2 + std::vector vec_mcells_set; + if (i + 1 < time_slices.size()) { + if (time_slices.at(i + 1) - time_slices.at(i) == 
(int)(1*cluster->grouping()->get_nticks_per_slice().at(apa).at(face))) { + vec_mcells_set.push_back(time_cells_set_map.at(apa).at(face).at(time_slices.at(i + 1))); + if (i + 2 < time_slices.size()) + if (time_slices.at(i + 2) - time_slices.at(i) == (int)(2*cluster->grouping()->get_nticks_per_slice().at(apa).at(face))) + vec_mcells_set.push_back(time_cells_set_map.at(apa).at(face).at(time_slices.at(i + 2))); + } + else if (time_slices.at(i + 1) - time_slices.at(i) == (int)(2*cluster->grouping()->get_nticks_per_slice().at(apa).at(face))) { + vec_mcells_set.push_back(time_cells_set_map.at(apa).at(face).at(time_slices.at(i + 1))); + } + } + // std::cout << "time_slices.at(i)" << time_slices.at(i) << " vec_mcells_set.size() " << vec_mcells_set.size() << std::endl; + bool flag = false; + for (size_t j = 0; j != vec_mcells_set.size(); j++) { + if (flag) break; + BlobSet &next_mcells_set = vec_mcells_set.at(j); + for (auto it1 = mcells_set.begin(); it1 != mcells_set.end(); it1++) { + const Blob *mcell1 = (*it1); + for (auto it2 = next_mcells_set.begin(); it2 != next_mcells_set.end(); it2++) { + const Blob *mcell2 = (*it2); + if (mcell1->overlap_fast(*mcell2, 2)) { + flag = true; + connected_mcells.push_back(std::make_pair(mcell1, mcell2)); + } + } } } } @@ -1701,15 +1689,15 @@ std::vector WireCell::PointCloud::Facade::Separate_2(Cluster *cluster, cons // form ... 
const int N = mcells.size(); - MCUGraph graph(N); + Weighted::Graph graph(N); std::map mcell_index_map; for (size_t i = 0; i != mcells.size(); i++) { Blob *curr_mcell = mcells.at(i); mcell_index_map[curr_mcell] = i; - auto v = vertex(i, graph); // retrieve vertex descriptor - (graph)[v].index = i; + // auto v = vertex(i, graph); // retrieve vertex descriptor + // (graph)[v].ident = i; } for (auto it = connected_mcells.begin(); it != connected_mcells.end(); it++) { @@ -1719,10 +1707,12 @@ std::vector WireCell::PointCloud::Facade::Separate_2(Cluster *cluster, cons // if (edge.second) { // (graph)[edge.first].dist = 1; // } - /*auto edge =*/ add_edge(index1, index2, WireCell::PointCloud::Facade::EdgeProp(1),graph); + /*auto edge =*/ add_edge(index1, index2, 1.0,graph); } { + // std::string hack_pc_name = "3d"; + // std::vector hack_coords = {"x", "y", "z"}; // std::cout << "Separate_2: num_edges: " << num_edges(graph) << std::endl; std::vector component(num_vertices(graph)); const int num = connected_components(graph, &component[0]); @@ -1737,8 +1727,9 @@ std::vector WireCell::PointCloud::Facade::Separate_2(Cluster *cluster, cons for (i = 0; i != component.size(); ++i) { vec_vec.at(component[i]).push_back(i); Blob *mcell = mcells.at(i); - for (const auto & pt : mcell->points()) { - pt_clouds.at(component[i])->add({pt.x(), pt.y(), pt.z()}); + for (const auto & pt : mcell->points(scope.pcname, scope.coords)) { + const std::vector newpt = {pt.x(), pt.y(), pt.z()}; + pt_clouds.at(component[i])->add(newpt); } } @@ -1752,7 +1743,7 @@ std::vector WireCell::PointCloud::Facade::Separate_2(Cluster *cluster, cons // if (edge.second) { // (graph)[edge.first].dist = 1; // } - /*auto edge =*/ add_edge(index1, index2, WireCell::PointCloud::Facade::EdgeProp(1),graph); + /*auto edge =*/ add_edge(index1, index2, 1.0,graph); } } } @@ -1761,32 +1752,10 @@ std::vector WireCell::PointCloud::Facade::Separate_2(Cluster *cluster, cons // std::cout << num << std::endl; } - // std::vector 
final_clusters; - // { - // std::vector component(num_vertices(graph)); - // const int num = connected_components(graph, &component[0]); - // final_clusters.resize(num); - // for (size_t i = 0; i != num; i++) { - // final_clusters.at(i) = new PR3DCluster(i); - // } - - // std::vector::size_type i; - // for (i = 0; i != component.size(); ++i) { - // Blob *mcell = mcells.at(i); - // final_clusters[component[i]]->AddCell(mcell, mcell->GetTimeSlice()); - // } - // } - // delete graph; - // return final_clusters; + std::vector component(num_vertices(graph)); /*const int num =*/ connected_components(graph, &component[0]); return component; - // auto id2cluster = cluster->separate(component); - // std::vector ret; - // for (auto [id, cluster] : id2cluster) { - // ret.push_back(cluster); - // } - // return ret; } diff --git a/clus/src/clustering_switch_scope.cxx b/clus/src/clustering_switch_scope.cxx new file mode 100644 index 000000000..d64042bfc --- /dev/null +++ b/clus/src/clustering_switch_scope.cxx @@ -0,0 +1,154 @@ +#include "WireCellClus/IEnsembleVisitor.h" +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellClus/ClusteringFuncsMixins.h" + + +#include "WireCellIface/IConfigurable.h" + +#include "WireCellUtil/NamedFactory.h" + +class ClusteringSwitchScope; +WIRECELL_FACTORY(ClusteringSwitchScope, ClusteringSwitchScope, + WireCell::IConfigurable, WireCell::Clus::IEnsembleVisitor) + +using namespace WireCell; +using namespace WireCell::Clus; +using namespace WireCell::Clus::Facade; +using namespace WireCell::PointCloud::Tree; + + +static void clustering_switch_scope( + Grouping& live_grouping, + IPCTransformSet::pointer pcts, + const Tree::Scope& scope, + const std::string& correction_name // name of correction to apply + ); + +class ClusteringSwitchScope : public IConfigurable, public Clus::IEnsembleVisitor, private NeedPCTS, private NeedScope { +public: + ClusteringSwitchScope() {} + virtual ~ClusteringSwitchScope() {} + + void configure(const 
WireCell::Configuration& config) { + NeedPCTS::configure(config); + NeedScope::configure(config); + + // Get configuration parameters + correction_name_ = convert(config["correction_name"], "T0Correction"); + } + + void visit(Ensemble& ensemble) const { + auto& live = *ensemble.with_name("live").at(0); + clustering_switch_scope(live, m_pcts, m_scope, correction_name_); + } + +private: + std::string correction_name_{"T0Correction"}; +}; + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wparentheses" + +static void clustering_switch_scope( + Grouping& live_grouping, + const Clus::IPCTransformSet::pointer pcts, // detector volumes + const Tree::Scope& default_scope, + const std::string& correction_name // name of correction to apply +) +{ + using spdlog::debug; + using spdlog::info; + + // Check that live_grouping has at least one wpid + if (live_grouping.wpids().empty()) { + throw std::runtime_error("Live grouping must have at least one wpid"); + } + + // Get all clusters from the grouping + std::vector live_clusters = live_grouping.children(); // copy + + + // std::cout << "Test: " << pc_name << " " << correction_name << " " << live_clusters.size() << std::endl; + + // Process each cluster + for (size_t iclus = 0; iclus < live_clusters.size(); ++iclus) { + Cluster* cluster = live_clusters.at(iclus); + + if (correction_name == "T0Correction") { + // Get original bounds before correction + // info("Cluster {} original bounds:", iclus); + // const auto [earliest_orig, latest_orig] = cluster->get_earliest_latest_points(); + // info(" earliest: {}", earliest_orig); + // info(" latest: {}", latest_orig); + + // Add corrected points - this returns filter values for each blob + // cluster->set_cluster_t0(-1000 * units::us); // Set cluster t0 for correction + std::vector filter_results = cluster->add_corrected_points(pcts, correction_name); + // Get the new scope with corrected points + const auto correction_scope = cluster->get_scope(correction_name); + + // 
std::cout << "Test: " << correction_name << " " << correction_scope.pcname << " " << correction_scope.coords[0] << " " << correction_scope.coords[1] << " " << correction_scope.coords[2] << std::endl; + // Set this as the default scope for viewing + cluster->set_default_scope(correction_scope); + cluster->set_scope_transform(correction_scope, correction_name); + + // Get bounds after correction + // info("Cluster {} corrected bounds:", iclus); + // const auto [earliest_corr, latest_corr] = cluster->get_earliest_latest_points(); + // info(" earliest: {}", earliest_corr); + // info(" latest: {}", latest_corr); + + // Get unique filter result values + std::set filter_result_set(filter_results.begin(), filter_results.end()); + // info("Cluster {} has {} unique filter results:", iclus, filter_result_set.size()); + // for (const auto& result : filter_result_set) { + // info(" filter result: {}", result); + // } + + // Separate the cluster based on filter results + // This will create new clusters in the grouping + auto separated_clusters = live_grouping.separate(cluster, filter_results, true); + + // Process each separated cluster + for (auto& [id, new_cluster] : separated_clusters) { + // info(" Separated cluster filter={}, nchildren={}", id, new_cluster->nchildren()); + + // std::cout << "Test: " << id << " " << new_cluster->nchildren() << std::endl; + + // Set the new scope as default for the separated cluster + new_cluster->set_default_scope(correction_scope); + if (id == 0) + new_cluster->set_scope_filter(correction_scope, false); + else if (id==1) + new_cluster->set_scope_filter(correction_scope, true); + new_cluster->set_scope_transform(correction_scope, correction_name); + + + // Get bounds of the separated cluster + // const auto [earliest_sep, latest_sep] = new_cluster->get_earliest_latest_points(); + // info(" earliest: {}", earliest_sep); + // info(" latest: {}", latest_sep); + } + } + } + + // live_clusters = live_grouping.children(); // copy + // std::cout 
<< "Test: " << pc_name << " " << correction_name << " " << live_clusters.size() << std::endl; + // // Process each cluster + // for (size_t iclus = 0; iclus < live_clusters.size(); ++iclus) { + // Cluster* cluster = live_clusters.at(iclus); + // auto& scope = cluster->get_default_scope(); + // std::cout << "Test: " << iclus << " " << cluster->nchildren() << " " << scope.pcname << " " << scope.coords[0] << " " << scope.coords[1] << " " << scope.coords[2] << " " << cluster->get_scope_filter(scope)<< std::endl; + // } + + + + + + + + + // info("Completed scope switching with correction: {}", correction_name); +} + +#pragma GCC diagnostic pop diff --git a/clus/src/clustering_test.cxx b/clus/src/clustering_test.cxx new file mode 100644 index 000000000..1ded9a56b --- /dev/null +++ b/clus/src/clustering_test.cxx @@ -0,0 +1,330 @@ +#include "WireCellClus/IEnsembleVisitor.h" +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellClus/ClusteringFuncsMixins.h" + +#include "WireCellClus/DynamicPointCloud.h" // for make_points_cluster() + +#include "WireCellIface/IConfigurable.h" + +#include "WireCellUtil/NamedFactory.h" + +#include + +class ClusteringTest; +WIRECELL_FACTORY(ClusteringTest, ClusteringTest, + WireCell::IConfigurable, WireCell::Clus::IEnsembleVisitor) + + +// #pragma GCC diagnostic push +// #pragma GCC diagnostic ignored "-Wparentheses" + +using namespace WireCell; +using namespace WireCell::Clus; +using namespace WireCell::Clus::Facade; +using namespace WireCell::PointCloud::Tree; + + +class ClusteringTest : public IConfigurable, public Clus::IEnsembleVisitor, private NeedDV, private NeedPCTS { +public: + ClusteringTest() {} + virtual ~ClusteringTest() {}; + + virtual void configure(const WireCell::Configuration& config) { + NeedDV::configure(config); + NeedPCTS::configure(config); + } + virtual Configuration default_configuration() const { + Configuration cfg; + return cfg; + } + + virtual void visit(Ensemble& ensemble) const { + auto& live = 
*ensemble.with_name("live").at(0); + clustering_test(live); + } + + // This is a test function, not used in clustering + void clustering_test(Grouping& live_grouping) const { + using spdlog::debug; + + // form map from dead to set of live clusters ... + std::map> dead_live_cluster_mapping; + std::vector dead_cluster_order; + std::map>> dead_live_mcells_mapping; + + std::vector live_clusters = live_grouping.children(); // copy + std::sort(live_clusters.begin(), live_clusters.end(), [](const Cluster *cluster1, const Cluster *cluster2) { + return cluster1->get_length() > cluster2->get_length(); + }); + + /// TEST: wpid + for (const auto& wpid : live_grouping.wpids()) { + SPDLOG_INFO("CTest live_grouping wpid {}", wpid.name()); + } + for (size_t iclus = 0; iclus != live_clusters.size(); iclus++) { + Cluster* cluster = live_clusters.at(iclus); + const auto& wpids = cluster->wpids_blob(); + for (size_t i=0; i != wpids.size(); i++) { + const auto& wpid = wpids.at(i); + SPDLOG_INFO("CTest Cluster {} i {} name {}", iclus, i, wpid.name()); + break; + } + for (size_t iblob = 0; iblob != cluster->children().size(); iblob++) { + const auto* blob = cluster->children().at(iblob); + SPDLOG_INFO("CTest Cluster {} Blob {} blob->wpid().name() {}", iclus, iblob, blob->wpid().name()); + break; + } + break; + } + + /// TEST: IDetectorVolumes + { + std::vector layers = {kUlayer, kVlayer, kWlayer}; + for (const auto& gwpid : live_grouping.wpids()) { + for (const auto& layer : layers) { + WirePlaneId wpid(layer, gwpid.face(), gwpid.apa()); + int face_dirx = m_dv->face_dirx(wpid); + Vector wire_direction = m_dv->wire_direction(wpid); + double angle = std::atan2(wire_direction.z(), wire_direction.y()); + Vector pitch_vector = m_dv->pitch_vector(wpid); + SPDLOG_INFO("CTest wpid.name {} face_dirx {} wire_direction {} angle rad:{} deg:{} pitch_vector {}", wpid.name(), face_dirx, wire_direction, angle, angle*180/3.1415926, pitch_vector); + } + } + // metadata + { + WirePlaneId all(0); + 
SPDLOG_INFO("FV_xmax {}", m_dv->metadata(all)["FV_xmax"].asDouble()); + WirePlaneId a0f0pA(kAllLayers, 0, 0); + SPDLOG_INFO("FV_xmax {}", m_dv->metadata(a0f0pA)["FV_xmax"].asDouble()); + Json::FastWriter fastWriter; + SPDLOG_INFO("metadata(a0f0pA): {}", fastWriter.write(m_dv->metadata(a0f0pA))); + } + } + + /// TEST: points: wpid, merge 3d/2d + { + for (size_t iclus = 0; iclus != live_clusters.size(); iclus++) { + Cluster* cluster = live_clusters.at(iclus); + + // TEST: kd2d with wpid + { + // expecting: + // kd3d.ndim() 3 kd3d.npoints() 4248 (non-zero) + // kd2dp0.ndim() 2 kd2dp0.npoints() 4248 (same as 3D) + // kd2dp0_a0f1.ndim() 2 kd2dp0_a0f1.npoints() 0 (if we do not have a0f1) + auto& kd3d = cluster->kd3d(); + SPDLOG_INFO("CTest Cluster {} kd3d.ndim() {} kd3d.npoints() {}", iclus, kd3d.ndim(), kd3d.npoints()); + auto& kd2dp0 = cluster->kd2d(0, 0, 0); + SPDLOG_INFO("CTest Cluster {} kd2dp0.ndim() {} kd2dp0.npoints() {}", iclus, kd2dp0.ndim(), kd2dp0.npoints()); + auto& kd2dp0_a0f1 = cluster->kd2d(0, 0 , 0); + SPDLOG_INFO("CTest Cluster {} kd2dp0_a0f1.ndim() {} kd2dp0_a0f1.npoints() {}", iclus, kd2dp0_a0f1.ndim(), kd2dp0_a0f1.npoints()); + } + + // TEST: flat_pc and points_property + { + auto& sv3d = cluster->sv3d(); + const auto fpc = sv3d.flat_pc("3d", {"uwire_index"}); + SPDLOG_INFO("CTest Cluster {} sv3d.keys().size() {} sv3d.size_major() {}", + iclus, fpc.keys().size(), fpc.size_major()); + const auto& uwire_index_fpc = fpc.get("uwire_index")->elements(); + SPDLOG_INFO("CTest Cluster {} flat_pc uwire_index[0] {}", iclus, uwire_index_fpc[0]); + const auto uwire_index_pp = cluster->points_property("uwire_index"); + SPDLOG_INFO("CTest Cluster {} points_property uwire_index[0] {}", iclus, uwire_index_pp[0]); + } + + // TEST: points_property + { + const auto x = cluster->points_property("x"); + const auto y = cluster->points_property("y"); + const auto z = cluster->points_property("z"); + const auto wpid_ident = cluster->points_property("wpid"); + for (size_t 
ipt=0; ipt!=x.size(); ipt++) { + WirePlaneId wpid(wpid_ident[ipt]); + SPDLOG_INFO("CTest Cluster {} wpid {} x {} y {} z {}", iclus, wpid.name(), x[ipt], y[ipt], z[ipt]); + break; // only one point + } + } + + break; // only one cluster + } + } + + /// TEST: DynamicPointCloud + { + // Get all the wire plane IDs from the grouping + const auto& wpids = live_grouping.wpids(); + // Key: pair, Value: drift_dir, angle_u, angle_v, angle_w + std::map> wpid_params; + std::set apas; + + std::map>>> af_dead_u_index; + std::map>>> af_dead_v_index; + std::map>>> af_dead_w_index; + + for (const auto& wpid : wpids) { + int apa = wpid.apa(); + int face = wpid.face(); + apas.insert(apa); + + // Create wpids for all three planes with this APA and face + WirePlaneId wpid_u(kUlayer, face, apa); + WirePlaneId wpid_v(kVlayer, face, apa); + WirePlaneId wpid_w(kWlayer, face, apa); + + // Get drift direction based on face orientation + int face_dirx = m_dv->face_dirx(wpid_u); + geo_point_t drift_dir(face_dirx, 0, 0); + + // Get wire directions for all planes + Vector wire_dir_u = m_dv->wire_direction(wpid_u); + Vector wire_dir_v = m_dv->wire_direction(wpid_v); + Vector wire_dir_w = m_dv->wire_direction(wpid_w); + + // Calculate angles + double angle_u = std::atan2(wire_dir_u.z(), wire_dir_u.y()); + double angle_v = std::atan2(wire_dir_v.z(), wire_dir_v.y()); + double angle_w = std::atan2(wire_dir_w.z(), wire_dir_w.y()); + + wpid_params[wpid] = std::make_tuple(drift_dir, angle_u, angle_v, angle_w); + + + af_dead_u_index[apa][face] = live_grouping.get_dead_winds(apa, face, 0); + af_dead_v_index[apa][face] = live_grouping.get_dead_winds(apa, face, 1); + af_dead_w_index[apa][face] = live_grouping.get_dead_winds(apa, face, 2); + } + + // auto [drift_dir, angle_u, angle_v, angle_w] = extract_geometry_params(live_grouping, m_dv); + auto dpc = std::make_shared(wpid_params); + // auto dpcl = std::make_shared(angle_u, angle_v, angle_w); + double extending_dis = 50 * units::cm; + double angle = 7.5; + 
double loose_dis_cut = 7.5 * units::cm; + geo_point_t dir1(1, 0, 0); + for (size_t iclus = 0; iclus != live_clusters.size(); iclus++) { + Cluster* cluster = live_clusters.at(iclus); + const auto test_point = cluster->point3d(0); + std::pair extreme_points = cluster->get_two_extreme_points(); + // dpcl->add_points(cluster, extreme_points.first, dir1, extending_dis * 3, 1.2 * units::cm, angle); + dpc->add_points(make_points_linear_extrapolation(cluster, extreme_points.first, dir1, extending_dis * 3, 1.2 * units::cm, angle, m_dv, wpid_params)); + // const auto results_legacy = dpcl->get_2d_points_info(test_point, loose_dis_cut, 0); + const auto results = dpc->get_2d_points_info(test_point, loose_dis_cut, 0, 0, 0); + // SPDLOG_INFO("CTest Cluster {} results_legacy.size() {} results.size() {}", iclus, results_legacy.size(), results.size()); + } + for (size_t iclus = 0; iclus != live_clusters.size(); iclus++) { + Cluster* cluster = live_clusters.at(iclus); + std::pair extreme_points = cluster->get_two_extreme_points(); + // dpcl->add_points(cluster,0); + dpc->add_points(make_points_cluster(cluster, wpid_params)); + // SPDLOG_INFO("CTest dpcl->get_num_points() {} dpc->get_points().size() {}", + // dpcl->get_num_points(), dpc->get_points().size()); + const auto dir_hough = dpc->vhough_transform(extreme_points.first, extending_dis); + SPDLOG_INFO("CTest Cluster {} dir_hough {} ", iclus, dir_hough); + // const auto dir_hough_legacy = dpcl->vhough_transform(extreme_points.first, extending_dis); + break; + } + } + + /// TEST T0Correction + { + int face = 0; + int apa = 0; + double cluster_t0 = -400*units::us; + WireCell::Point test_point(0, 0, 0); + WirePlaneId wpid_all(kAllLayers, face, apa); + double drift_speed = m_dv->metadata(wpid_all)["drift_speed"].asDouble(); + double time_offset = m_dv->metadata(wpid_all)["time_offset"].asDouble(); + int face_dirx = m_dv->face_dirx(wpid_all); + SPDLOG_INFO("CTest T0Correction face_dirx {} drift_speed {} time_offset {} cluster_t0 {}", 
+ face_dirx, drift_speed, time_offset, cluster_t0); + // expectation: + const auto expected_corrected_point_x = test_point.x() - face_dirx * (cluster_t0 + time_offset) * drift_speed; + const auto T0Correction = m_pcts->pc_transform("T0Correction"); + const auto corrected_point = T0Correction->forward(test_point, cluster_t0, face, apa); + const auto filter_result = T0Correction->filter(corrected_point, cluster_t0, face, apa); + const auto backward_corrected_point = T0Correction->backward(corrected_point, cluster_t0, face, apa); + SPDLOG_INFO("CTest T0Correction test_point {} corrected_point {} expected_corrected_point_x {} filter_result {} backward_corrected_point {}", + test_point, corrected_point, expected_corrected_point_x, filter_result, backward_corrected_point); + Dataset pc; + pc.add("x", Array({test_point.x()})); + pc.add("y", Array({test_point.y()})); + pc.add("z", Array({test_point.z()})); + const auto fpc = T0Correction->forward(pc, {"x", "y", "z"}, {"x_cor","y_cor","z_cor"}, cluster_t0, face, apa); + const auto bpc = T0Correction->backward(fpc, {"x_cor", "y_cor", "z_cor"}, {"x","y","z"}, cluster_t0, face, apa); + const auto filter_result_fpc = T0Correction->filter(fpc, {"x_cor", "y_cor", "z_cor"}, cluster_t0, face, apa); + const auto fpc_x = fpc.get("x_cor")->elements(); + const auto fpc_y = fpc.get("y_cor")->elements(); + const auto fpc_z = fpc.get("z_cor")->elements(); + const auto bpc_x = bpc.get("x")->elements(); + const auto bpc_y = bpc.get("y")->elements(); + const auto bpc_z = bpc.get("z")->elements(); + const auto filter_result_fpc_filter = filter_result_fpc.get("filter")->elements(); + SPDLOG_INFO("CTest T0Correction fpc_x {} fpc_y {} fpc_z {} bpc_x {} bpc_y {} bpc_z {} filter_result_fpc_filter {}", + fpc_x[0], fpc_y[0], fpc_z[0], bpc_x[0], bpc_y[0], bpc_z[0], filter_result_fpc_filter[0]); + } + + /// TEST: m_dv->contained_by() + { + for (double x = 254*units::cm; x < 255*units::cm; x += 0.1*units::cm) { + Point point(x, 0*units::cm, 
50*units::cm); + WirePlaneId wpid = m_dv->contained_by(point); + if (wpid.valid()) { + SPDLOG_INFO("CTest dv->contained_by point {} wpid {}", point, wpid.name()); + } else { + SPDLOG_INFO("CTest dv->contained_by point {} wpid not found", point); + } + } + } + + /// TEST: add corrected points to Cluster and separate according to filter + { + for (size_t iclus = 0; iclus != live_clusters.size(); iclus++) { + Cluster *cluster = live_clusters.at(iclus); + + { + // earliest (-52.848 -1137.79 2036.5) latest (594.54 1137.27 1897.01) + const auto [earliest, latest] = cluster->get_earliest_latest_points(); + SPDLOG_INFO("CTest Cluster {} earliest {} latest {}", iclus, earliest, latest); + } + cluster->set_cluster_t0(1600*units::us); + std::vector b2filter_result = cluster->add_corrected_points(m_pcts, "T0Correction"); + const auto scope_T0Correction = cluster->get_scope("T0Correction"); + cluster->set_default_scope(scope_T0Correction); + { + const auto [earliest, latest] = cluster->get_earliest_latest_points(); + SPDLOG_INFO("CTest Cluster {} earliest {} latest {}", iclus, earliest, latest); + } + std::set b2filter_result_set(b2filter_result.begin(), b2filter_result.end()); + for (const auto filter_result : b2filter_result_set) { + SPDLOG_INFO("CTest add corrected points filter_result {}", filter_result); + } + auto clusters_sep = live_grouping.separate(cluster, b2filter_result, true); + for (auto &[id, new_cluster] : clusters_sep) { + // new_cluster->set_scope_filter(new_cluster->get_scope("T0Correction"), id); + SPDLOG_INFO("CTest add corrected points id {} nchildren {}", + id, new_cluster->nchildren()); + { + const auto [earliest, latest] = new_cluster->get_earliest_latest_points(); + SPDLOG_INFO("CTest Cluster {} earliest {} latest {}", iclus, earliest, latest); + } + new_cluster->set_default_scope(scope_T0Correction); + { + const auto [earliest, latest] = new_cluster->get_earliest_latest_points(); + SPDLOG_INFO("CTest Cluster {} earliest {} latest {}", iclus, earliest, 
latest); + } + // set filter (how to judge good or bad ???) + if (id==0) { + new_cluster->set_scope_filter(scope_T0Correction, false); + } else if (id==1) { + new_cluster->set_scope_filter(scope_T0Correction, true); + } + new_cluster->set_scope_transform(scope_T0Correction, "T0Correction"); + // if (id == 0) { + // live_grouping.remove_child(*new_cluster); + // } + } + break; + } + } + } +}; +// #pragma GCC diagnostic pop diff --git a/clus/src/clustering_util.cxx b/clus/src/clustering_util.cxx deleted file mode 100644 index 31a960df2..000000000 --- a/clus/src/clustering_util.cxx +++ /dev/null @@ -1,91 +0,0 @@ -#include - -#include // temp debug - -using namespace WireCell::PointCloud::Facade; - - -void WireCell::PointCloud::Facade::merge_clusters( - cluster_connectivity_graph_t& g, - Grouping& grouping, - cluster_set_t& known_clusters, // in/out - const std::string& aname, const std::string& pcname) -{ - std::unordered_map desc2id; - std::unordered_map > id2desc; - /*int num_components =*/ boost::connected_components(g, boost::make_assoc_property_map(desc2id)); - for (const auto& [desc, id] : desc2id) { - id2desc[id].insert(desc); - } - - // Note, here we do an unusual thing and COPY the vector of children - // facades. In most simple access we would get the reference to the child - // vector to save a little copy time. We explicitly copy here as we must - // preserve the original order of children facades even as we remove them - // from the grouping. As each child facade is removed, it's - // unique_ptr is returned which we ignore/drop and thus the child - // facade dies along with its node. This leaves the orig_clusters element - // that was just holding the pointer to the doomed facade now holding - // invalid memory. But, it is okay as we never revisit the same cluster in - // the grouping. All that to explain a missing "&"! 
:) - auto orig_clusters = grouping.children(); - - const bool savecc = aname.size() > 0 && pcname.size() > 0; - - for (const auto& [id, descs] : id2desc) { - if (descs.size() < 2) { - continue; - } - - // it starts with no cluster facade - Cluster& fresh_cluster = grouping.make_child(); - - std::vector cc; - int parent_id = 0; - for (const auto& desc : descs) { - const int idx = g[desc]; - if (idx < 0) { // no need anymore ... - continue; - } - - auto live = orig_clusters[idx]; - fresh_cluster.take_children(*live, true); - - if (savecc) { - cc.resize(fresh_cluster.nchildren(), parent_id); - ++parent_id; - } - - known_clusters.erase(live); - grouping.destroy_child(live); - assert(live == nullptr); - } - if (savecc) { - fresh_cluster.put_pcarray(cc, aname, pcname); - } - known_clusters.insert(&fresh_cluster); - } - - - - // fixme: sanity check / debugging. remove this if you find it committed. - for (const auto* cluster : grouping.children()) { - if (!cluster) { - std::cerr << "merge_clusters: null live cluster on output!\n"; - continue; - } - if (! cluster->nchildren()) { - std::cerr << "merge_clusters: empty live cluster on output!\n"; - continue; - } - for (const auto* blob : cluster->children()) { - if (!blob) { - std::cerr << "merge_clusters: null live blob on output!\n"; - continue; - } - } - } - -} - - diff --git a/clus/src/connect_graph.cxx b/clus/src/connect_graph.cxx new file mode 100644 index 000000000..417285131 --- /dev/null +++ b/clus/src/connect_graph.cxx @@ -0,0 +1,516 @@ +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellClus/Facade_Cluster.h" +#include "WireCellClus/Facade_Blob.h" +#include "WireCellClus/Facade_Grouping.h" + +#include "connect_graphs.h" + +using namespace WireCell; +using namespace WireCell::Clus; +using namespace WireCell::Clus::Facade; + +void Graphs::connect_graph(const Cluster& cluster, Weighted::Graph& graph) +{ + // This used to be the body of Cluster::Connect_graph(). 
+ + // now form the connected components + std::vector component(num_vertices(graph)); + const size_t num = connected_components(graph, &component[0]); + + // Create ordered components + std::vector ordered_components; + ordered_components.reserve(component.size()); + for (size_t i = 0; i < component.size(); ++i) { + ordered_components.emplace_back(i); + } + + // Assign vertices to components + for (size_t i = 0; i < component.size(); ++i) { + ordered_components[component[i]].add_vertex(i); + } + + // Sort components by minimum vertex index + std::sort(ordered_components.begin(), ordered_components.end(), + [](const ComponentInfo& a, const ComponentInfo& b) { + return a.min_vertex < b.min_vertex; + }); + + if (num <= 1) return; + + std::vector> pt_clouds; + std::vector> pt_clouds_global_indices; + // use this to link the global index to the local index + // Create point clouds using ordered components + const auto& points = cluster.points(); + for (const auto& comp : ordered_components) { + auto pt_cloud = std::make_shared(); + std::vector global_indices; + + for (size_t vertex_idx : comp.vertex_indices) { + pt_cloud->add({points[0][vertex_idx], points[1][vertex_idx], points[2][vertex_idx]}); + global_indices.push_back(vertex_idx); + } + + pt_clouds.push_back(pt_cloud); + pt_clouds_global_indices.push_back(global_indices); + } + + /// DEBUGONLY: + if (0) { + for (size_t i = 0; i != num; i++) { + std::cout << *pt_clouds.at(i) << std::endl; + std::cout << "global indices: "; + for (size_t j = 0; j != pt_clouds_global_indices.at(i).size(); j++) { + std::cout << pt_clouds_global_indices.at(i).at(j) << " "; + } + std::cout << std::endl; + } + } + + // Initiate dist. 
metrics + std::vector>> index_index_dis( + num, std::vector>(num)); + std::vector>> index_index_dis_mst( + num, std::vector>(num)); + + std::vector>> index_index_dis_dir1( + num, std::vector>(num)); + std::vector>> index_index_dis_dir2( + num, std::vector>(num)); + std::vector>> index_index_dis_dir_mst( + num, std::vector>(num)); + + for (size_t j = 0; j != num; j++) { + for (size_t k = 0; k != num; k++) { + index_index_dis[j][k] = std::make_tuple(-1, -1, 1e9); + index_index_dis_mst[j][k] = std::make_tuple(-1, -1, 1e9); + + index_index_dis_dir1[j][k] = std::make_tuple(-1, -1, 1e9); + index_index_dis_dir2[j][k] = std::make_tuple(-1, -1, 1e9); + index_index_dis_dir_mst[j][k] = std::make_tuple(-1, -1, 1e9); + } + } + + Weighted::Graph temp_graph(num); + + for (size_t j=0;j!=num;j++){ + for (size_t k=j+1;k!=num;k++){ + index_index_dis[j][k] = pt_clouds.at(j)->get_closest_points(*pt_clouds.at(k)); + + int index1 = j; + int index2 = k; + + if (!boost::edge(index1, index2, temp_graph).second) + /*auto edge =*/ add_edge(index1,index2, std::get<2>(index_index_dis[j][k]), temp_graph); + } + } + + process_mst_deterministically(temp_graph, index_index_dis, index_index_dis_mst); + + for (size_t j = 0; j != num; j++) { + for (size_t k = j + 1; k != num; k++) { + if (std::get<2>(index_index_dis[j][k])<3*units::cm){ + index_index_dis_mst[j][k] = index_index_dis[j][k]; + } + + if (num < 100) + if (pt_clouds.at(j)->get_num_points()>100 && pt_clouds.at(k)->get_num_points()>100 && + (pt_clouds.at(j)->get_num_points()+pt_clouds.at(k)->get_num_points()) > 400){ + geo_point_t p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis[j][k])); + geo_point_t p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis[j][k])); + + geo_point_t dir1 = cluster.vhough_transform(p1, 30 * units::cm, Cluster::HoughParamSpace::theta_phi, pt_clouds.at(j), pt_clouds_global_indices.at(j)); + geo_point_t dir2 = cluster.vhough_transform(p2, 30 * units::cm, Cluster::HoughParamSpace::theta_phi, pt_clouds.at(k), 
pt_clouds_global_indices.at(k)); + dir1 = dir1 * -1; + dir2 = dir2 * -1; + + std::pair result1 = pt_clouds.at(k)->get_closest_point_along_vec( + p1, dir1, 80 * units::cm, 5 * units::cm, 7.5, 3 * units::cm); + + if (result1.first >= 0) { + index_index_dis_dir1[j][k] = + std::make_tuple(std::get<0>(index_index_dis[j][k]), result1.first, result1.second); + } + + std::pair result2 = pt_clouds.at(j)->get_closest_point_along_vec( + p2, dir2, 80 * units::cm, 5 * units::cm, 7.5, 3 * units::cm); + + if (result2.first >= 0) { + index_index_dis_dir2[j][k] = + std::make_tuple(result2.first, std::get<1>(index_index_dis[j][k]), result2.second); + } + } + } + } + + // MST for the directionality ... + { + Weighted::Graph temp_graph(num); + + for (size_t j = 0; j != num; j++) { + for (size_t k = j + 1; k != num; k++) { + int index1 = j; + int index2 = k; + if (std::get<0>(index_index_dis_dir1[j][k]) >= 0 || std::get<0>(index_index_dis_dir2[j][k]) >= 0) { + + if (!boost::edge(index1, index2, temp_graph).second) + add_edge( + index1, index2, + std::min(std::get<2>(index_index_dis_dir1[j][k]), std::get<2>(index_index_dis_dir2[j][k])), + temp_graph); + } + } + } + + process_mst_deterministically(temp_graph, index_index_dis, index_index_dis_dir_mst); + + } + + // now complete graph according to the direction + // according to direction ... 
+ for (size_t j = 0; j != num; j++) { + for (size_t k = j + 1; k != num; k++) { + if (std::get<0>(index_index_dis_mst[j][k]) >= 0) { + const int gind1 = pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_mst[j][k])); + const int gind2 = pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_mst[j][k])); + + float dis; + if (std::get<2>(index_index_dis_mst[j][k]) > 5 * units::cm) { + dis = std::get<2>(index_index_dis_mst[j][k]); + } + else { + dis = std::get<2>(index_index_dis_mst[j][k]); + } + if (!boost::edge(gind1, gind2, graph).second) { + /*auto edge =*/ add_edge(gind1, gind2, dis, graph); + } + } + + if (std::get<0>(index_index_dis_dir_mst[j][k]) >= 0) { + if (std::get<0>(index_index_dis_dir1[j][k]) >= 0) { + const int gind1 = pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_dir1[j][k])); + const int gind2 = pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_dir1[j][k])); + + float dis; + if (std::get<2>(index_index_dis_dir1[j][k]) > 5 * units::cm) { + dis = std::get<2>(index_index_dis_dir1[j][k]) * 1.2; + } + else { + dis = std::get<2>(index_index_dis_dir1[j][k]); + } + if (!boost::edge(gind1, gind2, graph).second) { + /*auto edge =*/ add_edge(gind1, gind2, dis, graph); + } + } + if (std::get<0>(index_index_dis_dir2[j][k]) >= 0) { + const int gind1 = pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_dir2[j][k])); + const int gind2 = pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_dir2[j][k])); + + float dis; + if (std::get<2>(index_index_dis_dir2[j][k]) > 5 * units::cm) { + dis = std::get<2>(index_index_dis_dir2[j][k]) * 1.2; + } + else { + dis = std::get<2>(index_index_dis_dir2[j][k]); + } + if (!boost::edge(gind1, gind2, graph).second) { + /*auto edge =*/ add_edge(gind1, gind2, dis, graph); + } + } + } + + } + } + + +} + + +using namespace WireCell::Clus::Facade; + + +void Graphs::connect_graph_with_reference( + const Facade::Cluster& cluster, + const Facade::Cluster& ref_cluster, + 
Weighted::Graph& graph) +{ + // Drift direction (used in prototype for angle checks) + geo_vector_t drift_dir_abs(1, 0, 0); + + // now form the connected components + std::vector component(num_vertices(graph)); + const size_t num = connected_components(graph, &component[0]); + + // Create ordered components (same as baseline) + std::vector ordered_components; + ordered_components.reserve(component.size()); + for (size_t i = 0; i < component.size(); ++i) { + ordered_components.emplace_back(i); + } + + // Assign vertices to components + for (size_t i = 0; i < component.size(); ++i) { + ordered_components[component[i]].add_vertex(i); + } + + // Sort components by minimum vertex index + std::sort(ordered_components.begin(), ordered_components.end(), + [](const ComponentInfo& a, const ComponentInfo& b) { + return a.min_vertex < b.min_vertex; + }); + + if (num <= 1) return; + + std::vector> pt_clouds; + std::vector> pt_clouds_global_indices; + + // Initialize pt_clouds for each component (same as baseline) + for (size_t comp_idx = 0; comp_idx < ordered_components.size(); ++comp_idx) { + auto pt_cloud = std::make_shared(); + pt_clouds.push_back(pt_cloud); + pt_clouds_global_indices.push_back(std::vector()); + } + + const auto& points = cluster.points(); + std::set excluded_points; // Track excluded points + + // Check if reference cluster is empty + bool use_reference_filtering = (ref_cluster.is_valid() && ref_cluster.npoints() > 0); + + // Process each point with reference filtering (matches prototype exactly) + for (size_t i = 0; i < component.size(); ++i) { + bool should_exclude = false; + + // Check if point is good (equivalent to prototype's mcell->IsPointGood) + if (!is_point_good(cluster, i, 2)) { + should_exclude = true; + } else if (use_reference_filtering) { + // Only check distance to reference cluster if it's not empty + const auto& ref_kd = ref_cluster.kd3d(); // Use reference cluster's KD-tree + double temp_min_dis = 0; + geo_point_t temp_p(points[0][i], 
points[1][i], points[2][i]); + std::vector query_point = {temp_p.x(), temp_p.y(), temp_p.z()}; + auto knn_result = ref_kd.knn(1, query_point); + + if (!knn_result.empty()) { + temp_min_dis = std::sqrt(knn_result[0].second); // knn returns squared distance + } + + // Key filtering criterion from prototype: >= 1.0 cm means exclude + if (temp_min_dis >= 1.0 * units::cm) { + should_exclude = true; + } + } + // If ref_cluster is empty, we skip the distance check and only use the point quality check + + if (should_exclude) { + excluded_points.insert(i); + } else { + // Add to appropriate component cloud + size_t comp_idx = component[i]; + pt_clouds.at(comp_idx)->add({points[0][i], points[1][i], points[2][i]}); + pt_clouds_global_indices.at(comp_idx).push_back(i); + } + } + + // Store excluded points in cluster cache (matches prototype's excluded_points) + // Note: When ref_cluster is empty, excluded_points will only contain points that fail the quality check + const_cast(cluster).set_excluded_points(excluded_points); + + + // Initiate dist. 
metrics (same as baseline) + std::vector>> index_index_dis( + num, std::vector>(num)); + std::vector>> index_index_dis_mst( + num, std::vector>(num)); + + std::vector>> index_index_dis_dir1( + num, std::vector>(num)); + std::vector>> index_index_dis_dir2( + num, std::vector>(num)); + std::vector>> index_index_dis_dir_mst( + num, std::vector>(num)); + + for (size_t j = 0; j != num; j++) { + for (size_t k = 0; k != num; k++) { + index_index_dis[j][k] = std::make_tuple(-1, -1, 1e9); + index_index_dis_mst[j][k] = std::make_tuple(-1, -1, 1e9); + + index_index_dis_dir1[j][k] = std::make_tuple(-1, -1, 1e9); + index_index_dis_dir2[j][k] = std::make_tuple(-1, -1, 1e9); + index_index_dis_dir_mst[j][k] = std::make_tuple(-1, -1, 1e9); + } + } + + // Distance calculation (same as baseline) + Weighted::Graph temp_graph(num); + + for (size_t j=0;j!=num;j++){ + for (size_t k=j+1;k!=num;k++){ + index_index_dis[j][k] = pt_clouds.at(j)->get_closest_points(*pt_clouds.at(k)); + + int index1 = j; + int index2 = k; + + if (!boost::edge(index1, index2, temp_graph).second) + /*auto edge =*/ add_edge(index1,index2, std::get<2>(index_index_dis[j][k]), temp_graph); + } + } + + process_mst_deterministically(temp_graph, index_index_dis, index_index_dis_mst); + + for (size_t j = 0; j != num; j++) { + for (size_t k = j + 1; k != num; k++) { + if (std::get<2>(index_index_dis[j][k])<3*units::cm){ + index_index_dis_mst[j][k] = index_index_dis[j][k]; + } + + if (num < 100) + if (pt_clouds.at(j)->get_num_points()>100 && pt_clouds.at(k)->get_num_points()>100 && + (pt_clouds.at(j)->get_num_points()+pt_clouds.at(k)->get_num_points()) > 400){ + geo_point_t p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis[j][k])); + geo_point_t p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis[j][k])); + + // Use cluster's vhough_transform method with drift direction awareness + geo_vector_t dir1 = cluster.vhough_transform(p1, 30 * units::cm, Cluster::HoughParamSpace::theta_phi, pt_clouds.at(j), 
pt_clouds_global_indices.at(j)); + geo_vector_t dir2 = cluster.vhough_transform(p2, 30 * units::cm, Cluster::HoughParamSpace::theta_phi, pt_clouds.at(k), pt_clouds_global_indices.at(k)); + dir1 = dir1 * -1; + dir2 = dir2 * -1; + + std::pair result1 = pt_clouds.at(k)->get_closest_point_along_vec( + p1, dir1, 80 * units::cm, 5 * units::cm, 7.5, 3 * units::cm); + + // If no result and direction is nearly perpendicular to drift, try longer hough transform as in prototype + double angle_deg = dir1.angle(drift_dir_abs) * 180.0 / M_PI; + if (result1.first < 0 && std::abs(angle_deg - 90.0) < 10.0) { + if (std::abs(angle_deg - 90.0) < 5.0) + dir1 = cluster.vhough_transform(p1, 80 * units::cm, Cluster::HoughParamSpace::theta_phi, pt_clouds.at(j), pt_clouds_global_indices.at(j)); + else if (std::abs(angle_deg - 90.0) < 10.0) + dir1 = cluster.vhough_transform(p1, 50 * units::cm, Cluster::HoughParamSpace::theta_phi, pt_clouds.at(j), pt_clouds_global_indices.at(j)); + dir1 = dir1 * -1; + result1 = pt_clouds.at(k)->get_closest_point_along_vec( + p1, dir1, 80 * units::cm, 5 * units::cm, 7.5, 3 * units::cm); + } + + if (result1.first >= 0) { + index_index_dis_dir1[j][k] = + std::make_tuple(std::get<0>(index_index_dis[j][k]), result1.first, result1.second); + } + + std::pair result2 = pt_clouds.at(j)->get_closest_point_along_vec( + p2, dir2, 80 * units::cm, 5 * units::cm, 7.5, 3 * units::cm); + + // Additional drift direction check (from prototype, though isochronous search was commented out) + // If no result and direction is nearly perpendicular to drift, try longer hough transform as in prototype + double angle_deg2 = dir2.angle(drift_dir_abs) * 180.0 / M_PI; + if (result2.first < 0 && std::abs(angle_deg2 - 90.0) < 10.0) { + if (std::abs(angle_deg2 - 90.0) < 5.0) + dir2 = cluster.vhough_transform(p2, 80 * units::cm, Cluster::HoughParamSpace::theta_phi, pt_clouds.at(k), pt_clouds_global_indices.at(k)); + else if (std::abs(angle_deg2 - 90.0) < 10.0) + dir2 = 
cluster.vhough_transform(p2, 50 * units::cm, Cluster::HoughParamSpace::theta_phi, pt_clouds.at(k), pt_clouds_global_indices.at(k)); + dir2 = dir2 * -1; + result2 = pt_clouds.at(j)->get_closest_point_along_vec( + p2, dir2, 80 * units::cm, 5 * units::cm, 7.5, 3 * units::cm); + } + + + + if (result2.first >= 0) { + index_index_dis_dir2[j][k] = + std::make_tuple(result2.first, std::get<1>(index_index_dis[j][k]), result2.second); + } + } + } + } + + // MST for the directionality ... (same as baseline) + { + Weighted::Graph temp_graph(num); + + for (size_t j = 0; j != num; j++) { + for (size_t k = j + 1; k != num; k++) { + int index1 = j; + int index2 = k; + if (std::get<0>(index_index_dis_dir1[j][k]) >= 0 || std::get<0>(index_index_dis_dir2[j][k]) >= 0) { + + if (!boost::edge(index1, index2, temp_graph).second) + add_edge( + index1, index2, + std::min(std::get<2>(index_index_dis_dir1[j][k]), std::get<2>(index_index_dis_dir2[j][k])), + temp_graph); + } + } + } + + process_mst_deterministically(temp_graph, index_index_dis, index_index_dis_dir_mst); + + } + + // now complete graph according to the direction (same as baseline) + for (size_t j = 0; j != num; j++) { + for (size_t k = j + 1; k != num; k++) { + if (std::get<0>(index_index_dis_mst[j][k]) >= 0) { + const int gind1 = pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_mst[j][k])); + const int gind2 = pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_mst[j][k])); + + float dis; + if (std::get<2>(index_index_dis_mst[j][k]) > 5 * units::cm) { + dis = std::get<2>(index_index_dis_mst[j][k]); + } + else { + dis = std::get<2>(index_index_dis_mst[j][k]); + } + if (!boost::edge(gind1, gind2, graph).second) { + /*auto edge =*/ add_edge(gind1, gind2, dis, graph); + } + } + + if (std::get<0>(index_index_dis_dir_mst[j][k]) >= 0) { + if (std::get<0>(index_index_dis_dir1[j][k]) >= 0) { + const int gind1 = pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_dir1[j][k])); + const int gind2 = 
pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_dir1[j][k])); + + float dis; + if (std::get<2>(index_index_dis_dir1[j][k]) > 5 * units::cm) { + dis = std::get<2>(index_index_dis_dir1[j][k]) * 1.2; + } + else { + dis = std::get<2>(index_index_dis_dir1[j][k]); + } + if (!boost::edge(gind1, gind2, graph).second) { + /*auto edge =*/ add_edge(gind1, gind2, dis, graph); + } + } + if (std::get<0>(index_index_dis_dir2[j][k]) >= 0) { + const int gind1 = pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_dir2[j][k])); + const int gind2 = pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_dir2[j][k])); + + float dis; + if (std::get<2>(index_index_dis_dir2[j][k]) > 5 * units::cm) { + dis = std::get<2>(index_index_dis_dir2[j][k]) * 1.2; + } + else { + dis = std::get<2>(index_index_dis_dir2[j][k]); + } + if (!boost::edge(gind1, gind2, graph).second) { + /*auto edge =*/ add_edge(gind1, gind2, dis, graph); + } + } + } + } + } +} + +// Helper function equivalent to prototype's mcell->IsPointGood +bool Graphs::is_point_good(const Cluster& cluster, size_t point_index, int ncut) { + double charge_u = cluster.charge_value(point_index, 0); + double charge_v = cluster.charge_value(point_index, 1); + double charge_w = cluster.charge_value(point_index, 2); + + int ncount = 0; + if (charge_u > 10) ncount++; + if (charge_v > 10) ncount++; + if (charge_w > 10) ncount++; + + return ncount >= ncut; +} \ No newline at end of file diff --git a/clus/src/connect_graph_closely.cxx b/clus/src/connect_graph_closely.cxx new file mode 100644 index 000000000..2f01de1da --- /dev/null +++ b/clus/src/connect_graph_closely.cxx @@ -0,0 +1,868 @@ +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellClus/Facade_Cluster.h" +#include "WireCellClus/Facade_Blob.h" +#include "WireCellClus/Facade_Grouping.h" + +#include "connect_graphs.h" + +using namespace WireCell; +using namespace WireCell::Clus; + + +void Graphs::connect_graph_closely(const Facade::Cluster& cluster, 
Weighted::Graph& graph, int num_neighbors) +{ + // What follows used to be in Cluster::Establish_close_connected_graph(). + // It is/was called from examine_graph() and Create_graph(). + + using mcell_wire_wcps_map_t = std::map>, Facade::BlobLess>; + mcell_wire_wcps_map_t map_mcell_uindex_wcps, map_mcell_vindex_wcps, map_mcell_windex_wcps; + + std::map, Facade::BlobLess> map_mcell_indices; + + const auto& points = cluster.points(); + const auto& winds = cluster.wire_indices(); + + for (Facade::Blob* mcell : cluster.children()) { + std::map> map_uindex_wcps; + std::map> map_vindex_wcps; + std::map> map_windex_wcps; + + std::vector pinds = cluster.get_blob_indices(mcell); + for (const int pind : pinds) { + // auto v = vertex(pind, graph); // retrieve vertex descriptor + // (graph)[v].ident = pind; + if (map_uindex_wcps.find(winds[0][pind]) == map_uindex_wcps.end()) { + std::set wcps; + wcps.insert(pind); + map_uindex_wcps[winds[0][pind]] = wcps; + } + else { + map_uindex_wcps[winds[0][pind]].insert(pind); + } + + if (map_vindex_wcps.find(winds[1][pind]) == map_vindex_wcps.end()) { + std::set wcps; + wcps.insert(pind); + map_vindex_wcps[winds[1][pind]] = wcps; + } + else { + map_vindex_wcps[winds[1][pind]].insert(pind); + } + + if (map_windex_wcps.find(winds[2][pind]) == map_windex_wcps.end()) { + std::set wcps; + wcps.insert(pind); + map_windex_wcps[winds[2][pind]] = wcps; + } + else { + map_windex_wcps[winds[2][pind]].insert(pind); + } + } + map_mcell_uindex_wcps[mcell] = map_uindex_wcps; + map_mcell_vindex_wcps[mcell] = map_vindex_wcps; + map_mcell_windex_wcps[mcell] = map_windex_wcps; + } + + int num_edges = 0; + + // create graph for points inside the same mcell + for (Facade::Blob* mcell : cluster.children()) { + std::vector pinds = cluster.get_blob_indices(mcell); + int max_wire_interval = mcell->get_max_wire_interval(); + int min_wire_interval = mcell->get_min_wire_interval(); + std::map>* map_max_index_wcps; + std::map>* map_min_index_wcps; + if 
(mcell->get_max_wire_type() == 0) { + map_max_index_wcps = &map_mcell_uindex_wcps[mcell]; + } + else if (mcell->get_max_wire_type() == 1) { + map_max_index_wcps = &map_mcell_vindex_wcps[mcell]; + } + else { + map_max_index_wcps = &map_mcell_windex_wcps[mcell]; + } + if (mcell->get_min_wire_type() == 0) { + map_min_index_wcps = &map_mcell_uindex_wcps[mcell]; + } + else if (mcell->get_min_wire_type() == 1) { + map_min_index_wcps = &map_mcell_vindex_wcps[mcell]; + } + else { + map_min_index_wcps = &map_mcell_windex_wcps[mcell]; + } + + for (const int pind1 : pinds) { + int index_max_wire; + int index_min_wire; + if (mcell->get_max_wire_type() == 0) { + index_max_wire = winds[0][pind1]; + } + else if (mcell->get_max_wire_type() == 1) { + index_max_wire = winds[1][pind1]; + } + else { + index_max_wire = winds[2][pind1]; + } + if (mcell->get_min_wire_type() == 0) { + index_min_wire = winds[0][pind1]; + } + else if (mcell->get_min_wire_type() == 1) { + index_min_wire = winds[1][pind1]; + } + else { + index_min_wire = winds[2][pind1]; + } + std::vector*> max_wcps_set; + std::vector*> min_wcps_set; + // go through the first map and find the ones satisfying the condition + for (auto it2 = map_max_index_wcps->begin(); it2 != map_max_index_wcps->end(); it2++) { + if (std::abs(it2->first - index_max_wire) <= max_wire_interval) { + max_wcps_set.push_back(&(it2->second)); + } + } + // go through the second map and find the ones satisfying the condition + for (auto it2 = map_min_index_wcps->begin(); it2 != map_min_index_wcps->end(); it2++) { + if (std::abs(it2->first - index_min_wire) <= min_wire_interval) { + min_wcps_set.push_back(&(it2->second)); + } + } + + std::set wcps_set1; + std::set wcps_set2; + + for (auto it2 = max_wcps_set.begin(); it2 != max_wcps_set.end(); it2++) { + wcps_set1.insert((*it2)->begin(), (*it2)->end()); + } + for (auto it3 = min_wcps_set.begin(); it3 != min_wcps_set.end(); it3++) { + wcps_set2.insert((*it3)->begin(), (*it3)->end()); + } + + { + std::set 
common_set; + set_intersection(wcps_set1.begin(), wcps_set1.end(), wcps_set2.begin(), wcps_set2.end(), + std::inserter(common_set, common_set.begin())); + + for (auto it4 = common_set.begin(); it4 != common_set.end(); it4++) { + const int pind2 = *it4; + if (pind1 != pind2) { + // avoid duplicated edge addition + if (!boost::edge(pind1, pind2, graph).second) { + + auto edge = add_edge(pind1,pind2,(sqrt(pow(points[0][pind1] - points[0][pind2], 2) + + pow(points[1][pind1] - points[1][pind2], 2) + + pow(points[2][pind1] - points[2][pind2], 2))),graph); + if (edge.second){ + num_edges ++; + } + } + } + } + } + } + } + + // create graph for points between connected mcells, need to separate apa, face, and then ... + std::map > > af_time_slices; // apa,face --> time slices + for (auto it = cluster.time_blob_map().begin(); it != cluster.time_blob_map().end(); it++) { + int apa = it->first; + for (auto it1 = it->second.begin(); it1 != it->second.end(); it1++) { + int face = it1->first; + std::vector time_slices_vec; + for (auto it2 = it1->second.begin(); it2 != it1->second.end(); it2++) { + time_slices_vec.push_back(it2->first); + } + af_time_slices[apa][face] = time_slices_vec; + } + } + + std::vector> connected_mcells; + + for (auto it = af_time_slices.begin(); it != af_time_slices.end(); it++) { + int apa = it->first; + for (auto it1 = it->second.begin(); it1 != it->second.end(); it1++) { + int face = it1->first; + std::vector& time_slices = it1->second; + for (size_t i = 0; i != time_slices.size(); i++) { + const auto& mcells_set = cluster.time_blob_map().at(apa).at(face).at(time_slices.at(i)); + + // create graph for points in mcell inside the same time slice + if (mcells_set.size() >= 2) { + for (auto it2 = mcells_set.begin(); it2 != mcells_set.end(); it2++) { + auto mcell1 = *it2; + auto it2p = it2; + if (it2p != mcells_set.end()) { + it2p++; + for (auto it3 = it2p; it3 != mcells_set.end(); it3++) { + auto mcell2 = *(it3); + if (mcell1->overlap_fast(*mcell2, 2)) + 
connected_mcells.push_back(std::make_pair(mcell1, mcell2)); + } + } + } + } + // create graph for points between connected mcells in adjacent time slices + 1, if not, + 2 + std::vector vec_mcells_set; + if (i + 1 < time_slices.size()) { + if (time_slices.at(i + 1) - time_slices.at(i) == 1*cluster.grouping()->get_nticks_per_slice().at(apa).at(face)) { + vec_mcells_set.push_back(cluster.time_blob_map().at(apa).at(face).at(time_slices.at(i + 1))); + if (i + 2 < time_slices.size()) + if (time_slices.at(i + 2) - time_slices.at(i) == 2*cluster.grouping()->get_nticks_per_slice().at(apa).at(face)) + vec_mcells_set.push_back(cluster.time_blob_map().at(apa).at(face).at(time_slices.at(i + 2))); + } + else if (time_slices.at(i + 1) - time_slices.at(i) == 2*cluster.grouping()->get_nticks_per_slice().at(apa).at(face)) { + vec_mcells_set.push_back(cluster.time_blob_map().at(apa).at(face).at(time_slices.at(i + 1))); + } + } + // bool flag = false; + for (size_t j = 0; j != vec_mcells_set.size(); j++) { + // if (flag) break; + auto& next_mcells_set = vec_mcells_set.at(j); + for (auto it1 = mcells_set.begin(); it1 != mcells_set.end(); it1++) { + auto mcell1 = (*it1); + for (auto it2 = next_mcells_set.begin(); it2 != next_mcells_set.end(); it2++) { + auto mcell2 = (*it2); + if (mcell1->overlap_fast(*mcell2, 2)) { + // flag = true; // correct??? + connected_mcells.push_back(std::make_pair(mcell1, mcell2)); + } + } + } + } + } + } + } + + // establish edge ... + const int max_num_nodes = num_neighbors; + std::map, std::set>> closest_index; + + for (auto it = connected_mcells.begin(); it != connected_mcells.end(); it++) { + auto mcell1 = (*it).first; + auto mcell2 = (*it).second; + + std::vector pinds1 = cluster.get_blob_indices(mcell1); + std::vector pinds2 = cluster.get_blob_indices(mcell2); + + // test 2 against 1 ... 
+ int max_wire_interval = mcell1->get_max_wire_interval(); + int min_wire_interval = mcell1->get_min_wire_interval(); + std::map>* map_max_index_wcps; + std::map>* map_min_index_wcps; + + if (mcell1->get_max_wire_type() == 0) { + map_max_index_wcps = &map_mcell_uindex_wcps.at(mcell2); + } + else if (mcell1->get_max_wire_type() == 1) { + map_max_index_wcps = &map_mcell_vindex_wcps.at(mcell2); + } + else { + map_max_index_wcps = &map_mcell_windex_wcps.at(mcell2); + } + if (mcell1->get_min_wire_type() == 0) { + map_min_index_wcps = &map_mcell_uindex_wcps.at(mcell2); + } + else if (mcell1->get_min_wire_type() == 1) { + map_min_index_wcps = &map_mcell_vindex_wcps.at(mcell2); + } + else { + map_min_index_wcps = &map_mcell_windex_wcps.at(mcell2); + } + + for (const int pind1 : pinds1) { + int index_max_wire; + int index_min_wire; + if (mcell1->get_max_wire_type() == 0) { + index_max_wire = winds[0][pind1]; + } + else if (mcell1->get_max_wire_type() == 1) { + index_max_wire = winds[1][pind1]; + } + else { + index_max_wire = winds[2][pind1]; + } + if (mcell1->get_min_wire_type() == 0) { + index_min_wire = winds[0][pind1]; + } + else if (mcell1->get_min_wire_type() == 1) { + index_min_wire = winds[1][pind1]; + } + else { + index_min_wire = winds[2][pind1]; + } + std::vector*> max_wcps_set; + std::vector*> min_wcps_set; + // go through the first map and find the ones satisfying the condition + for (auto it2 = map_max_index_wcps->begin(); it2 != map_max_index_wcps->end(); it2++) { + if (std::abs(it2->first - index_max_wire) <= max_wire_interval) { + max_wcps_set.push_back(&(it2->second)); + } + } + // go through the second map and find the ones satisfying the condition + for (auto it2 = map_min_index_wcps->begin(); it2 != map_min_index_wcps->end(); it2++) { + if (std::abs(it2->first - index_min_wire) <= min_wire_interval) { + min_wcps_set.push_back(&(it2->second)); + } + } + + std::set wcps_set1; + std::set wcps_set2; + + for (auto it2 = max_wcps_set.begin(); it2 != 
max_wcps_set.end(); it2++) { + wcps_set1.insert((*it2)->begin(), (*it2)->end()); + } + for (auto it3 = min_wcps_set.begin(); it3 != min_wcps_set.end(); it3++) { + wcps_set2.insert((*it3)->begin(), (*it3)->end()); + } + + { + std::set common_set; + set_intersection(wcps_set1.begin(), wcps_set1.end(), wcps_set2.begin(), wcps_set2.end(), + std::inserter(common_set, common_set.begin())); + + for (auto it4 = common_set.begin(); it4 != common_set.end(); it4++) { + const int pind2 = *it4; + if (pind1 != pind2) { + double dis = sqrt(pow(points[0][pind1] - points[0][pind2], 2) + + pow(points[1][pind1] - points[1][pind2], 2) + + pow(points[2][pind1] - points[2][pind2], 2)); + auto b2 = cluster.blob_with_point(pind2); + auto key = std::make_pair(pind1, b2->slice_index_min()); + + if (closest_index.find(key) == closest_index.end()) { + std::set > temp_sets; + temp_sets.insert(std::make_pair(dis,pind2)); + closest_index[key] = temp_sets; + } + else { + closest_index[key].insert(std::make_pair(dis,pind2)); + if (closest_index[key].size() > static_cast(max_num_nodes)) { + auto it5 = closest_index[key].begin(); + for (int qx = 0; qx!=max_num_nodes;qx++){ + it5++; + } + closest_index[key].erase(it5,closest_index[key].end()); + } + } + } + } + } + } + + // test 1 against 2 ... 
+ max_wire_interval = mcell2->get_max_wire_interval(); + min_wire_interval = mcell2->get_min_wire_interval(); + if (mcell2->get_max_wire_type() == 0) { + map_max_index_wcps = &map_mcell_uindex_wcps[mcell1]; + } + else if (mcell2->get_max_wire_type() == 1) { + map_max_index_wcps = &map_mcell_vindex_wcps[mcell1]; + } + else { + map_max_index_wcps = &map_mcell_windex_wcps[mcell1]; + } + if (mcell2->get_min_wire_type() == 0) { + map_min_index_wcps = &map_mcell_uindex_wcps[mcell1]; + } + else if (mcell2->get_min_wire_type() == 1) { + map_min_index_wcps = &map_mcell_vindex_wcps[mcell1]; + } + else { + map_min_index_wcps = &map_mcell_windex_wcps[mcell1]; + } + for (const int pind1 : pinds2) { + int index_max_wire; + int index_min_wire; + if (mcell2->get_max_wire_type() == 0) { + index_max_wire = winds[0][pind1]; + } + else if (mcell2->get_max_wire_type() == 1) { + index_max_wire = winds[1][pind1]; + } + else { + index_max_wire = winds[2][pind1]; + } + if (mcell2->get_min_wire_type() == 0) { + index_min_wire = winds[0][pind1]; + } + else if (mcell2->get_min_wire_type() == 1) { + index_min_wire = winds[1][pind1]; + } + else { + index_min_wire = winds[2][pind1]; + } + std::vector*> max_wcps_set; + std::vector*> min_wcps_set; + // go through the first map and find the ones satisfying the condition + for (auto it2 = map_max_index_wcps->begin(); it2 != map_max_index_wcps->end(); it2++) { + if (std::abs(it2->first - index_max_wire) <= max_wire_interval) { + max_wcps_set.push_back(&(it2->second)); + } + } + // go through the second map and find the ones satisfying the condition + for (auto it2 = map_min_index_wcps->begin(); it2 != map_min_index_wcps->end(); it2++) { + if (std::abs(it2->first - index_min_wire) <= min_wire_interval) { + min_wcps_set.push_back(&(it2->second)); + } + } + + std::set wcps_set1; + std::set wcps_set2; + + for (auto it2 = max_wcps_set.begin(); it2 != max_wcps_set.end(); it2++) { + wcps_set1.insert((*it2)->begin(), (*it2)->end()); + } + for (auto it3 = 
min_wcps_set.begin(); it3 != min_wcps_set.end(); it3++) { + wcps_set2.insert((*it3)->begin(), (*it3)->end()); + } + + { + std::set common_set; + set_intersection(wcps_set1.begin(), wcps_set1.end(), wcps_set2.begin(), wcps_set2.end(), + std::inserter(common_set, common_set.begin())); + + for (auto it4 = common_set.begin(); it4 != common_set.end(); it4++) { + const int pind2 = *it4; + if (pind1 != pind2) { + double dis = sqrt(pow(points[0][pind1] - points[0][pind2], 2) + + pow(points[1][pind1] - points[1][pind2], 2) + + pow(points[2][pind1] - points[2][pind2], 2)); + auto b2 = cluster.blob_with_point(pind2); + auto key = std::make_pair(pind1, b2->slice_index_min()); + + if (closest_index.find(key) == closest_index.end()) { + std::set > temp_sets; + temp_sets.insert(std::make_pair(dis,pind2)); + closest_index[key] = temp_sets; + } + else { + closest_index[key].insert(std::make_pair(dis,pind2)); + if (closest_index[key].size() > static_cast(max_num_nodes)) { + auto it5 = closest_index[key].begin(); + for (int qx = 0; qx!=max_num_nodes;qx++){ + it5++; + } + closest_index[key].erase(it5,closest_index[key].end()); + } + } + } + } + } + } + } + + for (auto it4 = closest_index.begin(); it4 != closest_index.end(); it4++) { + int index1 = it4->first.first; + for (auto it5 = it4->second.begin(); it5!=it4->second.end(); it5++){ + int index2 = (*it5).second; + double dis = (*it5).first; + + if (!boost::edge(index1, index2, graph).second) { + + auto edge = add_edge(index1,index2,dis,graph); + if (edge.second){ + num_edges ++; + } + } + // protect against dead cells ... 
+ if (it5 == it4->second.begin() && dis > 0.25*units::cm) + break; + } + + } + + (void)num_edges; // suppress unused variable warning +} + +using namespace WireCell::Clus::Facade; + + +void Graphs::connect_graph_closely_pid(const Facade::Cluster& cluster, Weighted::Graph& graph) +{ + // PID-specific parameters (from prototype) + const int max_num_nodes = 5; + const double protection_distance = 0.25 * units::cm; + + // Build wire index maps for each blob (equivalent to mcell in prototype) + using mcell_wire_wcps_map_t = std::map>, Facade::BlobLess>; + mcell_wire_wcps_map_t map_mcell_uindex_wcps, map_mcell_vindex_wcps, map_mcell_windex_wcps; + + const auto& points = cluster.points(); + const auto& winds = cluster.wire_indices(); + + // Build wire index maps for each blob + for (const Facade::Blob* mcell : cluster.children()) { + std::map> map_uindex_wcps; + std::map> map_vindex_wcps; + std::map> map_windex_wcps; + + std::vector pinds = cluster.get_blob_indices(mcell); + for (const int pind : pinds) { + // Build U wire index map + auto u_it = map_uindex_wcps.find(winds[0][pind]); + if (u_it == map_uindex_wcps.end()) { + std::set wcps; + wcps.insert(pind); + map_uindex_wcps[winds[0][pind]] = wcps; + } else { + u_it->second.insert(pind); + } + + // Build V wire index map + auto v_it = map_vindex_wcps.find(winds[1][pind]); + if (v_it == map_vindex_wcps.end()) { + std::set wcps; + wcps.insert(pind); + map_vindex_wcps[winds[1][pind]] = wcps; + } else { + v_it->second.insert(pind); + } + + // Build W wire index map + auto w_it = map_windex_wcps.find(winds[2][pind]); + if (w_it == map_windex_wcps.end()) { + std::set wcps; + wcps.insert(pind); + map_windex_wcps[winds[2][pind]] = wcps; + } else { + w_it->second.insert(pind); + } + } + + map_mcell_uindex_wcps[mcell] = map_uindex_wcps; + map_mcell_vindex_wcps[mcell] = map_vindex_wcps; + map_mcell_windex_wcps[mcell] = map_windex_wcps; + } + + int num_edges = 0; + + // Phase 1: Create graph for points inside the same mcell (blob) 
+ for (const Facade::Blob* mcell : cluster.children()) { + std::vector pinds = cluster.get_blob_indices(mcell); + int max_wire_interval = mcell->get_max_wire_interval(); + int min_wire_interval = mcell->get_min_wire_interval(); + + // Get appropriate wire index maps based on wire types + std::map>* map_max_index_wcps; + std::map>* map_min_index_wcps; + + if (mcell->get_max_wire_type() == 0) { + map_max_index_wcps = &map_mcell_uindex_wcps[mcell]; + } else if (mcell->get_max_wire_type() == 1) { + map_max_index_wcps = &map_mcell_vindex_wcps[mcell]; + } else { + map_max_index_wcps = &map_mcell_windex_wcps[mcell]; + } + + if (mcell->get_min_wire_type() == 0) { + map_min_index_wcps = &map_mcell_uindex_wcps[mcell]; + } else if (mcell->get_min_wire_type() == 1) { + map_min_index_wcps = &map_mcell_vindex_wcps[mcell]; + } else { + map_min_index_wcps = &map_mcell_windex_wcps[mcell]; + } + + for (const int pind1 : pinds) { + int index_max_wire, index_min_wire; + + // Get wire indices for current point + if (mcell->get_max_wire_type() == 0) { + index_max_wire = winds[0][pind1]; + } else if (mcell->get_max_wire_type() == 1) { + index_max_wire = winds[1][pind1]; + } else { + index_max_wire = winds[2][pind1]; + } + + if (mcell->get_min_wire_type() == 0) { + index_min_wire = winds[0][pind1]; + } else if (mcell->get_min_wire_type() == 1) { + index_min_wire = winds[1][pind1]; + } else { + index_min_wire = winds[2][pind1]; + } + + // Find candidate points within wire intervals + std::vector*> max_wcps_set; + std::vector*> min_wcps_set; + + // Find points within max wire interval + for (auto& wire_pair : *map_max_index_wcps) { + if (std::abs(wire_pair.first - index_max_wire) <= max_wire_interval) { + max_wcps_set.push_back(&wire_pair.second); + } + } + + // Find points within min wire interval + for (auto& wire_pair : *map_min_index_wcps) { + if (std::abs(wire_pair.first - index_min_wire) <= min_wire_interval) { + min_wcps_set.push_back(&wire_pair.second); + } + } + + // Create 
candidate sets + std::set wcps_set1, wcps_set2; + for (const auto* wcp_set : max_wcps_set) { + wcps_set1.insert(wcp_set->begin(), wcp_set->end()); + } + for (const auto* wcp_set : min_wcps_set) { + wcps_set2.insert(wcp_set->begin(), wcp_set->end()); + } + + // Find intersection of candidate sets + std::set common_set; + std::set_intersection(wcps_set1.begin(), wcps_set1.end(), + wcps_set2.begin(), wcps_set2.end(), + std::inserter(common_set, common_set.begin())); + + // Connect to all valid points in the same mcell + for (const int pind2 : common_set) { + if (pind2 != pind1) { + double distance = std::sqrt( + std::pow(points[0][pind1] - points[0][pind2], 2) + + std::pow(points[1][pind1] - points[1][pind2], 2) + + std::pow(points[2][pind1] - points[2][pind2], 2) + ); + + if (!boost::edge(pind1, pind2, graph).second){ + auto edge_result = add_edge(pind1, pind2, distance, graph); + + // std::cout << mcell->slice_index_min() << " " << mcell->u_wire_index_min() << " " << mcell->v_wire_index_min() << " " + // << mcell->w_wire_index_min() << " " << pind1 << " " << pind2 + // << " " << edge_result.second << std::endl; + + if (edge_result.second) { + num_edges++; + } + } + } + } + } + } + + // Phase 2: Create graph for points between connected mcells across time slices + const auto& time_cells_set_map = cluster.time_blob_map(); + + // Build time slice structure + std::map>> af_time_slices; // apa,face --> time slices + for (const auto& apa_pair : time_cells_set_map) { + int apa = apa_pair.first; + for (const auto& face_pair : apa_pair.second) { + int face = face_pair.first; + std::vector time_slices_vec; + for (const auto& time_pair : face_pair.second) { + time_slices_vec.push_back(time_pair.first); + } + af_time_slices[apa][face] = time_slices_vec; + } + } + + // Find connected mcells across time slices + std::vector> connected_mcells; + + for (const auto& apa_pair : af_time_slices) { + int apa = apa_pair.first; + for (const auto& face_pair : apa_pair.second) { + int face 
= face_pair.first; + const std::vector& time_slices = face_pair.second; + + for (size_t i = 0; i < time_slices.size(); i++) { + const auto& mcells_set = time_cells_set_map.at(apa).at(face).at(time_slices[i]); + + // Connect mcells within the same time slice + if (mcells_set.size() >= 2) { + for (auto it1 = mcells_set.begin(); it1 != mcells_set.end(); it1++) { + auto mcell1 = *it1; + auto it2 = it1; + it2++; + for (; it2 != mcells_set.end(); it2++) { + auto mcell2 = *it2; + if (mcell1->overlap_fast(*mcell2, 2)) { + connected_mcells.push_back(std::make_pair(mcell1, mcell2)); + } + } + } + } + + // Connect mcells across adjacent time slices + std::vector vec_mcells_set; + if (i + 1 < time_slices.size()) { + if (time_slices.at(i + 1) - time_slices.at(i) == 1*cluster.grouping()->get_nticks_per_slice().at(apa).at(face)) { + vec_mcells_set.push_back(cluster.time_blob_map().at(apa).at(face).at(time_slices.at(i + 1))); + if (i + 2 < time_slices.size()) + if (time_slices.at(i + 2) - time_slices.at(i) == 2*cluster.grouping()->get_nticks_per_slice().at(apa).at(face)) + vec_mcells_set.push_back(cluster.time_blob_map().at(apa).at(face).at(time_slices.at(i + 2))); + } + else if (time_slices.at(i + 1) - time_slices.at(i) == 2*cluster.grouping()->get_nticks_per_slice().at(apa).at(face)) { + vec_mcells_set.push_back(cluster.time_blob_map().at(apa).at(face).at(time_slices.at(i + 1))); + } + } + + + // Find overlapping mcells between current and next time slices + for (const auto& next_mcells_set : vec_mcells_set) { + for (const auto* mcell1 : mcells_set) { + for (const auto* mcell2 : next_mcells_set) { + if (mcell1->overlap_fast(*mcell2, 2)) { + connected_mcells.push_back(std::make_pair(mcell1, mcell2)); + } + } + } + } + } + } + } + + // std::cout << "Connected mcells across time slices: " << connected_mcells.size() << std::endl; + + // Phase 3: Establish cross-mcell connections using PID-specific logic + std::map, std::set>> closest_index; + + for (const auto& mcell_pair : 
connected_mcells) { + const Facade::Blob* mcell1 = mcell_pair.first; + const Facade::Blob* mcell2 = mcell_pair.second; + + // Process connections in both directions + for (int direction = 0; direction < 2; direction++) { + const Facade::Blob* source_mcell = (direction == 0) ? mcell1 : mcell2; + const Facade::Blob* target_mcell = (direction == 0) ? mcell2 : mcell1; + + std::vector source_pinds = cluster.get_blob_indices(source_mcell); + + int max_wire_interval = source_mcell->get_max_wire_interval(); + int min_wire_interval = source_mcell->get_min_wire_interval(); + + std::map>* map_max_index_wcps; + std::map>* map_min_index_wcps; + + // Select appropriate wire index maps based on source mcell's wire types + if (source_mcell->get_max_wire_type() == 0) { + map_max_index_wcps = &map_mcell_uindex_wcps[target_mcell]; + } else if (source_mcell->get_max_wire_type() == 1) { + map_max_index_wcps = &map_mcell_vindex_wcps[target_mcell]; + } else { + map_max_index_wcps = &map_mcell_windex_wcps[target_mcell]; + } + + if (source_mcell->get_min_wire_type() == 0) { + map_min_index_wcps = &map_mcell_uindex_wcps[target_mcell]; + } else if (source_mcell->get_min_wire_type() == 1) { + map_min_index_wcps = &map_mcell_vindex_wcps[target_mcell]; + } else { + map_min_index_wcps = &map_mcell_windex_wcps[target_mcell]; + } + + for (const int pind1 : source_pinds) { + int index_max_wire, index_min_wire; + + // Get wire indices for current point + if (source_mcell->get_max_wire_type() == 0) { + index_max_wire = winds[0][pind1]; + } else if (source_mcell->get_max_wire_type() == 1) { + index_max_wire = winds[1][pind1]; + } else { + index_max_wire = winds[2][pind1]; + } + + if (source_mcell->get_min_wire_type() == 0) { + index_min_wire = winds[0][pind1]; + } else if (source_mcell->get_min_wire_type() == 1) { + index_min_wire = winds[1][pind1]; + } else { + index_min_wire = winds[2][pind1]; + } + + // Find candidate points within wire intervals + std::vector*> max_wcps_set; + std::vector*> 
min_wcps_set; + + for (auto& wire_pair : *map_max_index_wcps) { + if (std::abs(wire_pair.first - index_max_wire) <= max_wire_interval) { + max_wcps_set.push_back(&wire_pair.second); + } + } + + for (auto& wire_pair : *map_min_index_wcps) { + if (std::abs(wire_pair.first - index_min_wire) <= min_wire_interval) { + min_wcps_set.push_back(&wire_pair.second); + } + } + + // Create candidate sets + std::set wcps_set1, wcps_set2; + for (const auto* wcp_set : max_wcps_set) { + wcps_set1.insert(wcp_set->begin(), wcp_set->end()); + } + for (const auto* wcp_set : min_wcps_set) { + wcps_set2.insert(wcp_set->begin(), wcp_set->end()); + } + + // Find intersection + std::set common_set; + std::set_intersection(wcps_set1.begin(), wcps_set1.end(), + wcps_set2.begin(), wcps_set2.end(), + std::inserter(common_set, common_set.begin())); + + // Build closest index map for PID-specific connection limiting + for (const int pind2 : common_set) { + if (pind2 != pind1) { + double distance = std::sqrt( + std::pow(points[0][pind1] - points[0][pind2], 2) + + std::pow(points[1][pind1] - points[1][pind2], 2) + + std::pow(points[2][pind1] - points[2][pind2], 2) + ); + + // Use target mcell's time slice as key + int target_time_slice = target_mcell->slice_index_min(); + auto key = std::make_pair(pind1, target_time_slice); + + auto it = closest_index.find(key); + if (it == closest_index.end()) { + std::set> temp_set; + temp_set.insert(std::make_pair(distance, pind2)); + closest_index[key] = temp_set; + } else { + it->second.insert(std::make_pair(distance, pind2)); + // Keep only the closest max_num_nodes connections + if (it->second.size() > static_cast(max_num_nodes)) { + auto erase_it = it->second.begin(); + std::advance(erase_it, max_num_nodes); + it->second.erase(erase_it, it->second.end()); + } + } + } + } + } + } + } + // std::cout << closest_index.size() << " closest index entries created" << std::endl; + + // Phase 4: Add the selected edges from closest_index map + for (const auto& 
closest_pair : closest_index) { + int pind1 = closest_pair.first.first; + + for (const auto& distance_pair : closest_pair.second) { + int pind2 = distance_pair.second; + double distance = distance_pair.first; + + if (!boost::edge(pind1, pind2, graph).second) { + auto edge_result = add_edge(pind1, pind2, distance, graph); + + // std::cout << "Adding edge " << pind1 << " " << pind2 << " " << distance << " " << edge_result.second << std::endl; + + + if (edge_result.second) { + num_edges++; + } + } + + // PID-specific protection: break if first connection is too far + if (distance_pair == *closest_pair.second.begin() && distance > protection_distance) { + break; + } + } + } + + // Debug output (similar to prototype) + // std::cout << "PID Graph: " << num_edges << " edges added" << std::endl; + (void)num_edges; // suppress unused variable warning +} \ No newline at end of file diff --git a/clus/src/connect_graph_ctpc.cxx b/clus/src/connect_graph_ctpc.cxx new file mode 100644 index 000000000..1ba9ee1b3 --- /dev/null +++ b/clus/src/connect_graph_ctpc.cxx @@ -0,0 +1,760 @@ +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellClus/Facade_Cluster.h" +#include "WireCellClus/Facade_Blob.h" +#include "WireCellClus/Facade_Grouping.h" + +#include "connect_graphs.h" + +using namespace WireCell; +using namespace WireCell::Clus; +using namespace WireCell::Clus::Facade; + +void Graphs::connect_graph_ctpc( + const Facade::Cluster& cluster, + IDetectorVolumes::pointer dv, + Clus::IPCTransformSet::pointer pcts, + Weighted::Graph& graph) +{ + // This used to be the body of Cluster::Connect_graph(dv,pcts,use_ctpc). 
+ const bool use_ctpc=true; + const auto* grouping = cluster.grouping(); + + // now form the connected components + std::vector component(num_vertices(graph)); + const size_t num = connected_components(graph, &component[0]); + + // Create ordered components + std::vector ordered_components; + ordered_components.reserve(component.size()); + for (size_t i = 0; i < component.size(); ++i) { + ordered_components.emplace_back(i); + } + + // Assign vertices to components + for (size_t i = 0; i < component.size(); ++i) { + ordered_components[component[i]].add_vertex(i); + } + + // Sort components by minimum vertex index + std::sort(ordered_components.begin(), ordered_components.end(), + [](const ComponentInfo& a, const ComponentInfo& b) { + return a.min_vertex < b.min_vertex; + }); + + if (num <= 1) return; + + std::vector> pt_clouds; + std::vector> pt_clouds_global_indices; // can use to access wpid ... + + const auto& points = cluster.points(); + for (const auto& comp : ordered_components) { + auto pt_cloud = std::make_shared(); + std::vector global_indices; + for (size_t vertex_idx : comp.vertex_indices) { + pt_cloud->add({points[0][vertex_idx], points[1][vertex_idx], points[2][vertex_idx]}); + global_indices.push_back(vertex_idx); + } + pt_clouds.push_back(pt_cloud); + pt_clouds_global_indices.push_back(global_indices); + } + + /// DEBUGONLY: + if (0) { + for (size_t i = 0; i != num; i++) { + std::cout << *pt_clouds.at(i) << std::endl; + std::cout << "global indices: "; + for (size_t j = 0; j != pt_clouds_global_indices.at(i).size(); j++) { + std::cout << pt_clouds_global_indices.at(i).at(j) << " "; + } + std::cout << std::endl; + } + } + + // Initiate dist. 
metrics + std::vector>> index_index_dis( + num, std::vector>(num)); + std::vector>> index_index_dis_mst( + num, std::vector>(num)); + + std::vector>> index_index_dis_dir1( + num, std::vector>(num)); + std::vector>> index_index_dis_dir2( + num, std::vector>(num)); + std::vector>> index_index_dis_dir_mst( + num, std::vector>(num)); + + for (size_t j = 0; j != num; j++) { + for (size_t k = 0; k != num; k++) { + index_index_dis[j][k] = std::make_tuple(-1, -1, 1e9); + index_index_dis_mst[j][k] = std::make_tuple(-1, -1, 1e9); + + index_index_dis_dir1[j][k] = std::make_tuple(-1, -1, 1e9); + index_index_dis_dir2[j][k] = std::make_tuple(-1, -1, 1e9); + index_index_dis_dir_mst[j][k] = std::make_tuple(-1, -1, 1e9); + } + } + + // Calc. dis, dis_dir1, dis_dir2 + // check against the closest distance ... + // no need to have MST ... + for (size_t j = 0; j != num; j++) { + for (size_t k = j + 1; k != num; k++) { + index_index_dis[j][k] = pt_clouds.at(j)->get_closest_points(*pt_clouds.at(k)); + + if ((num < 100 && pt_clouds.at(j)->get_num_points() > 100 && pt_clouds.at(k)->get_num_points() > 100 && + (pt_clouds.at(j)->get_num_points() + pt_clouds.at(k)->get_num_points()) > 400) || + (pt_clouds.at(j)->get_num_points() > 500 && pt_clouds.at(k)->get_num_points() > 500)) { + geo_point_t p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis[j][k])); + geo_point_t p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis[j][k])); + + geo_point_t dir1 = cluster.vhough_transform(p1, 30 * units::cm, Cluster::HoughParamSpace::theta_phi, pt_clouds.at(j), pt_clouds_global_indices.at(j)); + geo_point_t dir2 = cluster.vhough_transform(p2, 30 * units::cm, Cluster::HoughParamSpace::theta_phi, pt_clouds.at(k), pt_clouds_global_indices.at(k)); + dir1 = dir1 * -1; + dir2 = dir2 * -1; + + std::pair result1 = pt_clouds.at(k)->get_closest_point_along_vec( + p1, dir1, 80 * units::cm, 5 * units::cm, 7.5, 3 * units::cm); + + if (result1.first >= 0) { + index_index_dis_dir1[j][k] = + 
std::make_tuple(std::get<0>(index_index_dis[j][k]), result1.first, result1.second); + } + + std::pair result2 = pt_clouds.at(j)->get_closest_point_along_vec( + p2, dir2, 80 * units::cm, 5 * units::cm, 7.5, 3 * units::cm); + + if (result2.first >= 0) { + index_index_dis_dir2[j][k] = + std::make_tuple(result2.first, std::get<1>(index_index_dis[j][k]), result2.second); + } + } + + // Now check the path ... + { + geo_point_t p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis[j][k])); + auto wpid_p1 = cluster.wire_plane_id(pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis[j][k]))); + + geo_point_t p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis[j][k])); + auto wpid_p2 = cluster.wire_plane_id(pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis[j][k]))); + + double dis = sqrt(pow(p1.x() - p2.x(), 2) + pow(p1.y() - p2.y(), 2) + pow(p1.z() - p2.z(), 2)); + double step_dis = 1.0 * units::cm; + int num_steps = dis / step_dis + 1; + int num_bad = 0; + geo_point_t test_p; + for (int ii = 0; ii != num_steps; ii++) { + test_p.set(p1.x() + (p2.x() - p1.x()) / num_steps * (ii + 1), + p1.y() + (p2.y() - p1.y()) / num_steps * (ii + 1), + p1.z() + (p2.z() - p1.z()) / num_steps * (ii + 1)); + + if (use_ctpc) { + auto test_wpid = get_wireplaneid(test_p, wpid_p1, wpid_p2, dv); + if (test_wpid.apa()!=-1){ + geo_point_t test_p_raw = test_p; + if (cluster.get_default_scope().hash() != cluster.get_raw_scope().hash()){ + const auto transform = pcts->pc_transform(cluster.get_scope_transform()); + double cluster_t0 = cluster.get_cluster_t0(); + test_p_raw = transform->backward(test_p, cluster_t0, test_wpid.face(), test_wpid.apa()); + } + const bool good_point = grouping->is_good_point(test_p_raw, test_wpid.apa(), test_wpid.face()); + if (!good_point) num_bad++; + } + } + } + + if (num_bad > 7 || (num_bad > 2 && num_bad >= 0.75 * num_steps)) { + index_index_dis[j][k] = std::make_tuple(-1, -1, 1e9); + } + } + + // Now check the path ... 
+ if (std::get<0>(index_index_dis_dir1[j][k]) >= 0) { + geo_point_t p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis_dir1[j][k])); + auto wpid_p1 = cluster.wire_plane_id(pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_dir1[j][k]))); + + geo_point_t p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis_dir1[j][k])); + auto wpid_p2 = cluster.wire_plane_id(pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_dir1[j][k]))); + + double dis = sqrt(pow(p1.x() - p2.x(), 2) + pow(p1.y() - p2.y(), 2) + pow(p1.z() - p2.z(), 2)); + double step_dis = 1.0 * units::cm; + int num_steps = dis / step_dis + 1; + int num_bad = 0; + geo_point_t test_p; + for (int ii = 0; ii != num_steps; ii++) { + test_p.set(p1.x() + (p2.x() - p1.x()) / num_steps * (ii + 1), + p1.y() + (p2.y() - p1.y()) / num_steps * (ii + 1), + p1.z() + (p2.z() - p1.z()) / num_steps * (ii + 1)); + if (use_ctpc) { + auto test_wpid = get_wireplaneid(test_p, wpid_p1, wpid_p2, dv); + if (test_wpid.apa()!=-1){ + geo_point_t test_p_raw = test_p; + if (cluster.get_default_scope().hash() != cluster.get_raw_scope().hash()){ + const auto transform = pcts->pc_transform(cluster.get_scope_transform()); + double cluster_t0 = cluster.get_cluster_t0(); + test_p_raw = transform->backward(test_p, cluster_t0, test_wpid.face(), test_wpid.apa()); + } + const bool good_point = grouping->is_good_point(test_p_raw, test_wpid.apa(), test_wpid.face()); + if (!good_point) num_bad++; + } + } + } + + if (num_bad > 7 || (num_bad > 2 && num_bad >= 0.75 * num_steps)) { + index_index_dis_dir1[j][k] = std::make_tuple(-1, -1, 1e9); + } + } + + // Now check the path ... 
+ if (std::get<0>(index_index_dis_dir2[j][k]) >= 0) { + geo_point_t p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis_dir2[j][k])); + auto wpid_p1 = cluster.wire_plane_id(pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_dir2[j][k]))); + geo_point_t p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis_dir2[j][k])); + auto wpid_p2 = cluster.wire_plane_id(pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_dir2[j][k]))); + + double dis = sqrt(pow(p1.x() - p2.x(), 2) + pow(p1.y() - p2.y(), 2) + pow(p1.z() - p2.z(), 2)); + double step_dis = 1.0 * units::cm; + int num_steps = dis / step_dis + 1; + int num_bad = 0; + geo_point_t test_p; + for (int ii = 0; ii != num_steps; ii++) { + test_p.set(p1.x() + (p2.x() - p1.x()) / num_steps * (ii + 1), + p1.y() + (p2.y() - p1.y()) / num_steps * (ii + 1), + p1.z() + (p2.z() - p1.z()) / num_steps * (ii + 1)); + if (use_ctpc) { + auto test_wpid = get_wireplaneid(test_p, wpid_p1, wpid_p2, dv); + if (test_wpid.apa()!=-1){ + geo_point_t test_p_raw = test_p; + if (cluster.get_default_scope().hash() != cluster.get_raw_scope().hash()){ + const auto transform = pcts->pc_transform(cluster.get_scope_transform()); + double cluster_t0 = cluster.get_cluster_t0(); + test_p_raw = transform->backward(test_p, cluster_t0, test_wpid.face(), test_wpid.apa()); + } + const bool good_point = grouping->is_good_point(test_p_raw, test_wpid.apa(), test_wpid.face()); + if (!good_point) num_bad++; + } + } + } + + if (num_bad > 7 || (num_bad > 2 && num_bad >= 0.75 * num_steps)) { + index_index_dis_dir2[j][k] = std::make_tuple(-1, -1, 1e9); + } + } + } + } + + // deal with MST of first type + { + Weighted::Graph temp_graph(num); + + for (size_t j = 0; j != num; j++) { + for (size_t k = j + 1; k != num; k++) { + int index1 = j; + int index2 = k; + if (std::get<0>(index_index_dis[j][k]) >= 0) { + if (!boost::edge(index1, index2, temp_graph).second) { + add_edge(index1, index2, std::get<2>(index_index_dis[j][k]), temp_graph); + } + } + 
} + } + + // Process MST + process_mst_deterministically(temp_graph, index_index_dis, index_index_dis_mst); + } + + // MST of the direction ... + { + Weighted::Graph temp_graph(num); + + for (size_t j = 0; j != num; j++) { + for (size_t k = j + 1; k != num; k++) { + int index1 = j; + int index2 = k; + if (std::get<0>(index_index_dis_dir1[j][k]) >= 0 || std::get<0>(index_index_dis_dir2[j][k]) >= 0) { + if (!boost::edge(index1, index2, temp_graph).second) { + add_edge( + index1, index2, + std::min(std::get<2>(index_index_dis_dir1[j][k]), std::get<2>(index_index_dis_dir2[j][k])), + temp_graph); + } + } + } + } + + process_mst_deterministically(temp_graph, index_index_dis, index_index_dis_dir_mst); + + } + + for (size_t j = 0; j != num; j++) { + for (size_t k = j + 1; k != num; k++) { + if (std::get<2>(index_index_dis[j][k]) < 3 * units::cm) { + index_index_dis_mst[j][k] = index_index_dis[j][k]; + } + + // establish the path ... + if (std::get<0>(index_index_dis_mst[j][k]) >= 0) { + const int gind1 = pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_mst[j][k])); + const int gind2 = pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_mst[j][k])); + float dis; + if (std::get<2>(index_index_dis_mst[j][k]) > 5 * units::cm) { + dis = std::get<2>(index_index_dis_mst[j][k]); + } + else { + dis = std::get<2>(index_index_dis_mst[j][k]); + } + if (!boost::edge(gind1, gind2, graph).second) { + /*auto edge =*/ add_edge(gind1, gind2, dis, graph); + } + } + + if (std::get<0>(index_index_dis_dir_mst[j][k]) >= 0) { + if (std::get<0>(index_index_dis_dir1[j][k]) >= 0) { + const int gind1 = pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_dir1[j][k])); + const int gind2 = pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_dir1[j][k])); + float dis; + if (std::get<2>(index_index_dis_dir1[j][k]) > 5 * units::cm) { + dis = std::get<2>(index_index_dis_dir1[j][k]) * 1.1; + } + else { + dis = std::get<2>(index_index_dis_dir1[j][k]); + } + if 
(!boost::edge(gind1, gind2, graph).second) { + /*auto edge =*/ add_edge(gind1, gind2, dis, graph); + } + } + if (std::get<0>(index_index_dis_dir2[j][k]) >= 0) { + const int gind1 = pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_dir2[j][k])); + const int gind2 = pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_dir2[j][k])); + float dis; + if (std::get<2>(index_index_dis_dir2[j][k]) > 5 * units::cm) { + dis = std::get<2>(index_index_dis_dir2[j][k]) * 1.1; + } + else { + dis = std::get<2>(index_index_dis_dir2[j][k]); + } + if (!boost::edge(gind1, gind2, graph).second) { + /*auto edge =*/ add_edge(gind1, gind2, dis, graph); + } + } + } + + } // k + } // j +} + + + + +using namespace WireCell::Clus::Facade; + +void Graphs::connect_graph_ctpc_with_reference( + const Facade::Cluster& cluster, + const Facade::Cluster& ref_cluster, + IDetectorVolumes::pointer dv, + Clus::IPCTransformSet::pointer pcts, + Weighted::Graph& graph) +{ + // Enable CTPC functionality (combining ctpc logic with reference filtering) + const bool use_ctpc = true; + const auto* grouping = cluster.grouping(); + + // Drift direction for directional analysis (equivalent to prototype's drift_dir) + geo_vector_t drift_dir_abs(1, 0, 0); + + // Form connected components from existing graph + std::vector component(num_vertices(graph)); + const size_t num = connected_components(graph, &component[0]); + + // Create ordered components structure (same as baseline) + std::vector ordered_components; + ordered_components.reserve(component.size()); + for (size_t i = 0; i < component.size(); ++i) { + ordered_components.emplace_back(i); + } + + // Assign vertices to components + for (size_t i = 0; i < component.size(); ++i) { + ordered_components[component[i]].add_vertex(i); + } + + // Sort components by minimum vertex index for deterministic behavior + std::sort(ordered_components.begin(), ordered_components.end(), + [](const ComponentInfo& a, const ComponentInfo& b) { + return a.min_vertex 
< b.min_vertex; + }); + + std::cout << "Graph Creation " << cluster.nchildren() << " " << cluster.npoints() << " using reference filtering: " << ref_cluster.npoints() << std::endl; + + + if (num <= 1) return; + + // Initialize point cloud containers for each component + std::vector> pt_clouds; + std::vector> pt_clouds_global_indices; + + for (size_t i = 0; i < ordered_components.size(); ++i) { + auto pt_cloud = std::make_shared(); + pt_clouds.push_back(pt_cloud); + pt_clouds_global_indices.push_back(std::vector()); + } + + const auto& points = cluster.points(); + std::set excluded_points; // Track excluded points (prototype's excluded_points) + + // Check if reference cluster is valid and not empty + bool use_reference_filtering = (ref_cluster.is_valid() && ref_cluster.npoints() > 0); + + + + // REFERENCE FILTERING PHASE - equivalent to prototype's filtering logic + for (size_t i = 0; i < component.size(); ++i) { + bool should_exclude = false; + + // Phase 1: Check point quality (equivalent to prototype's mcell->IsPointGood) + if (!is_point_good(cluster, i, 2)) { + should_exclude = true; + } + // Phase 2: Reference cluster distance filtering (only if ref_cluster is not empty) + else if (use_reference_filtering) { + const auto& ref_kd = ref_cluster.kd3d(); + double temp_min_dis = 0; + geo_point_t temp_p(points[0][i], points[1][i], points[2][i]); + std::vector query_point = {temp_p.x(), temp_p.y(), temp_p.z()}; + auto knn_result = ref_kd.knn(1, query_point); + + if (!knn_result.empty()) { + temp_min_dis = std::sqrt(knn_result[0].second); // knn returns squared distance + } + + // Key filtering criterion from prototype: >= 1.0 cm means exclude + if (temp_min_dis >= 1.0 * units::cm) { + should_exclude = true; + } + } + // If ref_cluster is empty, only use point quality check + + if (should_exclude) { + excluded_points.insert(i); + } else { + // Add to appropriate component cloud + size_t comp_idx = component[i]; + pt_clouds.at(comp_idx)->add({points[0][i], 
points[1][i], points[2][i]}); + pt_clouds_global_indices.at(comp_idx).push_back(i); + } + } + + // Store excluded points in cluster cache (matches prototype's excluded_points) + const_cast(cluster).set_excluded_points(excluded_points); + + + + // Initialize distance metric containers (same structure as baseline) + std::vector>> index_index_dis( + num, std::vector>(num)); + std::vector>> index_index_dis_mst( + num, std::vector>(num)); + + std::vector>> index_index_dis_dir1( + num, std::vector>(num)); + std::vector>> index_index_dis_dir2( + num, std::vector>(num)); + std::vector>> index_index_dis_dir_mst( + num, std::vector>(num)); + + // Initialize all distances to invalid/infinite + for (size_t j = 0; j != num; j++) { + for (size_t k = 0; k != num; k++) { + index_index_dis[j][k] = std::make_tuple(-1, -1, 1e9); + index_index_dis_mst[j][k] = std::make_tuple(-1, -1, 1e9); + index_index_dis_dir1[j][k] = std::make_tuple(-1, -1, 1e9); + index_index_dis_dir2[j][k] = std::make_tuple(-1, -1, 1e9); + index_index_dis_dir_mst[j][k] = std::make_tuple(-1, -1, 1e9); + } + } + + // DISTANCE CALCULATION AND CTPC PATH VALIDATION + for (size_t j = 0; j != num; j++) { + for (size_t k = j + 1; k != num; k++) { + // Find closest points between components + index_index_dis[j][k] = pt_clouds.at(j)->get_closest_points(*pt_clouds.at(k)); + + // Enhanced directional analysis for large components (from prototype logic) + if ((num < 100 && pt_clouds.at(j)->get_num_points() > 100 && pt_clouds.at(k)->get_num_points() > 100 && + (pt_clouds.at(j)->get_num_points() + pt_clouds.at(k)->get_num_points()) > 400) || + (pt_clouds.at(j)->get_num_points() > 500 && pt_clouds.at(k)->get_num_points() > 500)) { + + geo_point_t p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis[j][k])); + geo_point_t p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis[j][k])); + + // Use cluster's vhough_transform method for directional analysis + geo_vector_t dir1 = cluster.vhough_transform(p1, 30 * units::cm, + 
Cluster::HoughParamSpace::theta_phi, pt_clouds.at(j), pt_clouds_global_indices.at(j)); + geo_vector_t dir2 = cluster.vhough_transform(p2, 30 * units::cm, + Cluster::HoughParamSpace::theta_phi, pt_clouds.at(k), pt_clouds_global_indices.at(k)); + dir1 = dir1 * -1; + dir2 = dir2 * -1; + + // Directional search from p1 towards p2 + std::pair result1 = pt_clouds.at(k)->get_closest_point_along_vec( + p1, dir1, 80 * units::cm, 5 * units::cm, 7.5, 3 * units::cm); + + // Enhanced drift direction analysis (from prototype) + if (result1.first < 0) { + double angle_deg = dir1.angle(drift_dir_abs) * 180.0 / M_PI; + if (std::abs(angle_deg - 90.0) < 10.0) { + // Direction nearly perpendicular to drift - try longer hough transform + if (std::abs(angle_deg - 90.0) < 5.0) { + dir1 = cluster.vhough_transform(p1, 80 * units::cm, + Cluster::HoughParamSpace::theta_phi, pt_clouds.at(j), pt_clouds_global_indices.at(j)); + } else if (std::abs(angle_deg - 90.0) < 10.0) { + dir1 = cluster.vhough_transform(p1, 50 * units::cm, + Cluster::HoughParamSpace::theta_phi, pt_clouds.at(j), pt_clouds_global_indices.at(j)); + } + dir1 = dir1 * -1; + result1 = pt_clouds.at(k)->get_closest_point_along_vec( + p1, dir1, 80 * units::cm, 5 * units::cm, 7.5, 3 * units::cm); + } + } + + if (result1.first >= 0) { + index_index_dis_dir1[j][k] = std::make_tuple( + std::get<0>(index_index_dis[j][k]), result1.first, result1.second); + } + + // Directional search from p2 towards p1 + std::pair result2 = pt_clouds.at(j)->get_closest_point_along_vec( + p2, dir2, 80 * units::cm, 5 * units::cm, 7.5, 3 * units::cm); + + // Enhanced drift direction analysis for dir2 + if (result2.first < 0) { + double angle_deg2 = dir2.angle(drift_dir_abs) * 180.0 / M_PI; + if (std::abs(angle_deg2 - 90.0) < 10.0) { + if (std::abs(angle_deg2 - 90.0) < 5.0) { + dir2 = cluster.vhough_transform(p2, 80 * units::cm, + Cluster::HoughParamSpace::theta_phi, pt_clouds.at(k), pt_clouds_global_indices.at(k)); + } else if (std::abs(angle_deg2 - 90.0) < 
10.0) { + dir2 = cluster.vhough_transform(p2, 50 * units::cm, + Cluster::HoughParamSpace::theta_phi, pt_clouds.at(k), pt_clouds_global_indices.at(k)); + } + dir2 = dir2 * -1; + result2 = pt_clouds.at(j)->get_closest_point_along_vec( + p2, dir2, 80 * units::cm, 5 * units::cm, 7.5, 3 * units::cm); + } + } + + if (result2.first >= 0) { + index_index_dis_dir2[j][k] = std::make_tuple( + result2.first, std::get<1>(index_index_dis[j][k]), result2.second); + } + } + + // CTPC PATH VALIDATION - Check basic distance path + { + geo_point_t p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis[j][k])); + auto wpid_p1 = cluster.wire_plane_id(pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis[j][k]))); + + geo_point_t p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis[j][k])); + auto wpid_p2 = cluster.wire_plane_id(pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis[j][k]))); + + double dis = sqrt(pow(p1.x() - p2.x(), 2) + pow(p1.y() - p2.y(), 2) + pow(p1.z() - p2.z(), 2)); + double step_dis = 1.0 * units::cm; + int num_steps = dis / step_dis + 1; + int num_bad = 0; + geo_point_t test_p; + + for (int ii = 0; ii != num_steps; ii++) { + test_p.set(p1.x() + (p2.x() - p1.x()) / num_steps * (ii + 1), + p1.y() + (p2.y() - p1.y()) / num_steps * (ii + 1), + p1.z() + (p2.z() - p1.z()) / num_steps * (ii + 1)); + + if (use_ctpc) { + auto test_wpid = get_wireplaneid(test_p, wpid_p1, wpid_p2, dv); + if (test_wpid.apa() != -1) { + geo_point_t test_p_raw = test_p; + if (cluster.get_default_scope().hash() != cluster.get_raw_scope().hash()) { + const auto transform = pcts->pc_transform(cluster.get_scope_transform()); + double cluster_t0 = cluster.get_cluster_t0(); + test_p_raw = transform->backward(test_p, cluster_t0, test_wpid.face(), test_wpid.apa()); + } + const bool good_point = grouping->is_good_point(test_p_raw, test_wpid.apa(), test_wpid.face()); + if (!good_point) num_bad++; + } + } + } + + if (num_bad > 7 || (num_bad > 2 && num_bad >= 0.75 * num_steps)) { + 
index_index_dis[j][k] = std::make_tuple(-1, -1, 1e9); + } + } + + // CTPC PATH VALIDATION - Check directional path 1 + if (std::get<0>(index_index_dis_dir1[j][k]) >= 0) { + geo_point_t p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis_dir1[j][k])); + auto wpid_p1 = cluster.wire_plane_id(pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_dir1[j][k]))); + + geo_point_t p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis_dir1[j][k])); + auto wpid_p2 = cluster.wire_plane_id(pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_dir1[j][k]))); + + double dis = sqrt(pow(p1.x() - p2.x(), 2) + pow(p1.y() - p2.y(), 2) + pow(p1.z() - p2.z(), 2)); + double step_dis = 1.0 * units::cm; + int num_steps = dis / step_dis + 1; + int num_bad = 0; + geo_point_t test_p; + + for (int ii = 0; ii != num_steps; ii++) { + test_p.set(p1.x() + (p2.x() - p1.x()) / num_steps * (ii + 1), + p1.y() + (p2.y() - p1.y()) / num_steps * (ii + 1), + p1.z() + (p2.z() - p1.z()) / num_steps * (ii + 1)); + if (use_ctpc) { + auto test_wpid = get_wireplaneid(test_p, wpid_p1, wpid_p2, dv); + if (test_wpid.apa() != -1) { + geo_point_t test_p_raw = test_p; + if (cluster.get_default_scope().hash() != cluster.get_raw_scope().hash()) { + const auto transform = pcts->pc_transform(cluster.get_scope_transform()); + double cluster_t0 = cluster.get_cluster_t0(); + test_p_raw = transform->backward(test_p, cluster_t0, test_wpid.face(), test_wpid.apa()); + } + const bool good_point = grouping->is_good_point(test_p_raw, test_wpid.apa(), test_wpid.face()); + if (!good_point) num_bad++; + } + } + } + + if (num_bad > 7 || (num_bad > 2 && num_bad >= 0.75 * num_steps)) { + index_index_dis_dir1[j][k] = std::make_tuple(-1, -1, 1e9); + } + } + + // CTPC PATH VALIDATION - Check directional path 2 + if (std::get<0>(index_index_dis_dir2[j][k]) >= 0) { + geo_point_t p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis_dir2[j][k])); + auto wpid_p1 = 
cluster.wire_plane_id(pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_dir2[j][k]))); + geo_point_t p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis_dir2[j][k])); + auto wpid_p2 = cluster.wire_plane_id(pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_dir2[j][k]))); + + double dis = sqrt(pow(p1.x() - p2.x(), 2) + pow(p1.y() - p2.y(), 2) + pow(p1.z() - p2.z(), 2)); + double step_dis = 1.0 * units::cm; + int num_steps = dis / step_dis + 1; + int num_bad = 0; + geo_point_t test_p; + + for (int ii = 0; ii != num_steps; ii++) { + test_p.set(p1.x() + (p2.x() - p1.x()) / num_steps * (ii + 1), + p1.y() + (p2.y() - p1.y()) / num_steps * (ii + 1), + p1.z() + (p2.z() - p1.z()) / num_steps * (ii + 1)); + if (use_ctpc) { + auto test_wpid = get_wireplaneid(test_p, wpid_p1, wpid_p2, dv); + if (test_wpid.apa() != -1) { + geo_point_t test_p_raw = test_p; + if (cluster.get_default_scope().hash() != cluster.get_raw_scope().hash()) { + const auto transform = pcts->pc_transform(cluster.get_scope_transform()); + double cluster_t0 = cluster.get_cluster_t0(); + test_p_raw = transform->backward(test_p, cluster_t0, test_wpid.face(), test_wpid.apa()); + } + const bool good_point = grouping->is_good_point(test_p_raw, test_wpid.apa(), test_wpid.face()); + if (!good_point) num_bad++; + } + } + } + + if (num_bad > 7 || (num_bad > 2 && num_bad >= 0.75 * num_steps)) { + index_index_dis_dir2[j][k] = std::make_tuple(-1, -1, 1e9); + } + } + } + } + + // Build MST for basic distances + { + Weighted::Graph temp_graph(num); + for (size_t j = 0; j != num; j++) { + for (size_t k = j + 1; k != num; k++) { + int index1 = j, index2 = k; + if (std::get<0>(index_index_dis[j][k]) >= 0) { + if (!boost::edge(index1, index2, temp_graph).second) { + add_edge(index1, index2, std::get<2>(index_index_dis[j][k]), temp_graph); + } + } + } + } + process_mst_deterministically(temp_graph, index_index_dis, index_index_dis_mst); + } + + // Build MST for directional distances + { + 
Weighted::Graph temp_graph(num); + for (size_t j = 0; j != num; j++) { + for (size_t k = j + 1; k != num; k++) { + int index1 = j, index2 = k; + if (std::get<0>(index_index_dis_dir1[j][k]) >= 0 || + std::get<0>(index_index_dis_dir2[j][k]) >= 0) { + if (!boost::edge(index1, index2, temp_graph).second) { + add_edge(index1, index2, + std::min(std::get<2>(index_index_dis_dir1[j][k]), + std::get<2>(index_index_dis_dir2[j][k])), temp_graph); + } + } + } + } + process_mst_deterministically(temp_graph, index_index_dis, index_index_dis_dir_mst); + } + + // Final graph construction phase + for (size_t j = 0; j != num; j++) { + for (size_t k = j + 1; k != num; k++) { + // Add short distance connections directly to MST + if (std::get<2>(index_index_dis[j][k]) < 3 * units::cm) { + index_index_dis_mst[j][k] = index_index_dis[j][k]; + } + + // Add MST basic distance edges to graph + if (std::get<0>(index_index_dis_mst[j][k]) >= 0) { + const int gind1 = pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_mst[j][k])); + const int gind2 = pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_mst[j][k])); + + float dis; + if (std::get<2>(index_index_dis_mst[j][k]) > 5 * units::cm) { + dis = std::get<2>(index_index_dis_mst[j][k]); + } else { + dis = std::get<2>(index_index_dis_mst[j][k]); + } + if (!boost::edge(gind1, gind2, graph).second) { + /*auto edge =*/ add_edge(gind1, gind2, dis, graph); + } + } + + // Add MST directional edges to graph (with penalty for longer distances) + if (std::get<0>(index_index_dis_dir_mst[j][k]) >= 0) { + if (std::get<0>(index_index_dis_dir1[j][k]) >= 0) { + const int gind1 = pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_dir1[j][k])); + const int gind2 = pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_dir1[j][k])); + + float dis; + if (std::get<2>(index_index_dis_dir1[j][k]) > 5 * units::cm) { + dis = std::get<2>(index_index_dis_dir1[j][k]) * 1.1; // Matches ctpc baseline penalty + } else { + dis = 
std::get<2>(index_index_dis_dir1[j][k]); + } + if (!boost::edge(gind1, gind2, graph).second) { + /*auto edge =*/ add_edge(gind1, gind2, dis, graph); + } + } + + if (std::get<0>(index_index_dis_dir2[j][k]) >= 0) { + const int gind1 = pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_dir2[j][k])); + const int gind2 = pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_dir2[j][k])); + + float dis; + if (std::get<2>(index_index_dis_dir2[j][k]) > 5 * units::cm) { + dis = std::get<2>(index_index_dis_dir2[j][k]) * 1.1; // Matches ctpc baseline penalty + } else { + dis = std::get<2>(index_index_dis_dir2[j][k]); + } + if (!boost::edge(gind1, gind2, graph).second) { + /*auto edge =*/ add_edge(gind1, gind2, dis, graph); + } + } + } + } + } +} \ No newline at end of file diff --git a/clus/src/connect_graph_relaxed.cxx b/clus/src/connect_graph_relaxed.cxx new file mode 100644 index 000000000..cebb39b43 --- /dev/null +++ b/clus/src/connect_graph_relaxed.cxx @@ -0,0 +1,604 @@ + +#include "WireCellClus/Graphs.h" +#include "WireCellClus/IPCTransform.h" +#include "WireCellClus/Facade_Cluster.h" +#include "WireCellClus/Facade_Grouping.h" + +#include "connect_graphs.h" + +using namespace WireCell; +using namespace WireCell::Clus; +using namespace WireCell::Clus::Graphs; +using namespace WireCell::Clus::Facade; + +void Graphs::connect_graph_relaxed( + const Facade::Cluster& cluster, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts, + Weighted::Graph& graph) +{ + const bool use_ctpc = true; + const auto* grouping = cluster.grouping(); + + // Get all the wire plane IDs from the grouping + const auto& wpids = grouping->wpids(); + + // Key: pair, Value: drift_dir, angle_u, angle_v, angle_w + std::map> wpid_params; + std::map wpid_U_dir; + std::map wpid_V_dir; + std::map wpid_W_dir; + std::set apas; + for (const auto& wpid : wpids) { + int apa = wpid.apa(); + int face = wpid.face(); + apas.insert(apa); + + // Create wpids for all three planes with this 
APA and face + WirePlaneId wpid_u(kUlayer, face, apa); + WirePlaneId wpid_v(kVlayer, face, apa); + WirePlaneId wpid_w(kWlayer, face, apa); + + // Get drift direction based on face orientation + int face_dirx = dv->face_dirx(wpid_u); + geo_point_t drift_dir(face_dirx, 0, 0); + + // Get wire directions for all planes + Vector wire_dir_u = dv->wire_direction(wpid_u); + Vector wire_dir_v = dv->wire_direction(wpid_v); + Vector wire_dir_w = dv->wire_direction(wpid_w); + + // Calculate angles + double angle_u = std::atan2(wire_dir_u.z(), wire_dir_u.y()); + double angle_v = std::atan2(wire_dir_v.z(), wire_dir_v.y()); + double angle_w = std::atan2(wire_dir_w.z(), wire_dir_w.y()); + + wpid_params[wpid] = std::make_tuple(drift_dir, angle_u, angle_v, angle_w); + wpid_U_dir[wpid] = geo_point_t(0, cos(angle_u), sin(angle_u)); + wpid_V_dir[wpid] = geo_point_t(0, cos(angle_v), sin(angle_v)); + wpid_W_dir[wpid] = geo_point_t(0, cos(angle_w), sin(angle_w)); + } + + // this drift direction is only used to calculate isochronous case, so this is OK ... 
+ const geo_vector_t drift_dir_abs(1, 0, 0); + + + // Form connected components + std::vector component(num_vertices(graph)); + const size_t num = connected_components(graph, &component[0]); + + if (num <= 1) return; + + // Create point clouds using connected components + std::vector> pt_clouds; + std::vector> pt_clouds_global_indices; + + // Create ordered components + std::vector ordered_components; + ordered_components.reserve(component.size()); + for (size_t i = 0; i < component.size(); ++i) { + ordered_components.emplace_back(i); + } + + // Assign vertices to components + for (size_t i = 0; i < component.size(); ++i) { + ordered_components[component[i]].add_vertex(i); + } + + // Sort components by minimum vertex index + std::sort(ordered_components.begin(), ordered_components.end(), + [](const ComponentInfo& a, const ComponentInfo& b) { + return a.min_vertex < b.min_vertex; + }); + + // Create point clouds for each component + const auto& points = cluster.points(); + for (const auto& comp : ordered_components) { + auto pt_cloud = std::make_shared(); + + std::vector global_indices; + + for (size_t vertex_idx : comp.vertex_indices) { + pt_cloud->add({points[0][vertex_idx], points[1][vertex_idx], points[2][vertex_idx]}); + global_indices.push_back(vertex_idx); + } + pt_clouds.push_back(pt_cloud); + pt_clouds_global_indices.push_back(global_indices); + } + + // Initialize distance metrics + std::vector>> index_index_dis(num, std::vector>(num)); + std::vector>> index_index_dis_mst(num, std::vector>(num)); + std::vector>> index_index_dis_dir1(num, std::vector>(num)); + std::vector>> index_index_dis_dir2(num, std::vector>(num)); + std::vector>> index_index_dis_dir_mst(num, std::vector>(num)); + + // Initialize all distances to inf + for (size_t j = 0; j != num; j++) { + for (size_t k = 0; k != num; k++) { + index_index_dis[j][k] = std::make_tuple(-1, -1, 1e9); + index_index_dis_mst[j][k] = std::make_tuple(-1, -1, 1e9); + index_index_dis_dir1[j][k] = 
std::make_tuple(-1, -1, 1e9); + index_index_dis_dir2[j][k] = std::make_tuple(-1, -1, 1e9); + index_index_dis_dir_mst[j][k] = std::make_tuple(-1, -1, 1e9); + } + } + + // Calculate distances between components + for (size_t j = 0; j != num; j++) { + for (size_t k = j + 1; k != num; k++) { + // Get closest points between components + index_index_dis[j][k] = pt_clouds.at(j)->get_closest_points(*pt_clouds.at(k)); + + // Skip small clouds + if ((num < 100 && pt_clouds.at(j)->get_num_points() > 100 && pt_clouds.at(k)->get_num_points() > 100 && + (pt_clouds.at(j)->get_num_points() + pt_clouds.at(k)->get_num_points()) > 400) || + (pt_clouds.at(j)->get_num_points() > 500 && pt_clouds.at(k)->get_num_points() > 500)) { + + // Get closest points and calculate directions + geo_point_t p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis[j][k])); + geo_point_t p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis[j][k])); + + geo_vector_t dir1 = cluster.vhough_transform(p1, 30 * units::cm, Cluster::HoughParamSpace::theta_phi, pt_clouds.at(j), + pt_clouds_global_indices.at(j)); + geo_vector_t dir2 = cluster.vhough_transform(p2, 30 * units::cm, Cluster::HoughParamSpace::theta_phi, pt_clouds.at(k), + pt_clouds_global_indices.at(k)); + dir1 = dir1 * -1; + dir2 = dir2 * -1; + + std::pair result1 = pt_clouds.at(k)->get_closest_point_along_vec(p1, dir1, 80 * units::cm, 5 * units::cm, 7.5, 3 * units::cm); + + if (result1.first >= 0) { + index_index_dis_dir1[j][k] = std::make_tuple(std::get<0>(index_index_dis[j][k]), + result1.first, result1.second); + } + + std::pair result2 = pt_clouds.at(j)->get_closest_point_along_vec(p2, dir2, 80 * units::cm, 5 * units::cm, 7.5, 3 * units::cm); + + if (result2.first >= 0) { + index_index_dis_dir2[j][k] = std::make_tuple(result2.first, + std::get<1>(index_index_dis[j][k]), + result2.second); + } + } + // Now check the path + + { + geo_point_t p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis[j][k])); + auto wpid_p1 = 
cluster.wire_plane_id(pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis[j][k]))); + geo_point_t p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis[j][k])); + auto wpid_p2 = cluster.wire_plane_id(pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis[j][k]))); + + double dis = sqrt(pow(p1.x() - p2.x(), 2) + pow(p1.y() - p2.y(), 2) + pow(p1.z() - p2.z(), 2)); + double step_dis = 1.0 * units::cm; + int num_steps = dis/step_dis + 1; + + + + // Track different types of "bad" points + int num_bad[4] = {0,0,0,0}; // more than one of three are bad + int num_bad1[4] = {0,0,0,0}; // at least one of three are bad + int num_bad2[3] = {0,0,0}; // number of dead channels + + // Check points along path + for (int ii = 0; ii != num_steps; ii++) { + geo_point_t test_p( + p1.x() + (p2.x() - p1.x())/num_steps*(ii + 1), + p1.y() + (p2.y() - p1.y())/num_steps*(ii + 1), + p1.z() + (p2.z() - p1.z())/num_steps*(ii + 1) + ); + + // Test point quality using grouping parameters + std::vector scores; + if (use_ctpc) { + auto test_wpid = get_wireplaneid(test_p, wpid_p1, wpid_p2, dv); + if (test_wpid.apa()!=-1){ + geo_point_t test_p_raw = test_p; + if (cluster.get_default_scope().hash() != cluster.get_raw_scope().hash()){ + const auto transform = pcts->pc_transform(cluster.get_scope_transform()); + double cluster_t0 = cluster.get_cluster_t0(); + test_p_raw = transform->backward(test_p, cluster_t0, test_wpid.face(), test_wpid.apa()); + } + scores = grouping->test_good_point(test_p_raw, test_wpid.apa(), test_wpid.face()); + + // Check overall quality + if (scores[0] + scores[3] + scores[1] + scores[4] + (scores[2]+scores[5])*2 < 3) { + num_bad[0]++; + } + if (scores[0]+scores[3]==0) num_bad[1]++; + if (scores[1]+scores[4]==0) num_bad[2]++; + if (scores[2]+scores[5]==0) num_bad[3]++; + + if (scores[3]!=0) num_bad2[0]++; + if (scores[4]!=0) num_bad2[1]++; + if (scores[5]!=0) num_bad2[2]++; + + if (scores[0] + scores[3] + scores[1] + scores[4] + (scores[2]+scores[5]) < 3) { + 
num_bad1[0]++; + } + if (scores[0]+scores[3]==0) num_bad1[1]++; + if (scores[1]+scores[4]==0) num_bad1[2]++; + if (scores[2]+scores[5]==0) num_bad1[3]++; + } + } + } + + auto test_wpid = get_wireplaneid(p1, wpid_p1, p2, wpid_p2, dv); + + // Calculate angles between directions + geo_vector_t tempV1(0, p2.y() - p1.y(), p2.z() - p1.z()); + geo_vector_t tempV5; + + double angle1 = tempV1.angle(wpid_U_dir.at(test_wpid)); + tempV5.set(fabs(p2.x() - p1.x()), + sqrt(pow(p2.y() - p1.y(), 2) + pow(p2.z() - p1.z(), 2)) * sin(angle1), + 0); + angle1 = tempV5.angle(drift_dir_abs); + + double angle2 = tempV1.angle(wpid_V_dir.at(test_wpid)); + tempV5.set(fabs(p2.x() - p1.x()), + sqrt(pow(p2.y() - p1.y(), 2) + pow(p2.z() - p1.z(), 2)) * sin(angle2), + 0); + angle2 = tempV5.angle(drift_dir_abs); + + double angle1p = tempV1.angle(wpid_W_dir.at(test_wpid)); + tempV5.set(fabs(p2.x() - p1.x()), + sqrt(pow(p2.y() - p1.y(), 2) + pow(p2.z() - p1.z(), 2)) * sin(angle1p), + 0); + angle1p = tempV5.angle(drift_dir_abs); + + tempV5.set(p2.x() - p1.x(), p2.y() - p1.y(), p2.z() - p1.z()); + double angle3 = tempV5.angle(drift_dir_abs); + + bool flag_strong_check = true; + + // Define constants for readability + constexpr double pi = 3.141592653589793; + constexpr double perp_angle_tol = 10.0/180.0*pi; + constexpr double wire_angle_tol = 12.5/180.0*pi; + constexpr double perp_angle = pi/2.0; + constexpr double invalid_dist = 1e9; + + if (fabs(angle3 - perp_angle) < perp_angle_tol) { + geo_vector_t tempV2 = cluster.vhough_transform(p1, 15*units::cm); + geo_vector_t tempV3 = cluster.vhough_transform(p2, 15*units::cm); + + if (fabs(tempV2.angle(drift_dir_abs) - perp_angle) < perp_angle_tol && + fabs(tempV3.angle(drift_dir_abs) - perp_angle) < perp_angle_tol) { + flag_strong_check = false; + } + } + else if (angle1 < wire_angle_tol || angle2 < wire_angle_tol || angle1p < wire_angle_tol) { + flag_strong_check = false; + } + + // Helper function to check if ratio exceeds threshold + auto exceeds_ratio = 
[](int val, int steps, double ratio = 0.75) { + return val >= ratio * steps; + }; + + // Helper function to invalidate distance + auto invalidate_distance = [&]() { + index_index_dis[j][k] = std::make_tuple(-1, -1, invalid_dist); + }; + + if (flag_strong_check) { + if (num_bad1[0] > 7 || (num_bad1[0] > 2 && exceeds_ratio(num_bad1[0], num_steps))) { + invalidate_distance(); + } + } + else { + bool parallel_angles = (angle1 < wire_angle_tol && angle2 < wire_angle_tol) || + (angle1p < wire_angle_tol && angle1 < wire_angle_tol) || + (angle1p < wire_angle_tol && angle2 < wire_angle_tol); + + if (parallel_angles) { + if (num_bad[0] > 7 || (num_bad[0] > 2 && exceeds_ratio(num_bad[0], num_steps))) { + invalidate_distance(); + } + } + else if (angle1 < wire_angle_tol) { + int sum_bad = num_bad[2] + num_bad[3]; + if (sum_bad > 9 || (sum_bad > 2 && exceeds_ratio(sum_bad, num_steps)) || num_bad[3] >= 3) { + invalidate_distance(); + } + } + else if (angle2 < wire_angle_tol) { + int sum_bad = num_bad[1] + num_bad[3]; + if (sum_bad > 9 || (sum_bad > 2 && exceeds_ratio(sum_bad, num_steps)) || num_bad[3] >= 3) { + invalidate_distance(); + } + } + else if (angle1p < wire_angle_tol) { + int sum_bad = num_bad[2] + num_bad[1]; + if (sum_bad > 9 || (sum_bad > 2 && exceeds_ratio(sum_bad, num_steps))) { + invalidate_distance(); + } + } + else if (num_bad[0] > 7 || (num_bad[0] > 2 && exceeds_ratio(num_bad[0], num_steps))) { + invalidate_distance(); + } + } + } + + // Now check path again ... 
+ if (std::get<0>(index_index_dis_dir1[j][k]) >= 0) { + geo_point_t p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis_dir1[j][k])); + auto wpid_p1 = cluster.wire_plane_id(pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_dir1[j][k]))); + geo_point_t p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis_dir1[j][k])); + auto wpid_p2 = cluster.wire_plane_id(pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_dir1[j][k]))); + + double dis = sqrt(pow(p1.x() - p2.x(), 2) + + pow(p1.y() - p2.y(), 2) + + pow(p1.z() - p2.z(), 2)); + double step_dis = 1.0 * units::cm; + int num_steps = dis/step_dis + 1; + int num_bad = 0; + int num_bad1 = 0; + + // Check intermediate points along path + for (int ii = 0; ii != num_steps; ii++) { + geo_point_t test_p( + p1.x() + (p2.x() - p1.x())/num_steps*(ii + 1), + p1.y() + (p2.y() - p1.y())/num_steps*(ii + 1), + p1.z() + (p2.z() - p1.z())/num_steps*(ii + 1) + ); + + if (use_ctpc) { + auto test_wpid = get_wireplaneid(test_p, wpid_p1, wpid_p2, dv); + if (test_wpid.apa()!=-1){ + geo_point_t test_p_raw = test_p; + if (cluster.get_default_scope().hash() != cluster.get_raw_scope().hash()){ + const auto transform = pcts->pc_transform(cluster.get_scope_transform()); + double cluster_t0 = cluster.get_cluster_t0(); + test_p_raw = transform->backward(test_p, cluster_t0, test_wpid.face(), test_wpid.apa()); + } + const bool good_point = grouping->is_good_point(test_p_raw, test_wpid.apa(), test_wpid.face()); + if (!good_point) { + num_bad++; + } + if (!grouping->is_good_point(test_p_raw, test_wpid.apa(), test_wpid.face(), 0.6*units::cm, 1, 0)) { + num_bad1++; + } + } + } + } + + auto test_wpid = get_wireplaneid(p1, wpid_p1, p2, wpid_p2, dv); + + // Calculate angles + geo_vector_t tempV1(0, p2.y() - p1.y(), p2.z() - p1.z()); + geo_vector_t tempV5; + + double angle1 = tempV1.angle(wpid_U_dir.at(test_wpid)); + tempV5.set(fabs(p2.x() - p1.x()), + sqrt(pow(p2.y() - p1.y(), 2) + pow(p2.z() - p1.z(), 2))*sin(angle1), + 0); + 
angle1 = tempV5.angle(drift_dir_abs); + + double angle2 = tempV1.angle(wpid_V_dir.at(test_wpid)); + tempV5.set(fabs(p2.x() - p1.x()), + sqrt(pow(p2.y() - p1.y(), 2) + pow(p2.z() - p1.z(), 2))*sin(angle2), + 0); + angle2 = tempV5.angle(drift_dir_abs); + + tempV5.set(p2.x() - p1.x(), p2.y() - p1.y(), p2.z() - p1.z()); + double angle3 = tempV5.angle(drift_dir_abs); + + double angle1p = tempV1.angle(wpid_W_dir.at(test_wpid)); + tempV5.set(fabs(p2.x() - p1.x()), + sqrt(pow(p2.y() - p1.y(), 2) + pow(p2.z() - p1.z(), 2))*sin(angle1p), + 0); + angle1p = tempV5.angle(drift_dir_abs); + + const double pi = 3.141592653589793; + if (fabs(angle3 - pi/2) < 10.0/180.0*pi || + angle1 < 12.5/180.0*pi || + angle2 < 12.5/180.0*pi || + angle1p < 7.5/180.0*pi) { + // Parallel or prolonged case + if (num_bad > 7 || (num_bad > 2 && num_bad >= 0.75*num_steps)) { + index_index_dis_dir1[j][k] = std::make_tuple(-1, -1, 1e9); + } + } + else { + if (num_bad1 > 7 || (num_bad1 > 2 && num_bad1 >= 0.75*num_steps)) { + index_index_dis_dir1[j][k] = std::make_tuple(-1, -1, 1e9); + } + } + } + + //Now check path again ... + // Now check the path... 
+ if (std::get<0>(index_index_dis_dir2[j][k]) >= 0) { + geo_point_t p1 = pt_clouds.at(j)->point(std::get<0>(index_index_dis_dir2[j][k])); + auto wpid_p1 = cluster.wire_plane_id(pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_dir2[j][k]))); + geo_point_t p2 = pt_clouds.at(k)->point(std::get<1>(index_index_dis_dir2[j][k])); + auto wpid_p2 = cluster.wire_plane_id(pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_dir2[j][k]))); + + double dis = sqrt(pow(p1.x() - p2.x(), 2) + + pow(p1.y() - p2.y(), 2) + + pow(p1.z() - p2.z(), 2)); + double step_dis = 1.0 * units::cm; + int num_steps = dis/step_dis + 1; + int num_bad = 0; + int num_bad1 = 0; + + // Check points along path + for (int ii = 0; ii != num_steps; ii++) { + geo_point_t test_p( + p1.x() + (p2.x() - p1.x())/num_steps*(ii + 1), + p1.y() + (p2.y() - p1.y())/num_steps*(ii + 1), + p1.z() + (p2.z() - p1.z())/num_steps*(ii + 1) + ); + + if (use_ctpc) { + auto test_wpid = get_wireplaneid(test_p, wpid_p1, wpid_p2, dv); + if (test_wpid.apa()!=-1){ + geo_point_t test_p_raw = test_p; + if (cluster.get_default_scope().hash() != cluster.get_raw_scope().hash()){ + const auto transform = pcts->pc_transform(cluster.get_scope_transform()); + double cluster_t0 = cluster.get_cluster_t0(); + test_p_raw = transform->backward(test_p, cluster_t0, test_wpid.face(), test_wpid.apa()); + } + const bool good_point = grouping->is_good_point(test_p_raw, test_wpid.apa(), test_wpid.face()); + if (!good_point) { + num_bad++; + } + if (!grouping->is_good_point(test_p_raw, test_wpid.apa(), test_wpid.face(), 0.6*units::cm, 1, 0)) { + num_bad1++; + } + } + } + } + + auto test_wpid = get_wireplaneid(p1, wpid_p1, p2, wpid_p2, dv); + + // Calculate angles between directions + geo_vector_t tempV1(0, p2.y() - p1.y(), p2.z() - p1.z()); + geo_vector_t tempV5; + + double angle1 = tempV1.angle(wpid_U_dir.at(test_wpid)); + tempV5.set(fabs(p2.x() - p1.x()), + sqrt(pow(p2.y() - p1.y(), 2) + pow(p2.z() - p1.z(), 2))*sin(angle1), + 0); + 
angle1 = tempV5.angle(drift_dir_abs); + + double angle2 = tempV1.angle(wpid_V_dir.at(test_wpid)); + tempV5.set(fabs(p2.x() - p1.x()), + sqrt(pow(p2.y() - p1.y(), 2) + pow(p2.z() - p1.z(), 2))*sin(angle2), + 0); + angle2 = tempV5.angle(drift_dir_abs); + + tempV5.set(p2.x() - p1.x(), p2.y() - p1.y(), p2.z() - p1.z()); + double angle3 = tempV5.angle(drift_dir_abs); + + double angle1p = tempV1.angle(wpid_W_dir.at(test_wpid)); + tempV5.set(fabs(p2.x() - p1.x()), + sqrt(pow(p2.y() - p1.y(), 2) + pow(p2.z() - p1.z(), 2))*sin(angle1p), + 0); + angle1p = tempV5.angle(drift_dir_abs); + + const double pi = 3.141592653589793; + bool is_parallel = fabs(angle3 - pi/2) < 10.0/180.0*pi || + angle1 < 12.5/180.0*pi || + angle2 < 12.5/180.0*pi || + angle1p < 7.5/180.0*pi; + + if (is_parallel) { + // Parallel or prolonged case + if (num_bad > 7 || (num_bad > 2 && num_bad >= 0.75*num_steps)) { + index_index_dis_dir2[j][k] = std::make_tuple(-1, -1, 1e9); + } + } + else { + if (num_bad1 > 7 || (num_bad1 > 2 && num_bad1 >= 0.75*num_steps)) { + index_index_dis_dir2[j][k] = std::make_tuple(-1, -1, 1e9); + } + } + } + } + } + + // deal with MST of first type + { + Weighted::Graph temp_graph(num); + for (size_t j = 0; j != num; j++) { + for (size_t k = j + 1; k != num; k++) { + int index1 = j; + int index2 = k; + if (std::get<0>(index_index_dis[j][k]) >= 0) { + if (!boost::edge(index1, index2, temp_graph).second) { + add_edge(index1, index2, std::get<2>(index_index_dis[j][k]), temp_graph); + } + } + } + } + + // Process MST + process_mst_deterministically(temp_graph, index_index_dis, index_index_dis_mst); + } + + // MST of the direction ... 
+ { + Weighted::Graph temp_graph(num); + + for (size_t j = 0; j != num; j++) { + for (size_t k = j + 1; k != num; k++) { + int index1 = j; + int index2 = k; + if (std::get<0>(index_index_dis_dir1[j][k]) >= 0 || std::get<0>(index_index_dis_dir2[j][k]) >= 0) { + if (!boost::edge(index1, index2, temp_graph).second) { + // Add edge with minimum distance from both directions + add_edge( + index1, index2, + std::min(std::get<2>(index_index_dis_dir1[j][k]), std::get<2>(index_index_dis_dir2[j][k])), + temp_graph); + } + } + } + } + + process_mst_deterministically(temp_graph, index_index_dis, index_index_dis_dir_mst); + + } + + for (size_t j = 0; j != num; j++) { + for (size_t k = j + 1; k != num; k++) { + if (std::get<2>(index_index_dis[j][k]) < 3 * units::cm) { + index_index_dis_mst[j][k] = index_index_dis[j][k]; + } + + // establish the path ... + if (std::get<0>(index_index_dis_mst[j][k]) >= 0) { + const int gind1 = pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_mst[j][k])); + const int gind2 = pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_mst[j][k])); + float dis; + if (std::get<2>(index_index_dis_mst[j][k]) > 5 * units::cm) { + dis = std::get<2>(index_index_dis_mst[j][k]); + } + else { + dis = std::get<2>(index_index_dis_mst[j][k]); + } + if (!boost::edge(gind1, gind2, graph).second) { + /*auto edge =*/ add_edge(gind1, gind2, dis, graph); + } + } + + if (std::get<0>(index_index_dis_dir_mst[j][k]) >= 0) { + if (std::get<0>(index_index_dis_dir1[j][k]) >= 0) { + const int gind1 = pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_dir1[j][k])); + const int gind2 = pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_dir1[j][k])); + float dis; + if (std::get<2>(index_index_dis_dir1[j][k]) > 5 * units::cm) { + dis = std::get<2>(index_index_dis_dir1[j][k]) * 1.1; + } + else { + dis = std::get<2>(index_index_dis_dir1[j][k]); + } + if(!boost::edge(gind1, gind2, graph).second) { + /*auto edge =*/ add_edge(gind1, gind2, dis, 
graph); + } + } + if (std::get<0>(index_index_dis_dir2[j][k]) >= 0) { + const int gind1 = pt_clouds_global_indices.at(j).at(std::get<0>(index_index_dis_dir2[j][k])); + const int gind2 = pt_clouds_global_indices.at(k).at(std::get<1>(index_index_dis_dir2[j][k])); + float dis; + if (std::get<2>(index_index_dis_dir2[j][k]) > 5 * units::cm) { + dis = std::get<2>(index_index_dis_dir2[j][k]) * 1.1; + } + else { + dis = std::get<2>(index_index_dis_dir2[j][k]); + } + // } + if (!boost::edge(gind1, gind2, graph).second) { + /*auto edge =*/ add_edge(gind1, gind2, dis, graph); + } + } + } + + } // k + } // j + +} + diff --git a/clus/src/connect_graphs.h b/clus/src/connect_graphs.h new file mode 100644 index 000000000..e74a3faf6 --- /dev/null +++ b/clus/src/connect_graphs.h @@ -0,0 +1,50 @@ +#ifndef WIRECELLCLUS_PRIVATE_CONNECT_GRAPHS +#define WIRECELLCLUS_PRIVATE_CONNECT_GRAPHS + +#include "WireCellClus/Graphs.h" +#include "WireCellIface/IDetectorVolumes.h" +#include "WireCellClus/IPCTransform.h" +#include "WireCellClus/Facade_Cluster.h" + +namespace WireCell::Clus::Graphs { + + + // See make_graphs.h for bundling of construction and connecting. 
+ + void connect_graph(const Facade::Cluster& cluster, + Weighted::Graph& graph); + + void connect_graph_with_reference( + const Facade::Cluster& cluster, + const Facade::Cluster& ref_cluster, + Weighted::Graph& graph); + + void connect_graph_ctpc(const Facade::Cluster& cluster, + IDetectorVolumes::pointer dv, + Clus::IPCTransformSet::pointer pcts, + Weighted::Graph& graph); + + void connect_graph_ctpc_with_reference( + const Facade::Cluster& cluster, + const Facade::Cluster& ref_cluster, + IDetectorVolumes::pointer dv, + Clus::IPCTransformSet::pointer pcts, + Weighted::Graph& graph); + + void connect_graph_closely(const Facade::Cluster& cluster, + Weighted::Graph& graph, int num_neighbors = 5); + + void connect_graph_closely_pid(const Facade::Cluster& cluster, + Weighted::Graph& graph); + + // ne' overclustering protection + void connect_graph_relaxed( + const Facade::Cluster& cluster, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts, + Weighted::Graph& graph); + + bool is_point_good(const Facade::Cluster& cluster, size_t point_index, int ncut = 3); +} + +#endif diff --git a/clus/src/improvecluster_1.cxx b/clus/src/improvecluster_1.cxx new file mode 100644 index 000000000..8cc345f66 --- /dev/null +++ b/clus/src/improvecluster_1.cxx @@ -0,0 +1,930 @@ + +#include "improvecluster_1.h" + +WIRECELL_FACTORY(ImproveCluster_1, WireCell::Clus::ImproveCluster_1, + WireCell::IConfigurable, WireCell::IPCTreeMutate) + +using namespace WireCell; +using namespace WireCell::Clus; +using namespace WireCell::Clus::Facade; +using namespace WireCell::PointCloud::Tree; + +// Segregate this weird choice for namespace. +namespace WCF = WireCell::Clus::Facade; + +// Nick name for less typing. 
+namespace WRG = WireCell::RayGrid; + +namespace WireCell::Clus { + + ImproveCluster_1::ImproveCluster_1() + { + } + + ImproveCluster_1::~ImproveCluster_1() + { + } + + void ImproveCluster_1::configure(const WireCell::Configuration& cfg) + { + // Configure base class first + RetileCluster::configure(cfg); + + NeedDV::configure(cfg); + NeedPCTS::configure(cfg); + + if (cfg.isMember("samplers") && cfg["samplers"].isArray()) { + // Process array of samplers + for (const auto& sampler_cfg : cfg["samplers"]) { + int apa = sampler_cfg["apa"].asInt(); + int face = sampler_cfg["face"].asInt(); + std::string sampler_name = sampler_cfg["name"].asString(); + + if (sampler_name.empty()) { + raise("RetileCluster requires an IBlobSampler name for APA %d face %d", apa, face); + } + // std::cout << "Test: " << apa << " " << face << " " << sampler_name << std::endl; + auto sampler_ptr = Factory::find_tn(sampler_name); + m_samplers[apa][face] = sampler_ptr; + } + } + + std::vector anodes_tn; + for (const auto& aname : cfg["anodes"]) { + auto anode = Factory::find_tn(aname.asString()); + anodes_tn.push_back(anode); + for (const auto& face1 : anode->faces()) { + int apa = anode->ident(); + int face = face1->which(); + m_face[apa][face] = face1; + const auto& coords = face1->raygrid(); + if (coords.nlayers() != 5) { + raise("unexpected number of ray grid layers: %d", coords.nlayers()); + } + // std::cout <<"Test: " << apa << " " << face << " " << coords.nlayers() << std::endl; + // Get wire info for each plane + m_plane_infos[apa][face].clear(); + m_plane_infos[apa][face].push_back(Aux::get_wire_plane_info(face1, kUlayer)); + m_plane_infos[apa][face].push_back(Aux::get_wire_plane_info(face1, kVlayer)); + m_plane_infos[apa][face].push_back(Aux::get_wire_plane_info(face1, kWlayer)); + + } + } + } + + Configuration ImproveCluster_1::default_configuration() const + { + Configuration cfg = RetileCluster::default_configuration(); + + + + return cfg; + } + + + std::unique_ptr 
ImproveCluster_1::mutate(node_t& node) const + { + // get the original cluster + auto* orig_cluster = reinitialize(node); + + + // std::cout << m_grouping->get_name() << " " << m_wpid_angles.size() << std::endl; + + auto wpids = orig_cluster->wpids_blob(); + std::set wpid_set(wpids.begin(), wpids.end()); + + // // Needed in hack_activity() but call it here to avoid call overhead. + // // find the highest and lowest points + // auto pair_points = orig_cluster->get_two_boundary_wcps(); + // auto first_index = orig_cluster->get_closest_point_index(pair_points.first); + // auto second_index = orig_cluster->get_closest_point_index(pair_points.second); + // std::vector path_wcps = orig_cluster->graph_algorithms("basic_pid").shortest_path(first_index, second_index); + + + // make a new node from the existing grouping + auto& new_cluster = m_grouping->make_child(); // make a new cluster inside the existing grouping ... + + + // std::cout << "Xin3: " << path_wcps.size() << " " << pair_points.first.x() << " " + // << pair_points.first.y() << " " + // << pair_points.first.z() << " | " + // << pair_points.second.x() << " " + // << pair_points.second.y() << " " + // << pair_points.second.z() << std::endl; + + + for (auto it = wpid_set.begin(); it != wpid_set.end(); ++it) { + int apa = it->apa(); + int face = it->face(); + const auto& angles = m_wpid_angles.at(*it); + + std::map, std::vector > map_slices_measures; + + // std::map, std::vector > map_slices_measures_orig; + // get_activity(*orig_cluster, map_slices_measures_orig, apa, face); + + get_activity_improved(*orig_cluster, map_slices_measures, apa, face); + + // Step 2. + // hack_activity_improved(*orig_cluster, map_slices_measures, path_wcps, apa, face); // may need more args + + // test ... 
+ // std::cout << "Test: Improved: " << map_slices_measures.size() << " " << orig_cluster->children().size() << std::endl; + // for (const auto& [slice_key, measures] : map_slices_measures) { + // std::cout << "Slice: [" << slice_key.first << ", " << slice_key.second << ") "; + // for (size_t i = 2; i < 5; ++i) { + // bool in_range = false; + // int start_idx = -1; + // std::cout << "Layer " << i << " "; + // for (size_t idx = 0; idx < measures[i].size(); ++idx) { + // if (measures[i][idx] > 0.0) { + // if (!in_range) { + // start_idx = idx; + // in_range = true; + // } + // } else { + // if (in_range) { + // std::cout << "[" << start_idx << ", " << idx-1 << ") "; + // in_range = false; + // } + // } + // } + // if (in_range) { + // std::cout << "[" << start_idx << ", " << measures[i].size()-1 << ") "; + // } + // } + // std::cout << std::endl; + // } + + + + // Step 3. + auto iblobs = make_iblobs_improved(map_slices_measures, apa, face); + + if (m_verbose) std::cout << "ImproveCluster_1: " << orig_cluster->nchildren() << " " << iblobs.size() << " iblobs for apa " << apa << " face " << face << std::endl; + + auto niblobs = iblobs.size(); + + // start to sampling points + int npoints = 0; + for (size_t bind=0; bindget_tick().at(apa).at(face); + + auto pcs = Aux::sample_live(sampler, iblob, angles, tick, bind); + // DO NOT EXTEND FURTHER! see #426, #430 + + if (pcs["3d"].size()==0) continue; // no points ... 
+ // Access 3D coordinates + auto pc3d = pcs["3d"]; // Get the 3D point cloud dataset + auto x_coords = pc3d.get("x")->elements(); // Get X coordinates + // auto y_coords = pc3d.get("y")->elements(); // Get Y coordinates + // auto z_coords = pc3d.get("z")->elements(); // Get Z coordinates + // auto ucharge_val = pc3d.get("ucharge_val")->elements(); // Get U charge + // auto vcharge_val = pc3d.get("vcharge_val")->elements(); // Get V charge + // auto wcharge_val = pc3d.get("wcharge_val")->elements(); // Get W charge + // auto ucharge_err = pc3d.get("ucharge_unc")->elements(); // Get U charge error + // auto vcharge_err = pc3d.get("vcharge_unc")->elements(); // Get V charge error + // auto wcharge_err = pc3d.get("wcharge_unc")->elements(); // Get W charge error + + // std::cout << "ImproveCluster_1 PCS: " << pcs.size() << " " + // << pcs["3d"].size() << " " + // << x_coords.size() << std::endl; + + npoints +=x_coords.size(); + if (pcs.empty()) { + SPDLOG_DEBUG("ImproveCluster_1: skipping blob {} with no points", iblob->ident()); + continue; + } + new_cluster.node()->insert(Tree::Points(std::move(pcs))); + + } + if (m_verbose) std::cout << "ImproveCluster_1: " << npoints << " points sampled for apa " << apa << " face " << face << " Blobs " << niblobs << std::endl; + + + // remove bad blobs ... 
+ int tick_span = map_slices_measures.begin()->first.second - map_slices_measures.begin()->first.first; + auto blobs_to_remove = remove_bad_blobs(*orig_cluster, new_cluster, tick_span, apa, face); + for (const Blob* blob : blobs_to_remove) { + Blob& b = const_cast(*blob); + new_cluster.remove_child(b); + } + if (m_verbose) std::cout << "ImproveCluster_1: " << blobs_to_remove.size() << " blobs removed for apa " << apa << " face " << face << " " << new_cluster.children().size() << std::endl; + } + + + auto& default_scope = orig_cluster->get_default_scope(); + auto& raw_scope = orig_cluster->get_raw_scope(); + + if (m_verbose) std::cout << "ImproveCluster_1: Scope: " << default_scope.hash() << " " << raw_scope.hash() << std::endl; + if (default_scope.hash()!=raw_scope.hash()){ + auto correction_name = orig_cluster->get_scope_transform(default_scope); + // std::vector filter_results = c + new_cluster.add_corrected_points(m_pcts, correction_name); + // Set this as the default scope for viewing + new_cluster.from(*orig_cluster); // copy state from original cluster + // std::cout << "Test: Same:" << default_scope.hash() << " " << raw_scope.hash() << std::endl; + } + + + // auto retiled_node = new_cluster.node(); + + // std::cout << m_grouping->get_name() << " " << m_grouping->children().size() << std::endl; + + return m_grouping->remove_child(new_cluster); + } + + + + + + +void ImproveCluster_1::get_activity_improved(const Cluster& cluster, std::map,std::vector>& map_slices_measures, int apa, int face) const{ + + auto uvwt_min = cluster.get_uvwt_min(apa, face); + auto uvwt_max = cluster.get_uvwt_max(apa, face); + // Track the bounds for optimization + int min_time = std::get<3>(uvwt_min); + int max_time = std::get<3>(uvwt_max) + 1; + int min_uch = std::get<0>(uvwt_min), max_uch = std::get<0>(uvwt_max) + 1; + int min_vch = std::get<1>(uvwt_min), max_vch = std::get<1>(uvwt_max) + 1; + int min_wch = std::get<2>(uvwt_min), max_wch = std::get<2>(uvwt_max) + 1; + + // get 
grouping information + auto grouping = cluster.grouping(); + + // Note: In toolkit, grouping provides methods to get dead channels + // although this is cham actually wire index ... + auto dead_uchs_range = grouping->get_overlap_dead_chs(min_time, max_time, min_uch, max_uch, apa, face, 0, 0); + auto dead_vchs_range = grouping->get_overlap_dead_chs(min_time, max_time, min_vch, max_vch, apa, face, 1, 0); + auto dead_wchs_range = grouping->get_overlap_dead_chs(min_time, max_time, min_wch, max_wch, apa, face, 2, 0); + + // auto dead_uchs_all = grouping->get_all_dead_chs(apa, face, 0); + // std::cout << "dead_uchs_all: "; + // for (const auto& [ch, ranges ]: dead_uchs_all) { + // std::cout << ch << " " << ranges.first << " " << ranges.second << std::endl; + // } + // std::cout << std::endl; + + // std::cout << "dead_uch_ranges: " << std::endl; + // for (const auto& [start, end] : dead_uchs_range) { + // std::cout << "[" << start << ", " << end << ") " << std::endl; + // } + // std::cout << "dead_vch_ranges: " << std::endl; + // for (const auto& [start, end] : dead_vchs_range) { + // std::cout << "[" << start << ", " << end << ") " << std::endl; + // } + // std::cout << "dead_wch_ranges: " << std::endl; + // for (const auto& [start, end] : dead_wchs_range) { + // std::cout << "[" << start << ", " << end << ") " << std::endl; + // } + + + // althoguh ch, but wire index ... 
+ std::map, std::pair> map_u_tcc = grouping->get_overlap_good_ch_charge(min_time, max_time, min_uch, max_uch, apa, face, 0); + std::map, std::pair> map_v_tcc = grouping->get_overlap_good_ch_charge(min_time, max_time, min_vch, max_vch, apa, face, 1); + std::map, std::pair> map_w_tcc = grouping->get_overlap_good_ch_charge(min_time, max_time, min_wch, max_wch, apa, face, 2); + + + // for (const auto& [time_ch, charge_info] : map_u_tcc) { + // std::cout << "U plane: time_slice=" << time_ch.first + // << ", ch=" << time_ch.second + // << ", charge=" << charge_info.first + // << ", error=" << charge_info.second << std::endl; + // } + + // //print out for debug ... + // std::cout << min_time << " " << max_time << " " + // << min_uch << " " << max_uch << " " + // << min_vch << " " << max_vch << " " + // << min_wch << " " << max_wch << " " << dead_uchs_range.size() << " " << dead_vchs_range.size() << " " << dead_wchs_range.size() << " " << map_u_tcc.size() << " " << map_v_tcc.size() << " " << map_w_tcc.size() << std::endl; + + + // Maps for tracking time slices and channels for each wire plane + std::map> u_time_chs; // U plane time-channel map + std::map> v_time_chs; // V plane time-channel map + std::map> w_time_chs; // W plane time-channel map + + int tick_span = 1; + + // Step 1: Fill maps according to existing blobs in cluster + auto children = cluster.children(); + for (auto child : children) { + auto blob = child->value().facade(); + if (!blob) continue; + + // Get the time slice bounds for this blob + int time_slice_min = blob->slice_index_min(); + int time_slice_max = blob->slice_index_max(); + tick_span = time_slice_max - time_slice_min; + + // Process each time slice in the blob + for (int time_slice = time_slice_min; time_slice < time_slice_max; time_slice = time_slice + tick_span) { + // Initialize channel sets if not present + if (u_time_chs.find(time_slice) == u_time_chs.end()) { + u_time_chs[time_slice] = std::set(); + v_time_chs[time_slice] = std::set(); + 
w_time_chs[time_slice] = std::set(); + } + // Process each wire plane (U=0, V=1, W=2) + for (int plane = 0; plane < 3; ++plane) { + // Get wire bounds for this plane in the blob + int wire_min = (plane == 0) ? blob->u_wire_index_min() : + (plane == 1) ? blob->v_wire_index_min() : blob->w_wire_index_min(); + int wire_max = (plane == 0) ? blob->u_wire_index_max() : + (plane == 1) ? blob->v_wire_index_max() : blob->w_wire_index_max(); + + // Process each wire in the range + for (int wire_ch = wire_min; wire_ch < wire_max; ++wire_ch) { + + // Store in appropriate plane map + if (plane == 0) { // U plane + u_time_chs[time_slice].insert(wire_ch); + } else if (plane == 1) { // V plane + v_time_chs[time_slice].insert(wire_ch); + } else { // W plane + w_time_chs[time_slice].insert(wire_ch); + } + } + } + } + } + + + // std::cout << u_time_chs.size() << " " << v_time_chs.size() << " " << w_time_chs.size() << " " << u_time_chs.begin()->second.size() << " " << v_time_chs.begin()->second.size() << " " << w_time_chs.begin()->second.size() << std::endl; + + // Distance cut for dead channel inclusion (20 cm as in original code) + const double dis_cut = 20 * units::cm; + + // Step 2: Handle dead channels from CTPC (using grouping interface) + for (const auto& [start, end]: dead_uchs_range) { + for (int ch = start; ch < end; ++ch) { + for (int time_slice = min_time; time_slice < max_time; time_slice+=tick_span) { + auto [x_pos, y_pos] = grouping->convert_time_ch_2Dpoint(time_slice, ch, apa, face, 0); + std::vector query_point = {static_cast(x_pos), static_cast(y_pos)}; + const auto& skd = cluster.kd2d(apa, face, 0); + auto ret_matches = skd.knn(1, query_point); + // std::cout << ret_matches[0].first << " " << sqrt(ret_matches[0].second) / units::cm << " " << dis_cut / units::cm << std::endl; + if (sqrt(ret_matches[0].second) < dis_cut) u_time_chs[time_slice].insert(ch); + } + } + } + for (const auto& [start, end]: dead_vchs_range) { + for (int ch = start; ch < end; ++ch) { + for 
(int time_slice = min_time; time_slice < max_time; time_slice+=tick_span) { + auto [x_pos, y_pos] = grouping->convert_time_ch_2Dpoint(time_slice, ch, apa, face, 1); + std::vector query_point = {static_cast(x_pos), static_cast(y_pos)}; + const auto& skd = cluster.kd2d(apa, face, 1); + auto ret_matches = skd.knn(1, query_point); + if (sqrt(ret_matches[0].second) < dis_cut) v_time_chs[time_slice].insert(ch); + } + } + } + for (const auto& [start, end]: dead_wchs_range) { + for (int ch = start; ch < end; ++ch) { + for (int time_slice = min_time; time_slice < max_time; time_slice+=tick_span) { + auto [x_pos, y_pos] = grouping->convert_time_ch_2Dpoint(time_slice, ch, apa, face, 2); + std::vector query_point = {static_cast(x_pos), static_cast(y_pos)}; + const auto& skd = cluster.kd2d(apa, face, 2); + auto ret_matches = skd.knn(1, query_point); + if (sqrt(ret_matches[0].second) < dis_cut) w_time_chs[time_slice].insert(ch); + } + } + } + + + + // Step 3: Deal with good channels from CTPC + std::map, double> time_ch_charge_map; + + // Process U plane good channels + for (const auto& [time_ch, charge_info] : map_u_tcc) { + int time_slice = time_ch.first; + int ch = time_ch.second; + auto [x_pos, y_pos] = grouping->convert_time_ch_2Dpoint(time_slice, ch, apa, face, 0); + std::vector query_point = {static_cast(x_pos), static_cast(y_pos)}; + const auto& skd = cluster.kd2d(apa, face, 0); + auto ret_matches = skd.knn(1, query_point); + double temp_min_dis = sqrt(ret_matches[0].second); + if (temp_min_dis > dis_cut) continue; + u_time_chs[time_slice].insert(ch); + } + // Process V plane good channels + for (const auto& [time_ch, charge_info] : map_v_tcc) { + int time_slice = time_ch.first; + int ch = time_ch.second; + auto [x_pos, y_pos] = grouping->convert_time_ch_2Dpoint(time_slice, ch, apa, face, 1); + std::vector query_point = {static_cast(x_pos), static_cast(y_pos)}; + const auto& skd = cluster.kd2d(apa, face, 1); + auto ret_matches = skd.knn(1, query_point); + double 
temp_min_dis = sqrt(ret_matches[0].second); + if (temp_min_dis > dis_cut) continue; + v_time_chs[time_slice].insert(ch); + } + // Process W plane good channels + for (const auto& [time_ch, charge_info] : map_w_tcc) { + int time_slice = time_ch.first; + int ch = time_ch.second; + auto [x_pos, y_pos] = grouping->convert_time_ch_2Dpoint(time_slice, ch, apa, face, 2); + std::vector query_point = {static_cast(x_pos), static_cast(y_pos)}; + const auto& skd = cluster.kd2d(apa, face, 2); + auto ret_matches = skd.knn(1, query_point); + double temp_min_dis = sqrt(ret_matches[0].second); + if (temp_min_dis > dis_cut) continue; + w_time_chs[time_slice].insert(ch); + } + + // Step 4: Convert to toolkit activity format (RayGrid measures) + const int nlayers = 2+3; + for (const auto& [time_slice, ch_set] : u_time_chs) { + auto slice_key = std::make_pair(time_slice, time_slice + tick_span); + + auto& measures = map_slices_measures[slice_key]; + if (measures.size()==0){ + measures.resize(nlayers); + // what to do the first two views??? 
+ measures[0].push_back(1); + measures[1].push_back(1); + measures[2].resize(m_plane_infos.at(apa).at(face)[0].total_wires, 0); + measures[3].resize(m_plane_infos.at(apa).at(face)[1].total_wires, 0); + measures[4].resize(m_plane_infos.at(apa).at(face)[2].total_wires, 0); + + // std::cout << "Test2: " << measures[2].size() << " " << measures[3].size() << " " << measures[4].size() << std::endl; + } + + WRG::measure_t& m = measures[2+0]; // U plane is layer 2 + for (int ch : ch_set) { + double charge = 1e-3; // Default charge value + auto it = map_u_tcc.find(std::make_pair(time_slice, ch)); + if (it != map_u_tcc.end()) { + charge = it->second.first; // Use the charge from the map + } + m[ch] = charge; + } + } + for (const auto& [time_slice, ch_set] : v_time_chs) { + auto slice_key = std::make_pair(time_slice, time_slice + tick_span); + + auto& measures = map_slices_measures[slice_key]; + if (measures.size()==0){ + measures.resize(nlayers); + // what to do the first two views??? + measures[0].push_back(1); + measures[1].push_back(1); + measures[2].resize(m_plane_infos.at(apa).at(face)[0].total_wires, 0); + measures[3].resize(m_plane_infos.at(apa).at(face)[1].total_wires, 0); + measures[4].resize(m_plane_infos.at(apa).at(face)[2].total_wires, 0); + + // std::cout << "Test3: " << measures[2].size() << " " << measures[3].size() << " " << measures[4].size() << std::endl; + } + + WRG::measure_t& m = measures[2+1]; // V plane is layer 3 + for (int ch : ch_set) { + double charge = 1e-3; // Default charge value + auto it = map_v_tcc.find(std::make_pair(time_slice, ch)); + if (it != map_v_tcc.end()) { + charge = it->second.first; // Use the charge from the map + } + m[ch] = charge; + } + } + for (const auto& [time_slice, ch_set] : w_time_chs) { + auto slice_key = std::make_pair(time_slice, time_slice + tick_span); + auto& measures = map_slices_measures[slice_key]; + if (measures.size()==0){ + measures.resize(nlayers); + // what to do the first two views??? 
+ measures[0].push_back(1); + measures[1].push_back(1); + measures[2].resize(m_plane_infos.at(apa).at(face)[0].total_wires, 0); + measures[3].resize(m_plane_infos.at(apa).at(face)[1].total_wires, 0); + measures[4].resize(m_plane_infos.at(apa).at(face)[2].total_wires, 0); + + // std::cout << "Test4: " << measures[2].size() << " " << measures[3].size() << " " << measures[4].size() << std::endl; + } + WRG::measure_t& m = measures[2+2]; // W plane is layer 4 + for (int ch : ch_set) { + double charge = 1e-3; // Default charge value + auto it = map_w_tcc.find(std::make_pair(time_slice, ch)); + if (it != map_w_tcc.end()) { + charge = it->second.first; // Use the charge from the map + } + m[ch] = charge; + } + } + + +} + + +// Step 2. Modify activity to suit. +void ImproveCluster_1::hack_activity_improved(const Cluster& cluster, std::map, std::vector >& map_slices_measures, const std::vector& path_wcps, int apa, int face) const +{ + + const double low_dis_limit = 0.3 * units::cm; + // Get path points + // auto path_wcps = cluster.get_path_wcps(); + std::vector> path_pts; + + // Convert list points to vector with interpolation + for (const auto& wcp : path_wcps) { + geo_point_t p= cluster.point3d_raw(wcp); // index ... // raw data points ... + auto wpid_p = cluster.wire_plane_id(wcp); // wpid ... 
+ // std::cerr << "retile: path:" << wcp << " p:" << p << " wpid:" << wpid_p << "\n"; + if (path_pts.empty()) { + path_pts.push_back(std::make_pair(p, wpid_p)); + } else { + double dis = (p - path_pts.back().first).magnitude(); + if (dis < low_dis_limit) { + path_pts.push_back(std::make_pair(p, wpid_p)); + } else { + int ncount = int(dis/low_dis_limit) + 1; + auto p2 = path_pts.back().first; + auto wpid2 = path_pts.back().second; + for (int i=0; i < ncount; i++) { + Point p1 = p2 + (p - p2) * (i+1)/ncount; + auto wpid_p1 = get_wireplaneid(p1, wpid_p, wpid2, m_dv); + path_pts.push_back(std::make_pair(p1, wpid_p1)); + } + } + } + } + + + std::vector> wire_limits; + for (int i=0; i!=3; i++){ + wire_limits.push_back(std::make_pair(m_plane_infos.at(apa).at(face)[i].start_index, m_plane_infos.at(apa).at(face)[i].end_index)); + // std::cout << "Test: " << apa << " " << face << " " << wire_limits[i].first << " " << wire_limits[i].second << std::endl; + } + + // this is to get the end of the time tick range = start_tick + tick_span + const int tick_span = map_slices_measures.begin()->first.second - map_slices_measures.begin()->first.first; + + // std::cout << "Test: " << apa << " " << face << " " << tick_span << std::endl; + + // Flag points that have sufficient activity around them + std::vector path_pts_flag(path_pts.size(), false); + for (size_t i = 0; i < path_pts.size(); i++) { + if (path_pts[i].second.apa() != apa || path_pts[i].second.face() != face) continue; + auto [time_tick_u, u_wire] = cluster.grouping()->convert_3Dpoint_time_ch(path_pts[i].first, apa, m_face.at(apa).at(face)->which(), 0); + auto [time_tick_v, v_wire] = cluster.grouping()->convert_3Dpoint_time_ch(path_pts[i].first, apa, m_face.at(apa).at(face)->which(), 1); + auto [time_tick_w, w_wire] = cluster.grouping()->convert_3Dpoint_time_ch(path_pts[i].first, apa, m_face.at(apa).at(face)->which(), 2); + //std::cout << time_tick_u << " " << u_wire << " " << v_wire << " " << w_wire << std::endl; + + int 
aligned_tick = std::round(time_tick_u *1.0/ tick_span) * tick_span; + std::pair tick_range = std::make_pair(aligned_tick, aligned_tick + tick_span); + + // Check for activity in neighboring wires/time + // For each plane (U,V,W), count activity in current and adjacent wires + std::vector wire_hits = {0,0,0}; // counts for U,V,W planes + std::vector wires = {u_wire, v_wire, w_wire}; + + for (size_t plane = 0; plane < 3; plane++) { + // Check activity in current and adjacent wires + for (int delta : {-1, 0, 1}) { + int wire = wires[plane] + delta; + if (wire < wire_limits[plane].first || wire > wire_limits[plane].second) + continue; + + int layer = plane + 2; + if (map_slices_measures.find(tick_range) != map_slices_measures.end()) { + if (map_slices_measures[tick_range][layer][wire] > 0) { + wire_hits[plane] += (delta == 0) ? 1 : (delta == -1) ? 2 : 1; + } + } + } + } + + // Set flag if sufficient activity found + if (wire_hits[0] >=2 && wire_hits[1] >=2 && wire_hits[2] >=2) { + path_pts_flag[i] = true; + } + + // std::cout << i << " " << path_pts[i].first << " " << path_pts_flag[i] << std::endl; + + //std::cout << path_pts[i] << " " << wire_hits[0] << " " << wire_hits[1] << " " << wire_hits[2] << " " << path_pts_flag[i] << " " << aligned_tick/tick_span << " " << u_wire << " " << v_wire << " " << w_wire << " " << time_tick_u << " " << std::round(time_tick_u / tick_span) << std::endl; + // std::cout << wire_hits[0] << " " << wire_hits[1] << " " << wire_hits[2] << " " << path_pts_flag[i] << std::endl; + } + + // Add missing activity based on path points + for (size_t i = 0; i < path_pts.size(); i++) { + if (path_pts[i].second.apa() != apa || path_pts[i].second.face() != face) continue; + + // Skip if point is well-covered by existing activity + if (i == 0) { + if (path_pts_flag[i] && path_pts_flag[i+1]) continue; + } else if (i+1 == path_pts.size()) { + if (path_pts_flag[i] && path_pts_flag[i-1]) continue; + } else { + if (path_pts_flag[i-1] && path_pts_flag[i] && 
path_pts_flag[i+1]) continue; + } + + auto [time_tick_u, u_wire] = cluster.grouping()->convert_3Dpoint_time_ch(path_pts[i].first, apa, m_face.at(apa).at(face)->which(), 0); + auto [time_tick_v, v_wire] = cluster.grouping()->convert_3Dpoint_time_ch(path_pts[i].first, apa, m_face.at(apa).at(face)->which(), 1); + auto [time_tick_w, w_wire] = cluster.grouping()->convert_3Dpoint_time_ch(path_pts[i].first, apa, m_face.at(apa).at(face)->which(), 2); + + int aligned_tick = std::round(time_tick_u *1.0/ tick_span) * tick_span; + + // Add activity around this point + for (int dt = -3; dt <= 3; dt++) { + int time_slice = aligned_tick + dt * tick_span; + if (time_slice < 0) continue; + + // Find or create time slice in measures map + auto slice_key = std::make_pair(time_slice, time_slice+tick_span); + if (map_slices_measures.find(slice_key) == map_slices_measures.end()) { + auto& measures = map_slices_measures[slice_key]; + measures = std::vector(5); // 2+3 layers + measures[0].push_back(1); // First layer measurement + measures[1].push_back(1); // Second layer measurement + measures[2].resize(m_plane_infos.at(apa).at(face)[0].total_wires, 0); + measures[3].resize(m_plane_infos.at(apa).at(face)[1].total_wires, 0); + measures[4].resize(m_plane_infos.at(apa).at(face)[2].total_wires, 0); + } + + // Add activity for each plane + std::vector wires = {u_wire, v_wire, w_wire}; + for (size_t plane = 0; plane < 3; plane++) { + auto& measures = map_slices_measures[slice_key][plane+2]; // +2 to skip first two layers + + for (int dw = -3; dw <= 3; dw++) { + int wire = wires[plane] + dw; + if (wire < wire_limits[plane].first || wire > wire_limits[plane].second || + pow(dw,2) + pow(dt,2)>3*3) + continue; + if (measures.at(wire) > 0.0) continue; // Already has activity + measures.at(wire) = 1.0e-3; // Set activity + } + } + + } + } + + + // Loop through the map and remove slices with no activity in any plane view + auto it = map_slices_measures.begin(); + while (it != 
map_slices_measures.end()) { + bool missing_activity = false; + + // For each wire plane (U, V, W) + for (int pind = 0; pind < 3; pind++) { + const auto& measures = it->second[pind + 2]; // +2 to skip first two layers + + // Check if this plane has NO activity + if (std::none_of(measures.begin(), measures.end(), [](double val) { return val > 0.0; })) { + missing_activity = true; + break; + } + } + + // If any plane has no activity, remove this slice + if (missing_activity) { + it = map_slices_measures.erase(it); + } else { + ++it; + } + } + + +} + + +std::set +ImproveCluster_1::remove_bad_blobs(const Cluster& cluster, Cluster& shad_cluster, int tick_span, int apa, int face) const +{ + // Get time-organized maps of original and new blobs + const auto& orig_time_blob_map = cluster.time_blob_map().at(apa).at(face); + const auto& new_time_blob_map = shad_cluster.time_blob_map().at(apa).at(face); + + // Build index mappings for new blobs (similar to prototype's mcell indexing) + std::map map_index_blob; + std::map map_blob_index; + std::vector all_new_blobs; + + int index = 0; + for (const auto& [time_slice, new_blobs] : new_time_blob_map) { + for (const Blob* blob : new_blobs) { + map_index_blob[index] = blob; + map_blob_index[blob] = index; + all_new_blobs.push_back(blob); + index++; + } + } + + // If no new blobs or only one blob, return empty set (no graph needed) + if (all_new_blobs.size() <= 1) { + return std::set(); + } + + // Create graph for new blobs - establish connectivity between adjacent time slices + const int N = all_new_blobs.size(); + boost::adjacency_list> + temp_graph(N); + + // Build graph edges between blobs in adjacent time slices that overlap spatially + for (const auto& [time_slice, current_blobs] : new_time_blob_map) { + // Connect to next time slice + auto next_it = new_time_blob_map.find(time_slice + tick_span); + if (next_it != new_time_blob_map.end()) { + for (const Blob* blob1 : current_blobs) { + for (const Blob* blob2 : next_it->second) 
{ + int index1 = map_blob_index[blob1]; + int index2 = map_blob_index[blob2]; + + // Add edge if blobs overlap spatially (similar to prototype's Overlap_fast) + if (blob1->overlap_fast(*blob2, 1)) { + add_edge(index1, index2, 1.0, temp_graph); + } + } + } + } + } + + // Find connected components (groups of spatially/temporally connected blobs) + std::vector component(num_vertices(temp_graph)); + const int num_components = connected_components(temp_graph, &component[0]); + + std::set blobs_to_remove; + + // If we have multiple disconnected components, validate each component + if (num_components > 1) { + std::set good_components; + + // Examine each connected component to determine if it's "good" + for (int i = 0; i < static_cast(component.size()); ++i) { + int comp_id = component[i]; + + // Skip if we've already validated this component + if (good_components.find(comp_id) != good_components.end()) { + continue; + } + + const Blob* blob = map_index_blob[i]; + int time_slice = blob->slice_index_min(); // Get time slice for this blob + bool flag_good = false; + + // Check overlap with original blobs in previous time slice + if (!flag_good) { + auto prev_it = orig_time_blob_map.find(time_slice - tick_span); + if (prev_it != orig_time_blob_map.end()) { + for (const Blob* orig_blob : prev_it->second) { + if (blob->overlap_fast(*orig_blob, 1)) { + flag_good = true; + break; + } + } + } + } + + // Check overlap with original blobs in same time slice + if (!flag_good) { + auto same_it = orig_time_blob_map.find(time_slice); + if (same_it != orig_time_blob_map.end()) { + for (const Blob* orig_blob : same_it->second) { + if (blob->overlap_fast(*orig_blob, 1)) { + flag_good = true; + break; + } + } + } + } + + // Check overlap with original blobs in next time slice + if (!flag_good) { + auto next_it = orig_time_blob_map.find(time_slice + tick_span); + if (next_it != orig_time_blob_map.end()) { + for (const Blob* orig_blob : next_it->second) { + if 
(blob->overlap_fast(*orig_blob, 1)) { + flag_good = true; + break; + } + } + } + } + + // If this component representative blob has good overlap, mark entire component as good + if (flag_good) { + good_components.insert(comp_id); + } + } + + // Collect blobs from bad components for removal + for (int i = 0; i < static_cast(component.size()); ++i) { + int comp_id = component[i]; + if (good_components.find(comp_id) == good_components.end()) { + // This component is not good, mark its blobs for removal + const Blob* blob = map_index_blob[i]; + blobs_to_remove.insert(blob); + } + } + } + + return blobs_to_remove; +} + +std::vector ImproveCluster_1::make_iblobs_improved(std::map, std::vector >& map_slices_measures, int apa, int face) const +{ + std::vector ret; + + const auto& coords = m_face.at(apa).at(face)->raygrid(); + int blob_ident=0; + int slice_ident = 0; + + const double tick = m_grouping->get_tick().at(apa).at(face); + + + for (auto it = map_slices_measures.begin(); it != map_slices_measures.end(); it++){ + // Do the actual tiling. 
+ WRG::activities_t activities = RayGrid::make_activities(m_face.at(apa).at(face)->raygrid(), it->second); + auto bshapes = WRG::make_blobs(coords, activities); + + + // { + // std::cerr << "abc: " + // << " s:"<(sframe, slice_ident++, it->first.first*tick, (it->first.second - it->first.first)*tick); + // Copy the prepared activity map into the slice + auto& slice_activity = sslice->activity(); + + for (int plane_idx = 0; plane_idx < 3; ++plane_idx) { + const int layer = plane_idx + 2; + const auto& plane_measures = it->second[layer]; + + // Get the wire plane for this face and plane + auto face_ptr = m_face.at(apa).at(face); + auto planes = face_ptr->planes(); + if (static_cast(plane_idx) >= planes.size()) continue; + + auto wire_plane = planes[plane_idx]; + const auto& channels = wire_plane->channels(); + + // Map wire indices to channels and populate activity + for (size_t wire_idx = 0; wire_idx < plane_measures.size(); ++wire_idx) { + if (plane_measures[wire_idx] > 0.0) { + // Find the channel corresponding to this wire index + if (wire_idx < channels.size()) { + auto ichan = channels[wire_idx]; + if (ichan) { + // Set activity with value and zero uncertainty + if (plane_measures[wire_idx]==1e-3){ + slice_activity[ichan] = ISlice::value_t(0.0, 1e12); + } else { + slice_activity[ichan] = ISlice::value_t(plane_measures[wire_idx], 0.0); + } + } + } + } + } + } + + + for (const auto& bshape : bshapes) { + + // { + // std::cerr << "blob: " + // << " s:"<(blob_ident++, blob_value, + blob_error, bshape, sslice, m_face.at(apa).at(face)); + ret.push_back(iblob); + + } + } + + // std::cout << "Test: Blobs: " << ret.size() << std::endl; + + return ret; +} + + +} // namespace WireCell::Clus + diff --git a/clus/src/improvecluster_1.h b/clus/src/improvecluster_1.h new file mode 100644 index 000000000..897cf961d --- /dev/null +++ b/clus/src/improvecluster_1.h @@ -0,0 +1,53 @@ +// ImproveCluster_1 - First level cluster improvement using Steiner tree methods +// +// This class 
inherits from RetileCluster and provides enhanced cluster +// improvement functionality by incorporating Steiner tree algorithms +// from the Wire-Cell Prototype. + +#ifndef WIRECELLCLUS_IMPROVE_CLUSTER_1_H +#define WIRECELLCLUS_IMPROVE_CLUSTER_1_H + +#include "retile_cluster.h" // Include the RetileCluster header + +#include "WireCellUtil/NamedFactory.h" + +#include + +namespace WireCell::Clus { + + using namespace WireCell; + using namespace WireCell::Clus; + using namespace WireCell::Clus::Facade; + using namespace WireCell::PointCloud::Tree; + + class ImproveCluster_1 : public RetileCluster { + + public: + + ImproveCluster_1(); + virtual ~ImproveCluster_1(); + + // IConfigurable API - extend the base configuration + void configure(const WireCell::Configuration& config) override; + virtual Configuration default_configuration() const override; + + // IPCTreeMutate API - override to add Steiner tree improvements + virtual std::unique_ptr mutate(node_t& node) const override; + + protected: + void get_activity_improved(const Cluster& cluster, std::map,std::vector>& map_slices_measures, int apa, int face) const; + + void hack_activity_improved(const Cluster& cluster, std::map, std::vector >& map_slices_measures, const std::vector& path_wcps, int apa, int face) const; + + std::vector make_iblobs_improved(std::map, std::vector >& map_slices_measures, int apa, int face) const; + + + std::set remove_bad_blobs(const Cluster& cluster, Cluster& shad_cluster, int tick_span, int apa, int face) const; + + private: + + + }; + +} +#endif // WIRECELLCLUS_IMPROVE_CLUSTER_1_H \ No newline at end of file diff --git a/clus/src/improvecluster_2.cxx b/clus/src/improvecluster_2.cxx new file mode 100644 index 000000000..b13d8c570 --- /dev/null +++ b/clus/src/improvecluster_2.cxx @@ -0,0 +1,249 @@ +// ImproveCluster_2 - Second level cluster improvement +// +// This class inherits from ImproveCluster_1 and provides additional +// cluster improvement functionality, building upon the Steiner 
tree +// enhancements from the first level. + +#include "improvecluster_1.h" // Include the ImproveCluster_1 header +#include "SteinerGrapher.h" +#include "WireCellUtil/NamedFactory.h" +#include "WireCellUtil/Logging.h" + +#include + +namespace WireCell::Clus { + + class ImproveCluster_2 : public ImproveCluster_1 { + + public: + + ImproveCluster_2(); + virtual ~ImproveCluster_2(); + + // IConfigurable API - extend the base configuration + void configure(const WireCell::Configuration& config) override; + virtual Configuration default_configuration() const override; + + // IPCTreeMutate API - override to add second level improvements + virtual std::unique_ptr mutate(node_t& node) const override; + + private: + + + + }; + +} // namespace WireCell::Clus + +WIRECELL_FACTORY(ImproveCluster_2, WireCell::Clus::ImproveCluster_2, + WireCell::IConfigurable, WireCell::IPCTreeMutate) + +using namespace WireCell; +using namespace WireCell::Clus; +using namespace WireCell::Clus::Facade; +using namespace WireCell::PointCloud::Tree; + +// Segregate this weird choice for namespace. +namespace WCF = WireCell::Clus::Facade; + +// Nick name for less typing. 
+namespace WRG = WireCell::RayGrid; +namespace WireCell::Clus { + + ImproveCluster_2::ImproveCluster_2() + { + } + + ImproveCluster_2::~ImproveCluster_2() + { + } + + void ImproveCluster_2::configure(const WireCell::Configuration& cfg) + { + // Configure base class first + ImproveCluster_1::configure(cfg); + + + } + + Configuration ImproveCluster_2::default_configuration() const + { + Configuration cfg = ImproveCluster_1::default_configuration(); + + + return cfg; + } + + std::unique_ptr ImproveCluster_2::mutate(node_t& node) const + { + // get the original cluster + auto* orig_cluster = reinitialize(node); + + // First: get the shortest path from the original cluster + // Create a SteinerGrapher instance with the cluster + // You'll need to provide appropriate configuration + Steiner::Grapher::Config grapher_config; + // Configure as needed - you may need to access member variables + // that provide detector volumes and point cloud transform sets + grapher_config.dv = m_dv; // From NeedDV mixin + grapher_config.pcts = m_pcts; // From NeedPCTS mixin + + // Create the Steiner::Grapher instance + auto log = Log::logger("improve_cluster_2 mutate"); + Steiner::Grapher orig_steiner_grapher(*orig_cluster, grapher_config, log); + auto& orig_graph = orig_steiner_grapher.get_graph("basic_pid"); // this is good for the original cluster + + if (m_verbose) std::cout << "ImproveCluster_2 " << " Orig Graph vertices: " << boost::num_vertices(orig_graph) << ", edges: " << boost::num_edges(orig_graph) << std::endl; + + // Establish same blob steiner edges + orig_steiner_grapher.establish_same_blob_steiner_edges("basic_pid", true); + std::vector orig_path_point_indices; + { + auto pair_points = orig_cluster->get_two_boundary_wcps(); + auto first_index = orig_cluster->get_closest_point_index(pair_points.first); + auto second_index = orig_cluster->get_closest_point_index(pair_points.second); + orig_path_point_indices = 
orig_cluster->graph_algorithms("basic_pid").shortest_path(first_index, second_index); + } + if (m_verbose) std::cout << "ImproveCluster_2 " << " Origi Shortest path indices: " << orig_path_point_indices.size() << " ; Graph vertices: " << boost::num_vertices(orig_graph) << ", edges: " << boost::num_edges(orig_graph)<< std::endl; + + orig_steiner_grapher.remove_same_blob_steiner_edges("basic_pid"); + if (m_verbose) std::cout << "ImproveCluster_2 " << " Orig Graph vertices: " << boost::num_vertices(orig_graph) << ", edges: " << boost::num_edges(orig_graph) << std::endl; + + + + // Second, make a temp_cluster based on the original cluster via ImproveCluster_1 + if (m_verbose) std::cout << "ImproveCluster_2: Grouping" << m_grouping->get_name() << " " << m_grouping->children().size() << std::endl; + + auto temp_node = ImproveCluster_1::mutate(node); + auto temp_cluster_1 = temp_node->value.facade(); + auto& temp_cluster = m_grouping->make_child(); + temp_cluster.take_children(*temp_cluster_1); // Move all blobs from improved cluster + temp_cluster.from(*orig_cluster); + if (m_verbose) std::cout << "ImproveCluster_2: Grouping" << m_grouping->get_name() << " " << m_grouping->children().size() << std::endl; + + Steiner::Grapher temp_steiner_grapher(temp_cluster, grapher_config, log); + + // this requires CTPC and ref_point cloud of original cluster + auto& temp_graph = temp_cluster.find_graph("ctpc_ref_pid", *orig_cluster, m_dv, m_pcts); + //temp_steiner_grapher.get_graph("basic_pid"); + + + if (m_verbose) std::cout << "ImproveCluster_2 " << " Temp Graph vertices: " << boost::num_vertices(temp_graph) << ", edges: " << boost::num_edges(temp_graph) << std::endl; + temp_steiner_grapher.establish_same_blob_steiner_edges("ctpc_ref_pid", false); + std::vector temp_path_point_indices; + { + auto pair_points = temp_cluster.get_two_boundary_wcps(); + auto first_index = temp_cluster.get_closest_point_index(pair_points.first); + auto second_index = 
temp_cluster.get_closest_point_index(pair_points.second); + temp_path_point_indices = temp_cluster.graph_algorithms("ctpc_ref_pid").shortest_path(first_index, second_index); + } + if (m_verbose) std::cout << "ImproveCluster_2 " << " Temp Shortest path indices: " << temp_path_point_indices.size() << " ; Graph vertices: " << boost::num_vertices(temp_graph) << ", edges: " << boost::num_edges(temp_graph)<< std::endl; + temp_steiner_grapher.remove_same_blob_steiner_edges("ctpc_ref_pid"); + if (m_verbose) std::cout << "ImproveCluster_2 " << " Temp Graph vertices: " << boost::num_vertices(temp_graph) << ", edges: " << boost::num_edges(temp_graph) << std::endl; + + + // star to construct a new cluster + auto wpids = orig_cluster->wpids_blob(); + std::set wpid_set(wpids.begin(), wpids.end()); + + // make a new node from the existing grouping + auto& new_cluster = m_grouping->make_child(); // make a new cluster inside + + for (auto it = wpid_set.begin(); it != wpid_set.end(); ++it) { + int apa = it->apa(); + int face = it->face(); + const auto& angles = m_wpid_angles.at(*it); + + std::map, std::vector > map_slices_measures; + + // get original activities ... + get_activity_improved(*orig_cluster, map_slices_measures, apa, face); + + // hack activity according to original cluster + hack_activity_improved(*orig_cluster, map_slices_measures, orig_path_point_indices, apa, face); // may need more args + + // hack activities according to the new cluster + hack_activity_improved(temp_cluster, map_slices_measures, temp_path_point_indices, apa, face); // may need more args + + // Step 3. 
+ auto iblobs = make_iblobs_improved(map_slices_measures, apa, face); + + if (m_verbose) std::cout << "ImproveCluster_2: new cluster " << iblobs.size() << " iblobs for apa " << apa << " face " << face << std::endl; + + auto niblobs = iblobs.size(); + // start to sampling points + int npoints = 0; + for (size_t bind=0; bindget_tick().at(apa).at(face); + + auto pcs = Aux::sample_live(sampler, iblob, angles, tick, bind); + // DO NOT EXTEND FURTHER! see #426, #430 + + if (pcs["3d"].size()==0) continue; // no points ... + // Access 3D coordinates + auto pc3d = pcs["3d"]; // Get the 3D point cloud dataset + auto x_coords = pc3d.get("x")->elements(); // Get X coordinates + // auto y_coords = pc3d.get("y")->elements(); // Get Y coordinates + // auto z_coords = pc3d.get("z")->elements(); // Get Z coordinates + // auto ucharge_val = pc3d.get("ucharge_val")->elements(); // Get U charge + // auto vcharge_val = pc3d.get("vcharge_val")->elements(); // Get V charge + // auto wcharge_val = pc3d.get("wcharge_val")->elements(); // Get W charge + // auto ucharge_err = pc3d.get("ucharge_unc")->elements(); // Get U charge error + // auto vcharge_err = pc3d.get("vcharge_unc")->elements(); // Get V charge error + // auto wcharge_err = pc3d.get("wcharge_unc")->elements(); // Get W charge error + + // std::cout << "ImproveCluster_1 PCS: " << pcs.size() << " " + // << pcs["3d"].size() << " " + // << x_coords.size() << std::endl; + + npoints +=x_coords.size(); + if (pcs.empty()) { + SPDLOG_DEBUG("ImproveCluster_1: skipping blob {} with no points", iblob->ident()); + continue; + } + new_cluster.node()->insert(Tree::Points(std::move(pcs))); + } + if (m_verbose) std::cout << "ImproveCluster_2: " << npoints << " points sampled for apa " << apa << " face " << face << " Blobs " << niblobs << std::endl; + + // remove bad blobs + int tick_span = map_slices_measures.begin()->first.second - map_slices_measures.begin()->first.first; + auto blobs_to_remove = remove_bad_blobs(*orig_cluster, new_cluster, 
tick_span, apa, face); + for (const Blob* blob : blobs_to_remove) { + Blob& b = const_cast(*blob); + new_cluster.remove_child(b); + } + if (m_verbose) std::cout << "ImproveCluster_2: " << blobs_to_remove.size() << " blobs removed for apa " << apa << " face " << face << " " << new_cluster.children().size() << std::endl; + } + + + // Remove this cluster from the grouping + auto* temp_cluster_ptr = &temp_cluster; + m_grouping->destroy_child(temp_cluster_ptr, true); + if (m_verbose) std::cout << "ImproveCluster_2: Grouping" << m_grouping->get_name() << " " << m_grouping->children().size() << std::endl; + + auto& default_scope = orig_cluster->get_default_scope(); + auto& raw_scope = orig_cluster->get_raw_scope(); + + if (m_verbose) std::cout << "ImproveCluster_1: Scope: " << default_scope.hash() << " " << raw_scope.hash() << std::endl; + if (default_scope.hash()!=raw_scope.hash()){ + auto correction_name = orig_cluster->get_scope_transform(default_scope); + // std::vector filter_results = c + new_cluster.add_corrected_points(m_pcts, correction_name); + // Get the new scope with corrected points + // const auto& correction_scope = new_cluster.get_scope(correction_name); + // Set this as the default scope for viewing + new_cluster.from(*orig_cluster); // copy state from original cluster + // std::cout << "Test: Same:" << default_scope.hash() << " " << raw_scope.hash() << std::endl; + } + + // auto retiled_node = new_cluster.node(); + + + return m_grouping->remove_child(new_cluster); + + } + +} // namespace WireCell::Clus diff --git a/clus/src/make_fiducialutils.cxx b/clus/src/make_fiducialutils.cxx new file mode 100644 index 000000000..0a0ad5348 --- /dev/null +++ b/clus/src/make_fiducialutils.cxx @@ -0,0 +1,98 @@ +/// This is an ensemble visitor that constructs a FiducialUtils and places it in +/// the "live" grouping. 
+ +#include "WireCellClus/FiducialUtils.h" +#include "WireCellClus/IEnsembleVisitor.h" +#include "WireCellClus/ClusteringFuncsMixins.h" +#include "WireCellIface/IConfigurable.h" +#include "WireCellUtil/NamedFactory.h" + +#include // for make_shared + + +class MakeFiducialUtils; +WIRECELL_FACTORY(MakeFiducialUtils, MakeFiducialUtils, + WireCell::IConfigurable, WireCell::Clus::IEnsembleVisitor) +using namespace WireCell; +using namespace WireCell::Clus; +using namespace WireCell::Clus::Facade; +class MakeFiducialUtils : public IConfigurable, public Clus::IEnsembleVisitor + , private Clus::NeedDV, private Clus::NeedFiducial, private Clus::NeedPCTS { +public: + MakeFiducialUtils() {}; + virtual ~MakeFiducialUtils() {}; + + void configure(const WireCell::Configuration& cfg) { + NeedDV::configure(cfg); + NeedFiducial::configure(cfg); + NeedPCTS::configure(cfg); + m_live_name = get(cfg, "live", m_live_name); + m_dead_name = get(cfg, "dead", m_dead_name); + m_targ_name = get(cfg, "target", m_targ_name); + + + } + virtual Configuration default_configuration() const { + Configuration cfg; + cfg["live"] = m_live_name; + cfg["dead"] = m_dead_name; + cfg["target"] = m_targ_name; + return cfg; + } + + void visit(Ensemble& ensemble) const { + using spdlog::warn; + + auto* live_grouping = ensemble.with_name(m_live_name).at(0); + auto* dead_grouping = ensemble.with_name(m_dead_name).at(0); + auto* targ_grouping = ensemble.with_name(m_targ_name).at(0); + + int nerrors = 0; + if (!live_grouping) { + warn("no live grouping in ensemble"); + ++nerrors; + } + if (!dead_grouping) { + warn("no dead grouping in ensemble"); + ++nerrors; + } + if (!targ_grouping) { + warn("no target grouping in ensemble"); + ++nerrors; + } + if (nerrors) { + warn("ensemble is not well formed, no FiducialUtils made, this may cause downstream problems."); + return; + } + + std::cout << "Add FiducialUtils to grouping" << std::endl; + auto fu = std::make_shared(FiducialUtils::StaticData{m_dv, m_fiducial, 
m_pcts}); + + // Feed the dynamic data (live and dead groupings) + fu->feed_dynamic(FiducialUtils::DynamicData{*live_grouping, *dead_grouping}); + + // validation ... + // { + // // fu->inside_fiducial_volume(Point(250*units::cm, 120*units::cm, 10*units::cm)); + // // fu->inside_fiducial_volume(Point(250*units::cm, -80*units::cm, 30*units::cm)); + // // fu->inside_fiducial_volume(Point(250*units::cm, 120*units::cm, -10*units::cm)); + // // fu->inside_fiducial_volume(Point(25*units::cm, 60*units::cm, 10*units::cm)); + // // fu->inside_fiducial_volume(Point(250*units::cm, 120*units::cm, 10*units::cm)); + // // fu->inside_fiducial_volume(Point(250*units::cm, -80*units::cm, 1030*units::cm)); + // // fu->inside_fiducial_volume(Point(250*units::cm, -80*units::cm, 960*units::cm)); + // // fu->inside_fiducial_volume(Point(25*units::cm, 60*units::cm, 10*units::cm)); + + // std::cout << "FiducialUtils check: " << fu->inside_dead_region(Point(0,0,2),0,0,2) << " " << fu->inside_dead_region(Point(41.8*units::cm, 26.5*units::cm,707.0*units::cm),0,0,2) << " " << fu->inside_dead_region(Point(41.8*units::cm, 16.5*units::cm,707.0*units::cm),0,0,2) << std::endl; + + + // } + + live_grouping->set_fiducialutils(fu); + } + +private: + + // Names of grouping for "live" and "dead" and "target" (the grouping to + // receive the FiducialUtils. 
+ std::string m_live_name{"live"}, m_dead_name{"dead"}, m_targ_name{"live"}; +}; diff --git a/clus/src/make_graphs.cxx b/clus/src/make_graphs.cxx new file mode 100644 index 000000000..abf93758e --- /dev/null +++ b/clus/src/make_graphs.cxx @@ -0,0 +1,86 @@ +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellClus/Facade_Cluster.h" +#include "WireCellClus/Facade_Blob.h" +#include "WireCellClus/Facade_Grouping.h" + +#include "connect_graphs.h" +#include "make_graphs.h" + +using namespace WireCell; +using namespace WireCell::Clus; +using namespace WireCell::Clus::Facade; +using namespace WireCell::Clus::Graphs; + + +Weighted::Graph WireCell::Clus::Graphs::make_graph_closely( + const Cluster& cluster) +{ + Weighted::Graph graph(cluster.npoints()); + connect_graph_closely(cluster, graph); + return graph; +} + +Weighted::Graph WireCell::Clus::Graphs::make_graph_closely_pid( + const Cluster& cluster) +{ + Weighted::Graph graph(cluster.npoints()); + connect_graph_closely_pid(cluster, graph); + return graph; +} + +Weighted::Graph WireCell::Clus::Graphs::make_graph_basic( + const Cluster& cluster) +{ + auto graph = make_graph_closely(cluster); + connect_graph(cluster, graph); + return graph; +} + +Weighted::Graph WireCell::Clus::Graphs::make_graph_basic_pid( + const Cluster& cluster, + const Cluster& ref_cluster) +{ + auto graph = make_graph_closely_pid(cluster); + + connect_graph_with_reference(cluster, ref_cluster, graph); + + return graph; +} + +Weighted::Graph WireCell::Clus::Graphs::make_graph_ctpc( + const Cluster& cluster, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts) +{ + auto graph = make_graph_closely(cluster); + connect_graph_ctpc(cluster, dv, pcts, graph); + connect_graph(cluster, graph); + return graph; +} + +Weighted::Graph WireCell::Clus::Graphs::make_graph_ctpc_pid( + const Cluster& cluster, + const Cluster& ref_cluster, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts) +{ + // Start with close connections + auto 
graph = make_graph_closely_pid(cluster); + + // Add CTPC connections with reference filtering + connect_graph_ctpc_with_reference(cluster, ref_cluster, dv, pcts, graph); + connect_graph_with_reference(cluster, ref_cluster, graph); + + return graph; +} + + +Weighted::Graph WireCell::Clus::Graphs::make_graph_relaxed( + const Facade::Cluster& cluster, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts) +{ + auto graph = make_graph_closely(cluster); + connect_graph_relaxed(cluster, dv, pcts, graph); + return graph; +} diff --git a/clus/src/make_graphs.h b/clus/src/make_graphs.h new file mode 100644 index 000000000..f44c6e278 --- /dev/null +++ b/clus/src/make_graphs.h @@ -0,0 +1,52 @@ +#ifndef WIRECELLCLUS_PRIVATE_MAKE_GRAPHS +#define WIRECELLCLUS_PRIVATE_MAKE_GRAPHS + +#include "WireCellClus/Graphs.h" +#include "WireCellIface/IDetectorVolumes.h" +#include "WireCellClus/IPCTransform.h" +#include "WireCellClus/Facade_Cluster.h" + + +namespace WireCell::Clus::Graphs { + + // factory functions wrapping up construction and various connect_graph* + // functions. + + // just closely connected. + Weighted::Graph make_graph_closely( + const Facade::Cluster& cluster); + + // just closely connected. 
+ Weighted::Graph make_graph_closely_pid( + const Facade::Cluster& cluster); + + // closely + basic connection + Weighted::Graph make_graph_basic( + const Facade::Cluster& cluster); + + // closely_pid + basic connection with reference cluster (empty by default) + Weighted::Graph make_graph_basic_pid( + const Facade::Cluster& cluster, + const Facade::Cluster& ref_cluster = Facade::Cluster{}); + + // closely + ctpc connection + Weighted::Graph make_graph_ctpc( + const Facade::Cluster& cluster, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts); + + Weighted::Graph make_graph_ctpc_pid( + const Facade::Cluster& cluster, + const Facade::Cluster& ref_cluster, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts); + + // closely + relaxed (overclustering protection) + Weighted::Graph make_graph_relaxed( + const Facade::Cluster& cluster, + IDetectorVolumes::pointer dv, + IPCTransformSet::pointer pcts); + +} + +#endif diff --git a/clus/src/retile_cluster.cxx b/clus/src/retile_cluster.cxx new file mode 100644 index 000000000..dcdb0d4bb --- /dev/null +++ b/clus/src/retile_cluster.cxx @@ -0,0 +1,721 @@ +// This provides RetileCluster aka "IPCTreeMutate". +// +// Warning: this lives up to its name. It may change the input cluster. +// +// It requires the input cluster node to have a (grouping) node parent. +// +// The retiling of a cluster follows this general sequence: +// +// 1) constructs layers of "activity" from input grouping. +// 2) applies "hacks" to the activity. +// 3) runs WCT tiling to create blobs. +// 4) runs blobs sampling to make point clouds +// 5) produces clusters such that the new blobs formed from an old cluster form a new "shadow" cluster. 
+// 6) forms a PC-tree +// 7) outputs the new grouping + +#include "retile_cluster.h" // Include the header instead of defining the class here + +WIRECELL_FACTORY(RetileCluster, WireCell::Clus::RetileCluster, + WireCell::IConfigurable, WireCell::IPCTreeMutate) + + + + +// Segregate this weird choice for namespace. +namespace WCF = WireCell::Clus::Facade; + +// Nick name for less typing. +namespace WRG = WireCell::RayGrid; + + + + +// Now can handle all APA/Faces +void RetileCluster::configure(const WireCell::Configuration& cfg) +{ + NeedDV::configure(cfg); + NeedPCTS::configure(cfg); + + if (cfg.isMember("samplers") && cfg["samplers"].isArray()) { + // Process array of samplers + for (const auto& sampler_cfg : cfg["samplers"]) { + int apa = sampler_cfg["apa"].asInt(); + int face = sampler_cfg["face"].asInt(); + std::string sampler_name = sampler_cfg["name"].asString(); + + if (sampler_name.empty()) { + raise("RetileCluster requires an IBlobSampler name for APA %d face %d", apa, face); + } + // std::cout << "Test: " << apa << " " << face << " " << sampler_name << std::endl; + auto sampler_ptr = Factory::find_tn(sampler_name); + m_samplers[apa][face] = sampler_ptr; + } + } + + std::vector anodes_tn; + for (const auto& aname : cfg["anodes"]) { + auto anode = Factory::find_tn(aname.asString()); + anodes_tn.push_back(anode); + for (const auto& face1 : anode->faces()) { + int apa = anode->ident(); + int face = face1->which(); + m_face[apa][face] = face1; + const auto& coords = face1->raygrid(); + if (coords.nlayers() != 5) { + raise("unexpected number of ray grid layers: %d", coords.nlayers()); + } + // std::cout <<"Test: " << apa << " " << face << " " << coords.nlayers() << std::endl; + // Get wire info for each plane + m_plane_infos[apa][face].clear(); + m_plane_infos[apa][face].push_back(Aux::get_wire_plane_info(face1, kUlayer)); + m_plane_infos[apa][face].push_back(Aux::get_wire_plane_info(face1, kVlayer)); + 
m_plane_infos[apa][face].push_back(Aux::get_wire_plane_info(face1, kWlayer)); + } + } + + // Add time cut configuration + m_cut_time_low = get(cfg, "cut_time_low", -1e9); + m_cut_time_high = get(cfg, "cut_time_high", 1e9); + m_verbose = get(cfg, "verbose", false); + +} + + +// Step 0. The RetileCluster only directly gets a cluster node but needs +// context from the parent grouping which likely can be cached between calls. +Facade::Cluster* RetileCluster::reinitialize(Points::node_type& node) const +{ + auto* cluster = node.value.facade(); + if (!cluster || !cluster->grouping()) { + return nullptr; + } + if (m_grouping && m_grouping == cluster->grouping()) { + return cluster; + } + m_grouping = cluster->grouping(); + + m_wpid_angles.clear(); + for (const auto& gwpid : m_grouping->wpids()) { + // gwpids are "all" type - no specific layer so we must remake per-layer wpids + int apa = gwpid.apa(); + int face = gwpid.face(); + std::vector angles(3); + for (size_t ind=0; ind<3; ++ind) { + // iplane2layer is in WirePlaneId.h + WirePlaneId wpid(iplane2layer[ind], face, apa); + Vector wire_dir = m_dv->wire_direction(wpid); + angles[ind] = std::atan2(wire_dir.z(), wire_dir.y()); + } + m_wpid_angles[gwpid] = angles; + } + return cluster; +} + + +// Step 1. Build activities from blobs in a cluster. +void RetileCluster::get_activity(const Cluster& cluster, std::map, std::vector >& map_slices_measures, int apa, int face) const +{ + const int nlayers = 2+3; + + // checkme: this assumes "iend" is the usual one-past-last aka [ibeg,iend) + // forms a half-open range. I'm not sure if PointTreeBuilding is following + // this or not. 
+ + + + // for (auto& info : plane_infos) { + // std::cout << "test1: " << info.start_index << " " << info.end_index << " " << info.total_wires << std::endl; + // } + + int (WCF::Blob::*wmin[])(void) const = { + &WCF::Blob::u_wire_index_min, + &WCF::Blob::v_wire_index_min, + &WCF::Blob::w_wire_index_min + }; + + int (WCF::Blob::*wmax[])(void) const = { + &WCF::Blob::u_wire_index_max, + &WCF::Blob::v_wire_index_max, + &WCF::Blob::w_wire_index_max + }; + + const double hit=1.0; // actual charge value does not matter to tiling. + + for (const auto* fblob : cluster.children()) { + int tslice_beg = fblob->slice_index_min(); + int tslice_end = fblob->slice_index_max(); + + // if blob is not consistent skip ... + auto blob_wpid = fblob->wpid(); + if (blob_wpid.apa()!=apa || blob_wpid.face()!=face) continue; + + auto& measures = map_slices_measures[std::make_pair(tslice_beg, tslice_end)]; + + // if (tslice_beg == tslice_end) { + // std::cout << "Test: Same: " << tslice_beg << " " << tslice_end << std::endl; + // } + + if (measures.size()==0){ + measures.resize(nlayers); + // what to do the first two views??? + measures[0].push_back(1); + measures[1].push_back(1); + measures[2].resize(m_plane_infos.at(apa).at(face)[0].total_wires, 0); + measures[3].resize(m_plane_infos.at(apa).at(face)[1].total_wires, 0); + measures[4].resize(m_plane_infos.at(apa).at(face)[2].total_wires, 0); + // std::cout << measures[2].size() << " " << measures[3].size() << " " << measures[4].size() << std::endl; + } + + // the three views ... + for (int index=0; index<3; ++index) { + const int layer = index + 2; + WRG::measure_t& m = measures[layer]; + // Make each "wire" in each blob's bounds of this plane "hit". 
+ int ibeg = (fblob->*wmin[index])(); + int iend = (fblob->*wmax[index])(); + while (ibeg < iend) { + m[ibeg++] = hit; + } + //std::cout << ibeg << " " << iend << " " << index << " " << hit << std::endl; + } + } + + // std::cout << "Test: Org: " << map_slices_measures.size() << " " << cluster.children().size() << std::endl; + +} + + +// Step 2. Modify activity to suit. +void RetileCluster::hack_activity( + const Cluster& cluster, + std::map, std::vector >& map_slices_measures, + const std::vector& path_wcps, + int apa, int face) const +{ + + // for (auto it = map_slices_measures.begin(); it!= map_slices_measures.end(); it++){ + // std::cout << "Before: " << it->first.first << " " << it->first.second << " " << it->second.size() << std::endl; + // for (int i=0; i!=5; i++){ + // std::cout << it->second[i].size() << " "; + // } + // std::cout << std::endl; + // } + + + const double low_dis_limit = 0.3 * units::cm; + // Get path points + // auto path_wcps = cluster.get_path_wcps(); + std::vector> path_pts; + + // Convert list points to vector with interpolation + for (const auto& wcp : path_wcps) { + geo_point_t p= cluster.point3d_raw(wcp); // index ... // raw data points ... + auto wpid_p = cluster.wire_plane_id(wcp); // wpid ... 
+ // std::cerr << "retile: path:" << wcp << " p:" << p << " wpid:" << wpid_p << "\n"; + if (path_pts.empty()) { + path_pts.push_back(std::make_pair(p, wpid_p)); + } else { + double dis = (p - path_pts.back().first).magnitude(); + if (dis < low_dis_limit) { + path_pts.push_back(std::make_pair(p, wpid_p)); + } else { + int ncount = int(dis/low_dis_limit) + 1; + auto p2 = path_pts.back().first; + auto wpid2 = path_pts.back().second; + for (int i=0; i < ncount; i++) { + Point p1 = p2 + (p - p2) * (i+1)/ncount; + auto wpid_p1 = get_wireplaneid(p1, wpid_p, wpid2, m_dv); + path_pts.push_back(std::make_pair(p1, wpid_p1)); + } + } + } + } + + + std::vector> wire_limits; + for (int i=0; i!=3; i++){ + wire_limits.push_back(std::make_pair(m_plane_infos.at(apa).at(face)[i].start_index, m_plane_infos.at(apa).at(face)[i].end_index)); + // std::cout << "Test: " << apa << " " << face << " " << wire_limits[i].first << " " << wire_limits[i].second << std::endl; + } + + // this is to get the end of the time tick range = start_tick + tick_span + const int tick_span = map_slices_measures.begin()->first.second - map_slices_measures.begin()->first.first; + + // std::cout << "Test: " << apa << " " << face << " " << tick_span << std::endl; + + // Flag points that have sufficient activity around them + std::vector path_pts_flag(path_pts.size(), false); + for (size_t i = 0; i < path_pts.size(); i++) { + if (path_pts[i].second.apa() != apa || path_pts[i].second.face() != face) continue; + auto [time_tick_u, u_wire] = cluster.grouping()->convert_3Dpoint_time_ch(path_pts[i].first, apa, m_face.at(apa).at(face)->which(), 0); + auto [time_tick_v, v_wire] = cluster.grouping()->convert_3Dpoint_time_ch(path_pts[i].first, apa, m_face.at(apa).at(face)->which(), 1); + auto [time_tick_w, w_wire] = cluster.grouping()->convert_3Dpoint_time_ch(path_pts[i].first, apa, m_face.at(apa).at(face)->which(), 2); + //std::cout << time_tick_u << " " << u_wire << " " << v_wire << " " << w_wire << std::endl; + + int 
aligned_tick = std::round(time_tick_u *1.0/ tick_span) * tick_span; + std::pair tick_range = std::make_pair(aligned_tick, aligned_tick + tick_span); + + // Check for activity in neighboring wires/time + // For each plane (U,V,W), count activity in current and adjacent wires + std::vector wire_hits = {0,0,0}; // counts for U,V,W planes + std::vector wires = {u_wire, v_wire, w_wire}; + + for (size_t plane = 0; plane < 3; plane++) { + // Check activity in current and adjacent wires + for (int delta : {-1, 0, 1}) { + int wire = wires[plane] + delta; + if (wire < wire_limits[plane].first || wire > wire_limits[plane].second) + continue; + + int layer = plane + 2; + if (map_slices_measures.find(tick_range) != map_slices_measures.end()) { + if (map_slices_measures[tick_range][layer][wire] > 0) { + wire_hits[plane] += (delta == 0) ? 1 : (delta == -1) ? 2 : 1; + } + } + } + } + + // Set flag if sufficient activity found + if (wire_hits[0] > 0 && wire_hits[1] > 0 && wire_hits[2] > 0 && + (wire_hits[0] + wire_hits[1] + wire_hits[2] >= 6)) { + path_pts_flag[i] = true; + } + // std::cout << path_pts[i] << " " << wire_hits[0] << " " << wire_hits[1] << " " << wire_hits[2] << " " << path_pts_flag[i] << " " << aligned_tick/tick_span << " " << u_wire << " " << v_wire << " " << w_wire << " " << time_tick_u << " " << std::round(time_tick_u / tick_span) << std::endl; + // std::cout << wire_hits[0] << " " << wire_hits[1] << " " << wire_hits[2] << " " << path_pts_flag[i] << std::endl; + } + + // Add missing activity based on path points + for (size_t i = 0; i < path_pts.size(); i++) { + if (path_pts[i].second.apa() != apa || path_pts[i].second.face() != face) continue; + + // Skip if point is well-covered by existing activity + if (i == 0) { + if (path_pts_flag[i] && path_pts_flag[i+1]) continue; + } else if (i+1 == path_pts.size()) { + if (path_pts_flag[i] && path_pts_flag[i-1]) continue; + } else { + if (path_pts_flag[i-1] && path_pts_flag[i] && path_pts_flag[i+1]) continue; + } + + 
auto [time_tick_u, u_wire] = cluster.grouping()->convert_3Dpoint_time_ch(path_pts[i].first, apa, m_face.at(apa).at(face)->which(), 0); + auto [time_tick_v, v_wire] = cluster.grouping()->convert_3Dpoint_time_ch(path_pts[i].first, apa, m_face.at(apa).at(face)->which(), 1); + auto [time_tick_w, w_wire] = cluster.grouping()->convert_3Dpoint_time_ch(path_pts[i].first, apa, m_face.at(apa).at(face)->which(), 2); + + int aligned_tick = std::round(time_tick_u *1.0/ tick_span) * tick_span; + + // Add activity around this point + for (int dt = -3; dt <= 3; dt++) { + int time_slice = aligned_tick + dt * tick_span; + if (time_slice < 0) continue; + + // Find or create time slice in measures map + auto slice_key = std::make_pair(time_slice, time_slice+tick_span); + if (map_slices_measures.find(slice_key) == map_slices_measures.end()) { + auto& measures = map_slices_measures[slice_key]; + measures = std::vector(5); // 2+3 layers + measures[0].push_back(1); // First layer measurement + measures[1].push_back(1); // Second layer measurement + measures[2].resize(m_plane_infos.at(apa).at(face)[0].total_wires, 0); + measures[3].resize(m_plane_infos.at(apa).at(face)[1].total_wires, 0); + measures[4].resize(m_plane_infos.at(apa).at(face)[2].total_wires, 0); + } + + // Add activity for each plane + std::vector wires = {u_wire, v_wire, w_wire}; + for (size_t plane = 0; plane < 3; plane++) { + auto& measures = map_slices_measures[slice_key][plane+2]; // +2 to skip first two layers + + for (int dw = -3; dw <= 3; dw++) { + int wire = wires[plane] + dw; + if (wire < wire_limits[plane].first || wire > wire_limits[plane].second || + std::abs(dw) + std::abs(dt) > 3) + continue; + + measures.at(wire) = 1.0; // Set activity + } + } + + } + } + + + // Loop through the map and remove slices with no activity in any plane view + auto it = map_slices_measures.begin(); + while (it != map_slices_measures.end()) { + bool missing_activity = false; + + // For each wire plane (U, V, W) + for (int pind = 0; 
pind < 3; pind++) { + const auto& measures = it->second[pind + 2]; // +2 to skip first two layers + + // Check if this plane has NO activity + if (std::none_of(measures.begin(), measures.end(), [](double val) { return val > 0.0; })) { + missing_activity = true; + break; + } + } + + // If any plane has no activity, remove this slice + if (missing_activity) { + it = map_slices_measures.erase(it); + } else { + ++it; + } + } + + +} + + + +// Step 3. Form IBlobs from activities. +std::vector RetileCluster::make_iblobs(std::map, std::vector >& map_slices_measures, int apa, int face) const +{ + std::vector ret; + + const auto& coords = m_face.at(apa).at(face)->raygrid(); + int blob_ident=0; + int slice_ident = 0; + + const double tick = m_grouping->get_tick().at(apa).at(face); + + + for (auto it = map_slices_measures.begin(); it != map_slices_measures.end(); it++){ + // Do the actual tiling. + WRG::activities_t activities = RayGrid::make_activities(m_face.at(apa).at(face)->raygrid(), it->second); + auto bshapes = WRG::make_blobs(coords, activities); + + + // { + // std::cerr << "abc: " + // << " s:"<(sframe, slice_ident++, it->first.first*tick, (it->first.second - it->first.first)*tick); + // Copy the prepared activity map into the slice + auto& slice_activity = sslice->activity(); + + for (int plane_idx = 0; plane_idx < 3; ++plane_idx) { + const int layer = plane_idx + 2; + const auto& plane_measures = it->second[layer]; + + // Get the wire plane for this face and plane + auto face_ptr = m_face.at(apa).at(face); + auto planes = face_ptr->planes(); + if (plane_idx >= static_cast(planes.size())) continue; + + auto wire_plane = planes[plane_idx]; + const auto& channels = wire_plane->channels(); + + // Map wire indices to channels and populate activity + for (size_t wire_idx = 0; wire_idx < plane_measures.size(); ++wire_idx) { + if (plane_measures[wire_idx] > 0.0) { + // Find the channel corresponding to this wire index + if (wire_idx < channels.size()) { + auto ichan = 
channels[wire_idx]; + if (ichan) { + // Set activity with value and zero uncertainty + slice_activity[ichan] = ISlice::value_t(plane_measures[wire_idx], 0.0); + } + } + } + } + } + + + for (const auto& bshape : bshapes) { + + // { + // std::cerr << "blob: " + // << " s:"<(blob_ident++, blob_value, + blob_error, bshape, sslice, m_face.at(apa).at(face)); + ret.push_back(iblob); + + } + } + + // std::cout << "Test: Blobs: " << ret.size() << std::endl; + + return ret; +} + +std::set +RetileCluster::remove_bad_blobs(const Cluster& cluster, Cluster& shad_cluster, int tick_span, int apa, int face) const +{ + // const auto& wpids = cluster.grouping()->wpids(); + // const auto& shad_wpids = shad_cluster.grouping()->wpids(); + // if (wpids.size() > 1 || shad_wpids.size() > 1) { + // throw std::runtime_error("Live or Dead grouping must have exactly one wpid: wpids.size()=" + + // std::to_string(wpids.size()) + ", shad_wpids.size()=" + + // std::to_string(shad_wpids.size())); + // } + + // Since wpids is a set, we need to get the first element using an iterator + // WirePlaneId wpid = *wpids.begin(); + // WirePlaneId shad_wpid = *shad_wpids.begin(); + // if (wpid != shad_wpid) { + // throw std::runtime_error("Live and Dead grouping must have the same wpid"); + // } + // int apa = wpid.apa(); + // int face = wpid.face(); + + // Implementation here + // Get time-organized map of original blobs + const auto& orig_time_blob_map = cluster.time_blob_map().at(apa).at(face); + + // Get time-organized map of newly created blobs + const auto& new_time_blob_map = shad_cluster.time_blob_map().at(apa).at(face); + + // Track blobs that need to be removed + std::set blobs_to_remove; + + // Examine each new blob + for (const auto& [time_slice, new_blobs] : new_time_blob_map) { + // std::cout << time_slice << " " << new_blobs.size() << std::endl; + + for (const Blob* new_blob : new_blobs) { + bool flag_good = false; + + // Check overlap with blobs in previous time slice + if 
(orig_time_blob_map.find(time_slice - tick_span) != orig_time_blob_map.end()) { + for (const Blob* orig_blob : orig_time_blob_map.at(time_slice - tick_span)) { + if (new_blob->overlap_fast(*orig_blob, 1)) { + flag_good = true; + break; + } + } + } + + // Check overlap with blobs in same time slice + if (!flag_good && orig_time_blob_map.find(time_slice) != orig_time_blob_map.end()) { + for (const Blob* orig_blob : orig_time_blob_map.at(time_slice)) { + if (new_blob->overlap_fast(*orig_blob, 1)) { + flag_good = true; + break; + } + } + } + + // Check overlap with blobs in next time slice + if (!flag_good && orig_time_blob_map.find(time_slice + tick_span) != orig_time_blob_map.end()) { + for (const Blob* orig_blob : orig_time_blob_map.at(time_slice + tick_span)) { + if (new_blob->overlap_fast(*orig_blob, 1)) { + flag_good = true; + break; + } + } + } + + // If no overlap found with original blobs in nearby time slices, mark for removal + if (!flag_good) { + blobs_to_remove.insert(new_blob); + } + } + } + + // Remove the bad blobs + return blobs_to_remove; + + +} + + +Points::node_ptr RetileCluster::mutate(Points::node_type& node) const +{ + auto* orig_cluster = reinitialize(node); + if (!orig_cluster) { + return nullptr; + } + + // Only retile clusters with flashes that are in the window + auto flash = orig_cluster->get_flash(); + if (!flash) { + return nullptr; + } + double flash_time = flash.time(); + if (! (flash_time >= m_cut_time_low && flash_time <= m_cut_time_high)) { + return nullptr; + } + + // get the span of indices + auto cc = orig_cluster->get_pcarray("isolated", "perblob"); + // convert span to vector + std::vector cc_vec(cc.begin(), cc.end()); + // for (const auto& val : cc_vec) { + // std::cout << val << " "; + // } + // std::cout << std::endl; + + auto scope = orig_cluster->get_default_scope(); + auto scope_transform = orig_cluster->get_scope_transform(scope); + // origi_cluster still have the original main cluster ... 
+ // debug_cluster(orig_cluster, "Start:"); + + // std::cout << "Xin1: " << orig_cluster->get_scope_filter(scope) << " " << orig_cluster->get_default_scope() << " " << orig_cluster->get_scope_transform(scope) << std::endl; + + auto splits = m_grouping->separate(orig_cluster, cc_vec); + // debug_cluster(orig_cluster, "Mid:"); + + // std::cout << "Xin2: " << orig_cluster->get_scope_filter(scope) << " " << orig_cluster->get_default_scope() << " " << orig_cluster->get_scope_transform(scope)<< std::endl; + + + // orig_cluster->set_scope_filter(scope, true); + orig_cluster->set_default_scope(scope); // need this to clear cache ... + // orig_cluster->set_scope_transform(scope,scope_transform); + + // std::cout << "Xin: " << orig_cluster->get_scope_filter(scope) << " " << orig_cluster->get_default_scope() << " " << orig_cluster->get_scope_transform(scope)<< std::endl; + + std::map map_id_cluster = splits; + map_id_cluster[-1] = orig_cluster; + + Cluster *shadow_orig_cluster=nullptr; + + // A temporary node and grouping facade to hold separate clusters that we + // the construct into a single "shadow" cluster for return. + Points::node_t shad_node; + auto* shad_grouping = shad_node.value.facade(); + shad_grouping->from(*m_grouping); // copies $#@%# state + + std::map shadow_splits; + for (auto& [id, cluster] : map_id_cluster) { + + auto& shad_cluster = shad_grouping->make_child(); + shad_cluster.set_ident(cluster->ident()); + + if (id==-1) shadow_orig_cluster = &shad_cluster; + else shadow_splits[id] = &shad_cluster; + + // Needed in hack_activity() but call it here to avoid call overhead. + const auto& path_wcps = cluster_path_wcps(cluster); + + auto wpids = cluster->wpids_blob(); + std::set wpid_set(wpids.begin(), wpids.end()); + for (auto it = wpid_set.begin(); it != wpid_set.end(); ++it) { + int apa = it->apa(); + int face = it->face(); + const auto& angles = m_wpid_angles.at(*it); + + // Step 1. 
+ std::map, std::vector > map_slices_measures; + get_activity(*cluster, map_slices_measures, apa, face); + + // Step 2. + hack_activity(*cluster, map_slices_measures, path_wcps, apa, face); // may need more args + + // Check for time slices with same start and end + // for (const auto& [time_range, measures] : map_slices_measures) { + // if (time_range.first == 480 or time_range.first == 1148) { + // std::cout << "Warning: Time slice with same start and end found: " + // << time_range.first << " " << time_range.second << std::endl; + // } + // } + + // Step 3. Must make IBlobs for this is what the sampler takes. + auto shad_iblobs = make_iblobs(map_slices_measures, apa, face); // may need more args + + // Steps 4-6. + auto niblobs = shad_iblobs.size(); + + // This is the 3rd generation of copy-paste for sampling. Gen 2 is + // in UbooneClusterSource. OG is in PointTreeBuilding. The reason + // for the copy-pastes is insufficient attentino to proper code + // factoring starting in PointTreeBuilding. Over time, it is almost + // guaranteed these copy-pastes become out-of-sync. A 4th copy is + // likely found in the steiner-related area. + + for (size_t bind=0; bindget_tick().at(apa).at(face); //500*units::ns; + + auto pcs = Aux::sample_live(sampler, iblob, angles, tick, bind); + /// DO NOT EXTEND FURTHER! see #426, #430 + + if (pcs.empty()) { + SPDLOG_DEBUG("retile: skipping blob {} with no points", iblob->ident()); + continue; + } + shad_cluster.node()->insert(Tree::Points(std::move(pcs))); + } + int tick_span = map_slices_measures.begin()->first.second - map_slices_measures.begin()->first.first; + // std::cout << "Test: " << shad_cluster.npoints() << " " << " " << shad_cluster.nchildren() << std::endl; + + // remove blobs after creating facade_blobs ... 
+ auto blobs_to_remove = remove_bad_blobs(*cluster, shad_cluster, tick_span, apa, face); + for (const Blob* blob : blobs_to_remove) { + Blob& b = const_cast(*blob); + shad_cluster.remove_child(b); + } + // shad_cluster.clear_cache(); + // std::cout << "Test: " << apa << " " << face << " " << blobs_to_remove.size() << std::endl; + } + + + // add the new scope to the newly corrected shad_cluster ... + auto& default_scope = cluster->get_default_scope(); + auto& raw_scope = cluster->get_raw_scope(); + + if (default_scope.hash()!=raw_scope.hash()){ + auto correction_name = cluster->get_scope_transform(default_scope); + // std::vector filter_results = c + shad_cluster.add_corrected_points(m_pcts, correction_name); + // Get the new scope with corrected points + const auto correction_scope = shad_cluster.get_scope(correction_name); + // // Set this as the default scope for viewing + shad_cluster.from(*cluster); // copy state from original cluster + // std::cout << "Test: Same:" << default_scope.hash() << " " << raw_scope.hash() << std::endl; + } + + } + + + // Restore input cluster + auto cc2 = m_grouping->merge(splits,orig_cluster); + + // Record how we had split it. + orig_cluster->put_pcarray(cc2, "isolated", "perblob"); + + // Merge the separate shadow clusters into one. + auto cc3 = shad_grouping->merge(shadow_splits, shadow_orig_cluster); + + // Record its splits + shadow_orig_cluster->put_pcarray(cc3, "isolated", "perblob"); + + + // Send merged cluster node to caller. + return shad_node.remove(shad_node.children().front()); +} + + diff --git a/clus/src/retile_cluster.h b/clus/src/retile_cluster.h new file mode 100644 index 000000000..efe6d4f6b --- /dev/null +++ b/clus/src/retile_cluster.h @@ -0,0 +1,171 @@ +// This provides RetileCluster aka "IPCTreeMutate". +// +// Warning: this lives up to its name. It may change the input cluster. +// +// It requires the input cluster node to have a (grouping) node parent. 
+// +// The retiling of a cluster follows this general sequence: +// +// 1) constructs layers of "activity" from input grouping. +// 2) applies "hacks" to the activity. +// 3) runs WCT tiling to create blobs. +// 4) runs blobs sampling to make point clouds +// 5) produces clusters such that the new blobs formed from an old cluster form a new "shadow" cluster. +// 6) forms a PC-tree +// 7) outputs the new grouping +// + + +#ifndef WIRECELLCLUS_RETILE_CLUSTER_H +#define WIRECELLCLUS_RETILE_CLUSTER_H + + +#include "WireCellUtil/RayTiling.h" +#include "WireCellUtil/RayHelpers.h" + +#include "WireCellIface/IBlob.h" +#include "WireCellIface/IBlobSampler.h" +#include "WireCellIface/IAnodeFace.h" +#include "WireCellIface/IDetectorVolumes.h" +#include "WireCellIface/IPCTreeMutate.h" + +#include "WireCellAux/PlaneTools.h" + +#include "WireCellClus/Facade_Grouping.h" +#include "WireCellClus/Facade_Cluster.h" +#include "WireCellClus/ClusteringFuncsMixins.h" + + +#include "WireCellClus/IEnsembleVisitor.h" +#include "WireCellClus/ClusteringFuncs.h" +#include "WireCellClus/ClusteringFuncsMixins.h" + +#include "WireCellIface/IConfigurable.h" + +#include "WireCellUtil/NamedFactory.h" + +#include "WireCellAux/SimpleBlob.h" +#include "WireCellAux/SamplingHelpers.h" + +#include "WireCellUtil/PointTree.h" + +#include "WireCellAux/SimpleSlice.h" +#include "WireCellClus/GroupingHelper.h" + + +#include +#include + +using namespace WireCell; +using namespace WireCell::Clus; +using namespace WireCell::Clus::Facade; +using namespace WireCell::PointCloud::Tree; + +namespace WireCell::Clus { + + + +class RetileCluster : public IConfigurable, public IPCTreeMutate, protected Clus::NeedDV, protected Clus::NeedPCTS { + + + +public: + + RetileCluster() {} + virtual ~RetileCluster() {}; + + // IConfigurable API + void configure(const WireCell::Configuration& config); + virtual Configuration default_configuration() const { + Configuration cfg; + return cfg; + } + + // IPCTreeMutate API + virtual 
std::unique_ptr mutate(node_t& node) const; + +protected: + // Step 0. Collect grouping info + Facade::Cluster* reinitialize(Points::node_type& node) const; + + // Cache + mutable Grouping* m_grouping = nullptr; + mutable std::map > m_wpid_angles; + std::map>> m_plane_infos; + /** Configuration: "sampler" (required) + + The type/name an IBlobSampler for producing the "3d" point cloud. + + If not given, the retailed blob tree nodes will not have point clouds. + */ + + bool m_verbose{false}; + + + std::map> m_samplers; + + // fixme: this restricts the retiling to single-anode-face clusters. + // As such, it will likely freak out if fed clusters that have been + // stitched across anode faces. Since tiling is inherently a per-face + // operation, this may be okay. + /** Configuration "face" (optional, default is 0) + + The INDEX of the face in the anode's list of faces to use. + */ + std::map> m_face; // now apa/face --> m_face + + // Step 3. Form IBlobs from activities. + std::vector make_iblobs(std::map, std::vector >& map_slices_measures, int apa, int face) const; + + // Step 1. Build activities from blobs in a cluster. + void get_activity(const Cluster& cluster, std::map, std::vector >& map_slices_measures, int apa, int face) const; + + + // Step 2. Modify activity to suit. + void hack_activity(const Cluster& cluster, + std::map, std::vector >& map_slices_measures, + const std::vector& path_wcps, + int apa, int face) const; + +private: + + + std::set remove_bad_blobs(const Cluster& cluster, Cluster& shad_cluster, int tick_span, int apa, int face) const; + + + // Remaining steps are done in the operator() directly. + + + + /** Configuration "cut_time_low" (optional, default is -1e9) + Lower bound for time cut in nanoseconds + */ + double m_cut_time_low; + + /** Configuration "cut_time_high" (optional, default is 1e9) + Upper bound for time cut in nanoseconds + */ + double m_cut_time_high; + + + + /** Configuration "anode" (required) + + The type/name of the anode. 
+ */ + + + // Wrap up getting the shortest path for the cluster high/low points. + const std::vector& cluster_path_wcps(const Cluster* cluster) const { + // find the highest and lowest points + std::pair pair_points = cluster->get_highest_lowest_points(); + // std::cerr << "retile: hilo: " << pair_points.first << " " << pair_points.second << std::endl; + int high_idx = cluster->get_closest_point_index(pair_points.first); + int low_idx = cluster->get_closest_point_index(pair_points.second); + return cluster->graph_algorithms().shortest_path(high_idx, low_idx); + } +}; // RetileCluster + +} + +#endif // WIRECELLCLUS_RETILE_CLUSTER_H \ No newline at end of file diff --git a/clus/test/doctest_boost_dijkstra.cxx b/clus/test/doctest_boost_dijkstra.cxx index 26bcafd6b..fd4219444 100644 --- a/clus/test/doctest_boost_dijkstra.cxx +++ b/clus/test/doctest_boost_dijkstra.cxx @@ -26,10 +26,10 @@ TEST_CASE("standalone dijkstra") { Vertex v4 = boost::add_vertex(graph); // Add edges - boost::add_edge(v0, v1, TestEdgeProp{1}, graph).first; - boost::add_edge(v1, v4, TestEdgeProp{1}, graph).first; - boost::add_edge(v0, v2, TestEdgeProp{1}, graph).first; - boost::add_edge(v2, v4, TestEdgeProp{2}, graph).first; + boost::add_edge(v0, v1, TestEdgeProp{1}, graph); + boost::add_edge(v1, v4, TestEdgeProp{1}, graph); + boost::add_edge(v0, v2, TestEdgeProp{1}, graph); + boost::add_edge(v2, v4, TestEdgeProp{2}, graph); std::vector parents(boost::num_vertices(graph)); std::vector distances(boost::num_vertices(graph)); diff --git a/clus/test/doctest_clustering_prototype.cxx b/clus/test/doctest_clustering_prototype.cxx index 88199d8c2..4ffb1dd14 100644 --- a/clus/test/doctest_clustering_prototype.cxx +++ b/clus/test/doctest_clustering_prototype.cxx @@ -1,9 +1,15 @@ +#include "WireCellClus/Facade.h" +#include "WireCellClus/IPCTransform.h" + +#include "WireCellIface/IConfigurable.h" + #include "WireCellUtil/PointTree.h" #include "WireCellUtil/PointTesting.h" #include "WireCellUtil/doctest.h" 
#include "WireCellUtil/Logging.h" +#include "WireCellUtil/PluginManager.h" +#include "WireCellUtil/NamedFactory.h" -#include "WireCellClus/Facade.h" #include @@ -11,32 +17,35 @@ using namespace WireCell; using namespace WireCell::PointTesting; using namespace WireCell::PointCloud; using namespace WireCell::PointCloud::Tree; -using namespace WireCell::PointCloud::Facade; -using fa_float_t = WireCell::PointCloud::Facade::float_t; -using fa_int_t = WireCell::PointCloud::Facade::int_t; +using namespace WireCell::Clus::Facade; +using fa_float_t = WireCell::Clus::Facade::float_t; +using fa_int_t = WireCell::Clus::Facade::int_t; // WireCell::PointCloud::Tree::scoped_pointcloud_t using spdlog::debug; using spdlog::warn; using node_ptr = std::unique_ptr; +void print_ds(const Dataset& ds) { + std::stringstream ss; + ss << " size_major " << ds.size_major() << std::endl; + for (const auto& key : ds.keys()) { + ss << key << ": "; + // auto arr = ds.get(key)->elements(); + // for(auto elem : arr) { + // ss << elem << " "; + // } + ss << std::endl; + } + debug(ss.str()); +} + // No more explicit DisjointDataset. It is a PointCloud::Tree::scoped_pointcloud_t. 
template void print_dds(const DisjointDataset& dds) { for (size_t idx=0; idxelements(); - ss << key << ": "; - for(auto elem : arr) { - ss << elem << " "; - } - ss << std::endl; - } - debug(ss.str()); + print_ds(ds); } } @@ -64,6 +73,7 @@ Points::node_ptr make_simple_pctree() {"center_x", Array({(fa_float_t)0.5})}, {"center_y", Array({(fa_float_t)0.})}, {"center_z", Array({(fa_float_t)0.})}, + {"wpid", Array({(fa_int_t)WirePlaneId(WirePlaneLayer_t::kAllLayers).ident()})}, {"npoints", Array({(fa_int_t)10})}, {"slice_index_min", Array({(fa_int_t)0})}, {"slice_index_max", Array({(fa_int_t)1})}, @@ -93,6 +103,7 @@ Points::node_ptr make_simple_pctree() {"center_x", Array({(fa_float_t)1.5})}, {"center_y", Array({(fa_float_t)0.})}, {"center_z", Array({(fa_float_t)0.})}, + {"wpid", Array({(fa_int_t)WirePlaneId(WirePlaneLayer_t::kAllLayers).ident()})}, {"npoints", Array({(fa_int_t)10})}, {"slice_index_min", Array({(fa_int_t)0})}, {"slice_index_max", Array({(fa_int_t)1})}, @@ -151,7 +162,7 @@ TEST_CASE("clustering prototype point tree") } // name, coords, [depth] - Scope scope{ "3d", {"x","y","z"}}; + Scope scope_3d_raw{ "3d", {"x","y","z"}}; auto const& s3d = rval.scoped_view({ "3d", {"x","y","z"}}); auto const& pc3d = s3d.pcs(); @@ -186,7 +197,7 @@ TEST_CASE("clustering prototype point tree") debug("knn point {} at distance {} from query is in local point cloud {} at local point {}", index, metric, node_index, pin_index); const Dataset& pc = pc3d[node_index]; - for (const auto& name : scope.coords) { + for (const auto& name : scope_3d_raw.coords) { debug("\t{} = {}", name, pc.get(name)->element(pin_index)); } } @@ -218,12 +229,18 @@ TEST_CASE("clustering prototype facade") // (0.5 * 1 + 1.5 * 2) / 3 = 1.1666666666666665 debug("blob 0: q={}, r={}", blobs[0]->charge(), blobs[0]->center_x()); debug("blob 1: q={}, r={}", blobs[1]->charge(), blobs[1]->center_x()); + REQUIRE(blobs[0]->center_x() == 0.5); + REQUIRE(blobs[1]->center_x() == 1.5); + REQUIRE(blobs[0]->charge() == 1); 
+ REQUIRE(blobs[1]->charge() == 2); + double expect = 0; expect += blobs[0]->charge() * blobs[0]->center_x(); expect += blobs[1]->charge() * blobs[1]->center_x(); expect /= blobs[0]->charge() + blobs[1]->charge(); debug("expect average pos {}", expect); - auto ave_pos = pcc.calc_ave_pos({1,0,0}, 1); + // there is now another calc_ave_pos(const geo_point_t& origin, int N) const; + auto ave_pos = pcc.calc_ave_pos({1,0,0}, 1.0); debug("ave_pos: {} | expecting (1.1666666666666665 0 0)", ave_pos); auto l1 = fabs(ave_pos[0] - 1.1666666666666665) + fabs(ave_pos[1]) + fabs(ave_pos[2]); CHECK(l1 < 1e-3); @@ -263,26 +280,26 @@ TEST_CASE("clustering prototype facade") } -static void print_MCUGraph(const MCUGraph& g) { - std::stringstream ss; - ss << "MCUGraph:" << std::endl; - ss << "Vertices: " << num_vertices(g) << std::endl; - ss << "Edges: " << num_edges(g) << std::endl; - ss << "Vertex Properties:" << std::endl; - auto vrange = boost::vertices(g); - for (auto vit = vrange.first; vit != vrange.second; ++vit) { - auto v = *vit; - ss << "Vertex " << v << ": Index = " << g[v].index << std::endl; - } - ss << "Edge Properties:" << std::endl; - auto erange = boost::edges(g); - auto weightMap = get(boost::edge_weight, g); - for (auto eit = erange.first; eit != erange.second; ++eit) { - auto e = *eit; - ss << "Edge " << e << ": Distance = " << get(weightMap, e) << std::endl; - } - debug(ss.str()); -} +// static void print_MCUGraph(const MCUGraph& g) { +// std::stringstream ss; +// ss << "MCUGraph:" << std::endl; +// ss << "Vertices: " << num_vertices(g) << std::endl; +// ss << "Edges: " << num_edges(g) << std::endl; +// ss << "Vertex Properties:" << std::endl; +// auto vrange = boost::vertices(g); +// for (auto vit = vrange.first; vit != vrange.second; ++vit) { +// auto v = *vit; +// ss << "Vertex " << v << ": Index = " << g[v].index << std::endl; +// } +// ss << "Edge Properties:" << std::endl; +// auto erange = boost::edges(g); +// auto weightMap = get(boost::edge_weight, g); 
+// for (auto eit = erange.first; eit != erange.second; ++eit) { +// auto e = *eit; +// ss << "Edge " << e << ": Distance = " << get(weightMap, e) << std::endl; +// } +// debug(ss.str()); +// } TEST_CASE("clustering prototype pca") { @@ -295,11 +312,11 @@ TEST_CASE("clustering prototype pca") REQUIRE(pccptr->grouping() == grouping); Cluster& pcc = *pccptr; - geo_point_t center = pcc.get_center(); + geo_point_t center = pcc.get_pca().center; debug("center: {} {} {}", center.x(), center.y(), center.z()); for (size_t ind=0; ind<3; ++ind) { - auto axis = pcc.get_pca_axis(ind); - auto val = pcc.get_pca_value(ind); + auto axis = pcc.get_pca().axis.at(ind); + auto val = pcc.get_pca().values.at(ind); debug("pca{}: {} {} {} {}", ind, axis.x(), axis.y(), axis.z(), val); } } @@ -384,20 +401,46 @@ TEST_CASE("clustering prototype Simple3DPointCloud") } -TEST_CASE("clustering prototype dijkstra_shortest_paths") -{ - Points::node_t root_node; - Grouping* grouping = root_node.value.facade(); - REQUIRE(grouping != nullptr); - root_node.insert(make_simple_pctree()); - Cluster* pccptr = grouping->children()[0]; - REQUIRE(pccptr != nullptr); - REQUIRE(pccptr->grouping() == grouping); - Cluster& pcc = *pccptr; - pcc.Create_graph(false); - print_MCUGraph(*pcc.get_graph()); - pcc.dijkstra_shortest_paths(5, false); -} +// static IPCTransformSet::pointer get_pcts() +// { +// PluginManager& pm = PluginManager::instance(); +// pm.add("WireCellClus"); + +// { +// auto icfg = Factory::lookup("DetectorVolumes"); +// auto cfg = icfg->default_configuration(); +// icfg->configure(cfg); +// } +// { +// auto icfg = Factory::lookup("PCTransformSet"); +// auto cfg = icfg->default_configuration(); +// icfg->configure(cfg); +// } +// { +// auto icfg = Factory::lookup("PCTransformSet"); +// auto cfg = icfg->default_configuration(); +// icfg->configure(cfg); +// } + +// return Factory::find_tn("PCTransformSet"); +// } + +// TEST_CASE("clustering prototype dijkstra_shortest_paths") +// { +// auto pcts = 
get_pcts(); + +// Points::node_t root_node; +// Grouping* grouping = root_node.value.facade(); +// REQUIRE(grouping != nullptr); +// root_node.insert(make_simple_pctree()); +// Cluster* pccptr = grouping->children()[0]; +// REQUIRE(pccptr != nullptr); +// REQUIRE(pccptr->grouping() == grouping); +// Cluster& pcc = *pccptr; +// pcc.Create_graph(pcts, false); +// print_MCUGraph(*pcc.get_graph()); +// pcc.dijkstra_shortest_paths(pcts, 5, false); +// } TEST_CASE("clustering prototype Facade separate") @@ -435,3 +478,204 @@ TEST_CASE("clustering prototype Facade separate") debug("after removal, grouping has {} children", grouping->nchildren()); REQUIRE(grouping->nchildren() == 1); } + + + +/** + This gives an example of how to do the following: + - add xc,yc,zc arrays representing "corrected coordinates" to blob-local "3d" PC. + - create a "filtered scoped view" on xc,yc,zc. + + - create an equivalent scoped view on x,y,z + + See also the test "point tree filtered scoped view" in util/test/doctest_pointtree.cxx + + */ +TEST_CASE("clustering prototype corrected coordinates") +{ + Points::node_t root_node; + root_node.insert(make_simple_pctree()); // cluster 1 + root_node.insert(make_simple_pctree()); // cluster 2 + + // We first get all blobs in a scoped view. + // + // There is more than one way to do this. Here, we rely on the coincidence + // that only blob nodes have a local PC named "3d". + Scope all_blobs_scope{"3d",{"x","y","z"}}; + auto& all_sv = root_node.value.scoped_view(all_blobs_scope); + + // Now add xc,yc,zc by making a "correction". The actual "correction" here + // bogus and just serves as an example. + for (auto* node : all_sv.nodes()) { + Dataset& pc3d = node->value.local_pc("3d"); + auto npoints = pc3d.size_major(); + REQUIRE(npoints); + + // Make copy as we will mutate. 
+ auto xc = Array(*pc3d.get("x")); + auto yc = Array(*pc3d.get("y")); + auto zc = Array(*pc3d.get("z")); + auto xcv = xc.elements(); + auto ycv = yc.elements(); + auto zcv = zc.elements(); + + // Here we do a totally bogus "correction" just to make some different + // arrays. + for (size_t ind=0; ind eo_nodes(eoc_sv.nodes().begin(), eoc_sv.nodes().end()); + Scope everyother_orig_scope{"3d",{"x","y","z"},0, "everyother_orig"}; + auto& eoo_sv = root_node.value.scoped_view(everyother_orig_scope, + [&](const Points::node_t& node) { + return eo_nodes.find(&node) != eo_nodes.end(); + }); + REQUIRE(eoo_sv.nodes().size() == eoc_sv.nodes().size()); + + // We can now do a "k-d tree query" on the "corrected" coordinate PC and + // then use the returned point indices to refer to points in the "original" + // PC. + + // Get k-d trees for each SV. We will query kdc "corr" and index into kdo + // "orig" points. + + const auto& kdo = eoo_sv.kd(); // orig + const auto& kdc = eoc_sv.kd(); // corr + const auto& kdo_pts = kdo.points(); + const auto& kdc_pts = kdc.points(); + CHECK(kdo_pts.size() == kdc_pts.size()); + + // Do some random k-d tree query. + const std::vector origin = {0,0,0}; + auto kdc_nn = kdc.knn(1, origin); + CHECK(kdc_nn.size() == 1); + + for (const auto& [index, metric] : kdc_nn) { + debug("query=({},{},{}) orig=({},{},{}) corr=({},{},{}) metric={}", + origin[0], origin[1], origin[2], + kdo_pts[0][index], kdo_pts[1][index], kdo_pts[2][index], + kdc_pts[0][index], kdc_pts[1][index], kdc_pts[2][index], + metric); + + // We next show some ways that this data is all interrelated. + + // We can get the node that provided the point at the index: + auto* node = eoo_sv.node_with_point(index); + + // That is literally indexing the nodes by the "major index" of the + // point. Remember, there is the pair of (major,minor) indices + // corresponding to the "point" index. 
The "major" is simply the index + // of the node in the SV and the "minor" is the point in the local PC of + // that node. + REQUIRE(node == eoo_sv.nodes().at(kdo.major_index(index))); + + // Again, the minor index is the index of the point in the node's local + // PC. We can use it in the local PC of the node. + auto minor_index = kdo.minor_index(index); + const auto& pc3d = node->value.local_pc("3d"); + double xc = pc3d.get("xc")->element(minor_index); + REQUIRE(xc == kdc_pts[0][index]); // same! + + } + +} + + +TEST_CASE("haiwang") +{ + Points::node_t root_node; + root_node.insert(make_simple_pctree()); // cluster 1 + // root_node.insert(make_simple_pctree()); // cluster 2 + + Scope all_scope{"3d",{"x","y","z"}}; + auto& all_sv = root_node.value.scoped_view(all_scope); + debug("all_sv has {} nodes", all_sv.nodes().size()); + print_dds(all_sv.pcs()); + + Scope smallx_scope{"3d",{"x","y","z"}, 0, "smallx"}; + auto& smallx_sv = root_node.value.scoped_view(smallx_scope, + [&](const Points::node_t& node) { + debug("filtering node"); + const auto& lpcs = node.value.local_pcs(); + debug("filtering node with {} local pcs", lpcs.size()); + const auto& it = lpcs.find("3d"); + if (it == lpcs.end()) { + return false; + } + const auto& pc = it->second; + // const auto& x = pc.get("x"); + // const auto xv = x->elements(); + const auto& wpid = pc.get("wpid"); + const auto wpidv = wpid->elements(); + // for (auto val : xv) { + // debug("filtering x={}", val); + // if (val < 1.) 
{ + // return true; + // } + // } + for (auto val : wpidv) { + debug("filtering wpid={}", val); + if (val < 11) { + debug("passing wpid={}", val); + return true; + } + } + return false; + } + ); + debug("smallx_sv has {} nodes", smallx_sv.nodes().size()); + debug("print_dds(smallx_sv.pcs());"); + print_dds(smallx_sv.pcs()); + auto smallx_fp = smallx_sv.flat_pc("3d"); + debug("print_ds(smallx_fp);"); + print_ds(smallx_fp); + auto smallx_fc = smallx_sv.flat_coords(); + debug("print_ds(smallx_fc);"); + print_ds(smallx_fc); + + { + WireCell::WirePlaneId wpid{-1}; + debug("wpid: wpid.ident() {} wpid.name() {} ok? {} valid? {}", wpid.ident(), wpid.name(), wpid.valid()? true : false, wpid.valid()); + } + + { + WireCell::WirePlaneId wpid{0}; + debug("wpid: wpid.ident() {} wpid.name() {} ok? {} valid? {}", wpid.ident(), wpid.name(), wpid.valid()? true : false, wpid.valid()); + } +} diff --git a/clus/test/doctest_facades.cxx b/clus/test/doctest_facades.cxx index a17edd91e..d90f2627e 100644 --- a/clus/test/doctest_facades.cxx +++ b/clus/test/doctest_facades.cxx @@ -1,12 +1,11 @@ #include "WireCellUtil/doctest.h" #include "WireCellUtil/Logging.h" -#include "WireCellUtil/Logging.h" #include "WireCellClus/Facade_Cluster.h" using namespace WireCell; using namespace WireCell::PointCloud::Tree; -using namespace WireCell::PointCloud::Facade; +using namespace WireCell::Clus::Facade; using spdlog::debug; TEST_CASE("clustering facade scalar") @@ -24,3 +23,33 @@ TEST_CASE("clustering facade scalar") CHECK(cid == 42); // debug("no1={} no2={} cide={}", no1, no2, cid); } +TEST_CASE("clustering facade graphs") +{ + Points::node_t node; + Cluster* cluster = node.value.facade(); + + CHECK(cluster->graph_store().size() == 0); + + auto& gr1 = cluster->make_graph("test1"); + CHECK(cluster->graph_store().size() == 1); + CHECK(boost::num_vertices(gr1) == 0); + + auto& gr2 = cluster->make_graph("test2", 10); + CHECK(cluster->graph_store().size() == 2); + CHECK(boost::num_vertices(gr2) == 10); + + 
auto gr = cluster->take_graph("test2"); + CHECK(cluster->graph_store().size() == 1); + CHECK(boost::num_vertices(gr) == 10); + + cluster->give_graph("test1", std::move(gr)); + // gr at this point is in some undefined state + auto& gr3 = cluster->get_graph("test1"); + CHECK(cluster->graph_store().size() == 1); + CHECK(boost::num_vertices(gr3) == 10); + + auto& gr4 = cluster->get_graph("new one"); + CHECK(cluster->graph_store().size() == 2); + CHECK(boost::num_vertices(gr4) == 0); + +} diff --git a/clus/test/doctest_graphs.cxx b/clus/test/doctest_graphs.cxx new file mode 100644 index 000000000..eabc29fbb --- /dev/null +++ b/clus/test/doctest_graphs.cxx @@ -0,0 +1,124 @@ +#include "WireCellClus/Graphs.h" +#include "WireCellUtil/GraphTools.h" + +#include "WireCellUtil/doctest.h" +#include "WireCellUtil/Logging.h" + +#include +#include + +using namespace WireCell::Clus::Graphs::Weighted; +using WireCell::GraphTools::edge_range; +using WireCell::GraphTools::vertex_range; +using spdlog::debug; + +// static +// filtered_graph_type reduce(const graph_type& graph, const vertex_set& vertices, bool accept) +// { +// auto filter = [&](vertex_type vtx) { +// return accept == (vertices.find(vtx) != vertices.end()); +// }; +// return filtered_graph_type(graph, boost::keep_all(), filter); +// } + +static +filtered_graph_type reduce_edges(const graph_type& graph, const edge_set& edges, bool accept=true); +static +filtered_graph_type reduce_edges(const graph_type& graph, const edge_set& edges, bool accept) +{ + auto filter = [&](edge_type edge) { + debug("check edge: {} -- {}", boost::source(edge, graph), boost::target(edge, graph)); + return accept == (edges.count(edge) > 0); + }; + edge_predicate epred = filter; + vertex_predicate vpred = boost::keep_all(); + return filtered_graph_type(graph, epred, vpred); +} + +// static +// filtered_graph_type weight_threshold(const graph_type& graph, double threshold, bool accept) +// { +// auto weight_map = get(boost::edge_weight, graph); +// 
auto filter = [&](edge_type edge) { +// return accept == (get(weight_map, edge) >= threshold); +// }; +// return filtered_graph_type(graph, filter, boost::keep_all()); +// } + + +TEST_CASE("clus graphs") +{ + graph_type graph(3); + REQUIRE(boost::num_vertices(graph) == 3); + + auto [e0,ok0] = boost::add_edge(0, 1, graph); + REQUIRE(ok0); + auto [e1,ok1] = boost::add_edge(0, 2, graph); + REQUIRE(ok1); + + REQUIRE (boost::num_edges(graph) == 2); + for (const auto& edge : edge_range(graph)) { + debug("made edge: {} -- {}", boost::source(edge, graph), boost::target(edge, graph)); + } + + edge_set to_filter = {e0}; + + auto filtered_with_e0 = reduce_edges(graph, to_filter); + // CHECK (boost::num_edges(filtered_with_e0) == 1); + // Gemini lied to me, num_edges returns original number, not filtered! + CHECK (boost::num_edges(filtered_with_e0) == 2); + for (const auto& edge : edge_range(filtered_with_e0)) { + debug("have edge: {} -- {}", boost::source(edge, filtered_with_e0), boost::target(edge, filtered_with_e0)); + } + CHECK (boost::num_edges(filtered_with_e0) == 2); + auto filtered_without_e0 = reduce_edges(graph, to_filter, false); + CHECK (boost::num_edges(filtered_without_e0) == 2); +} + +void dump_graph(const std::string& filename, const graph_type& graph) +{ + debug("dumping graph to graphviz dot file: {}", filename); + std::ofstream fp(filename); + boost::write_graphviz(fp, graph, + boost::make_label_writer(boost::get(boost::vertex_index, graph)), + boost::make_label_writer(boost::get(boost::edge_weight, graph))); +} + +TEST_CASE("clus graphs voronoi") +{ + graph_type graph(10); + boost::add_edge(0, 1, 1.0, graph); + boost::add_edge(0, 2, 2.0, graph); + boost::add_edge(1, 2, 1.0, graph); + boost::add_edge(0, 3, 0.1, graph); + boost::add_edge(1, 4, 0.1, graph); + boost::add_edge(2, 5, 0.1, graph); + boost::add_edge(5, 6, 0.2, graph); + boost::add_edge(6, 7, 0.2, graph); + boost::add_edge(7, 8, 0.2, graph); + boost::add_edge(8, 9, 0.2, graph); // 8-2 is 0.7 + 
boost::add_edge(9, 0, 0.8, graph); // 9-2 is 0.9 + dump_graph("doctest-graphs-voronoi-graph.dot", graph); + + + std::vector terminals = {0,1,2}; + auto vor = voronoi(graph, terminals); + for (auto vtx : vertex_range(graph)) { + auto path = terminal_path(graph, vor, vtx); + std::stringstream ss; + for (auto p : path) { + ss << " " << p; + } + debug("{}: terminal={} distance={} last_edge=({} -> {}), path:{}", + vtx, + vor.terminal[vtx], + vor.distance[vtx], + boost::source(vor.last_edge[vtx], graph), + boost::target(vor.last_edge[vtx], graph), + ss.str() + ); + } + auto sg = steiner_graph(graph, vor); + dump_graph("doctest-graphs-steiner-graph.dot", sg); + +} diff --git a/clus/test/doctest_prsegment.cxx b/clus/test/doctest_prsegment.cxx new file mode 100644 index 000000000..628720b00 --- /dev/null +++ b/clus/test/doctest_prsegment.cxx @@ -0,0 +1,17 @@ +#include "WireCellClus/PRSegment.h" + +#include "WireCellUtil/Logging.h" + +#include "WireCellUtil/doctest.h" + +#include + +using namespace WireCell; +using namespace WireCell::Clus; + +TEST_CASE("clus pr segment") { + PR::Segment seg; + + REQUIRE(! seg.descriptor_valid()); + +} diff --git a/clus/test/doctest_prvertex.cxx b/clus/test/doctest_prvertex.cxx new file mode 100644 index 000000000..746f38e64 --- /dev/null +++ b/clus/test/doctest_prvertex.cxx @@ -0,0 +1,19 @@ +#include "WireCellClus/PRVertex.h" + +#include "WireCellUtil/Logging.h" + +#include "WireCellUtil/doctest.h" + +#include + +using namespace WireCell; +using namespace WireCell::Clus; + +TEST_CASE("clus pr vertex") { + PR::Vertex vtx; + + REQUIRE(! vtx.fit().valid()); + + REQUIRE(! 
vtx.descriptor_valid()); + +} diff --git a/clus/test/test-porting.bats b/clus/test/test-porting.bats new file mode 100644 index 000000000..fe5f8f7b4 --- /dev/null +++ b/clus/test/test-porting.bats @@ -0,0 +1,166 @@ +#!/usr/bin/env bats + +# This is derived from various parts of: + +base_url="https://github.com/HaiwangYu/wcp-porting-img" +raw_url="$base_url/raw/refs/heads/main" + +bats_load_library wct-bats.sh + +# Process a log from one of the tests into a smaller "digest" log and check if +# it changed if a historical version exists. +do_log_digest () { + local log="$1"; shift # the log file name + debug "Checking log: $log" + test -s "$log" + + local ign="${1:-downloads}" + local dig="${log%.log}.dig" + + sort $log | grep ' D ' | grep '\[' | egrep -v "$ign" | while read line + do + printf "%s\n" "${line:15}" + done > $dig + + debug "Checking log digest: $dig" + test -s $dig + + old_dig=$(historical_files --allow-missing --last $(current-test-name)/$dig) + if [ -n "$old_dig" ] ; then + diff $dig $old_dig + fi + saveout -c history $dig +} + + +do_prep () { + name="$1" + cd_tmp file + mkdir -p $name + cd $name +} + +do_qlport_like () { + local name=$1 + do_prep $name + + local url="$raw_url/qlport/rootfiles/nuselEval_5384_130_6501.root" + + local dat="$(download_file "$url")" + local cfg="$(relative_path test-porting/$name/main.jsonnet)" + local bee="$name.zip" + local log="$name.log" + local dig="$name.dig" + local dag="$name.pdf" + + test -n "$dat" + + run_idempotently -s "$cfg" -t "$dag" -- \ + wirecell-pgraph dotify $cfg $dag \ + -A kind=both -A "infiles=$dat" -A "beezip=$bee" + + + run_idempotently -s "$cfg" -s "$dat" -t "$bee" -t "$log" -- \ + bash -c "wire-cell -l stderr -L debug \ + -A kind=both -A infiles=$dat -A beezip=$bee $cfg > $log 2>&1" + do_log_digest $log $dat + + for zip in $name.zip + do + file_larger_than $zip 22 + done +} + +@test "porting qlport" { + do_qlport_like "qlport" +} + +@test "porting steiner" { + do_qlport_like "steiner" +} + +@test 
"porting stm" { + do_qlport_like "stm" +} + + +@test "porting pdhd" { + + local name=pdhd + do_prep $name + + local gitref=b5f7f21e1ca853e29d746ae9044ac79c885956b0 + local indir=$(download_git_subdir --ref $gitref "$base_url" pdhd/1event) + debug "PDHD input from $indir" + test -n "$indir" + test -d "$indir" + test -s "$indir/clusters-apa-apa0.tar.gz" + + local cfg="$(relative_path test-porting/$name/main.jsonnet)" + local bee="mabc-all-apa.zip" + local log="$name.log" + local dig="$name.dig" + + run_idempotently -s "$cfg" -s "$dat" -t "$bee" -t "$log" -- \ + bash -c "wire-cell -l stderr -L debug \ + -A input=$indir $cfg > $log 2>&1" + + # Note, "empty" zips have finite size, but none should 0 bytes. + for zip in *.zip ; do + test -s $zip + done + + for zip in mabc-all-apa.zip mabc-apa0-face0.zip mabc-apa1-face1.zip mabc-apa2-face0.zip mabc-apa3-face1.zip + do + file_larger_than $zip 22 + done + + do_log_digest $log $indir +} + +@test "porting fgval" { + + local name=fgval + do_prep $name + + local run=5384 + local sub=130 + local evt=6501 + + local url="$raw_url/$name/result_${run}_${sub}_${evt}.root" + local cfg1="$(relative_path test-porting/$name/stage1.jsonnet)" + + local dat="$(download_file "$url")" + test -s "$dat" + debug "Input file: $dat" + + for what in live dead ; do + mkdir -p $what + + local out="$what/clusters.npz" + local log="$what/clusters.log" + + run_idempotently -s "$cfg1" -s "$dat" -t "$out" -t "$log" -- \ + wire-cell -l "$log" -L debug -A iname="$dat" -A oname="$out" -A kind="$what" "$cfg1" + do_log_digest "$log" + done + + local cfg2="$(relative_path test-porting/$name/stage2.jsonnet)" + local log="clustering.log" + local bee="clustering.zip" + local out="tensor-apa-uboone.tar.gz" + + run_idempotently -s "$cfg2" -s "live/clusters.npz" -s "dead/clusters.npz" -t "$bee" -t "$log" -t "$out" -- \ + bash -c "wire-cell -l stderr -L debug \ + -A active_clusters=live/clusters.npz \ + -A masked_clusters=dead/clusters.npz \ + -A bee_zip=$bee \ + -A 
initial_index=0 \ + -A initial_runNo=$run \ + -A initial_subRunNo=$sub \ + -A initial_eventNo=$evt \ + $cfg2 > $log 2>&1" + do_log_digest "$log" npz + + +} diff --git a/clus/test/test-porting/fgval/stage1.jsonnet b/clus/test/test-porting/fgval/stage1.jsonnet new file mode 100644 index 000000000..9c250f996 --- /dev/null +++ b/clus/test/test-porting/fgval/stage1.jsonnet @@ -0,0 +1,170 @@ +// This loads the Uboone ROOT file with Trun, TC and TDC TTrees to produce +// "live" and "dead" blob sets with two UbooneBlobSource nodes. It then runs a +// version of wct-uboone-img.jsonnet which can the be followed with +// wct-uboone-clustering from https://github.com/HaiwangYu/wcp-porting-img/. + +local high = import "layers/high.jsonnet"; +local wc = high.wc; +local pg = high.pg; +local detector = "uboone"; +local params = high.params(detector); +local mid = high.api(detector, params); +local anodes = mid.anodes(); +local anode = anodes[0]; + +// live/dead symmetries +local UbooneBlobSource(fname, kind /*live or dead*/, views /* uvw, uv, vw, wu */) = pg.pnode({ + type: 'UbooneBlobSource', + name: kind+'-'+views, + data: { + input: fname, + anode: wc.tn(anode), + kind: kind, + views: views, + } +}, nin=0, nout=1, uses=[anode]); +local BlobClustering(name) = pg.pnode({ + type: 'BlobClustering', + name: name, + data: { + policy: "uboone", + }, +}, nin=1, nout=1); +local ClusterFileSink(fname) = pg.pnode({ + type: 'ClusterFileSink', + name: fname, + data: { + format: "numpy", + outname: fname, + }, +}, nin=1, nout=0); + + +// generators of the live pipeline elements +local ProjectionDeghosting(name) = pg.pnode({ + type: 'ProjectionDeghosting', + name: name, + data: {}, +}, nin=1, nout=1); +local InSliceDeghosting(name, round /*1,2,3*/) = pg.pnode({ + type: "InSliceDeghosting", + name: name, + data: { + config_round: round, + } +}, nin=1, nout=1); +local BlobGrouping(name) = pg.pnode({ + type: "BlobGrouping", + name: name, + data: { } +}, nin=1, nout=1); +local ChargeSolving(name, 
weighting /* uniform, uboone */) = pg.pnode({ + type: "ChargeSolving", + name: name, + data: { + weighting_strategies: [weighting], + } +}, nin=1, nout=1); +local LocalGeomClustering(name) = pg.pnode({ + type: "LocalGeomClustering", + name: name, + data: { }, +}, nin=1, nout=1); +local GlobalGeomClustering(name, policy="uboone") = pg.pnode({ + type: "GlobalGeomClustering", + name: name, + data: { + clustering_policy: policy, + }, +}, nin=1, nout=1); + +local multi_source = function(iname, kind, views) + local nviews = std.length(views); + local srcs = [ UbooneBlobSource(iname, kind, view), for view in views ]; + local bsm = pg.pnode({ + type: "BlobSetMerge", + name: kind, + data: { multiplicity: nviews, }, + }, nin=4, nout=1); + pg.intern(innodes = srcs, outnodes=[bsm], + edges = [ pg.edge(srcs[ind], bsm, 0, ind), + for ind in std.range(0, nviews-1) ]); + + +local live_sampler = { + type: "BlobSampler", + name: "live", + data: { + time_offset: -1600 * wc.us, + drift_speed: 1.101 * wc.mm / wc.us, + strategy: [ + "center", + "stepped", + ], + }}; +local dead_sampler = { + type: "BlobSampler", + name: "dead", + data: { + strategy: [ + "center", + ], + }}; +local BeeBlobSink(fname, sampler) = pg.pnode({ + type: "BeeBlobSink", + name: fname, + data: { + geom: "uboone", + type: "wcp", + outname: fname, + samplers: wc.tn(sampler) + }, +}, nin=1, nout=0, uses=[sampler]); +local BeeBlobTap = function(fname) + local sink = BeeBlobSink(fname); + local fan = pg.pnode({ + type:'BlobSetFanout', + name:fname, + data: { multiplicity: 2 }, + }, nin=1, nout=2); + pg.intern(innodes=[fan], centernodes=[sink], + edges=[ pg.edge(fan, sink, 1, 0) ]); + + +local live(iname, oname) = pg.pipeline([ + multi_source(iname, "live", ["uvw","uv","vw","wu"]), + // BeeBlobSink(oname, live_sampler), + + // BeeBlobTap("live.zip"), + + BlobClustering("live"), + // BlobGrouping("0"), + + // "standard": + // ProjectionDeghosting("1"), + // BlobGrouping("1"), ChargeSolving("1a","uniform"), 
LocalGeomClustering("1"), ChargeSolving("1b","uboone"), + // InSliceDeghosting("1",1), + // ProjectionDeghosting("2"), + // BlobGrouping("2"), ChargeSolving("2a","uniform"), LocalGeomClustering("2"), ChargeSolving("2b","uboone"), + // InSliceDeghosting("2",2), + // BlobGrouping("3"), ChargeSolving("3a","uniform"), LocalGeomClustering("3"), ChargeSolving("3b","uboone"), + // InSliceDeghosting("3",3), + GlobalGeomClustering(""), + ClusterFileSink(oname), +]); + + +local dead(iname, oname) = pg.pipeline([ + multi_source(iname, "dead", ["uv","vw","wu"]), + BlobClustering("dead"), + GlobalGeomClustering("", "dead_clus"), //uboone, simple, dead_clus + ClusterFileSink(oname), +]); + +local extra_plugins = ["WireCellRoot","WireCellClus"]; + +function(iname, oname, kind /*live or dead*/) + if kind == "live" + then high.main(live(iname, oname), "Pgrapher", extra_plugins) + else high.main(dead(iname, oname), "Pgrapher", extra_plugins) + diff --git a/clus/test/test-porting/fgval/stage2.jsonnet b/clus/test/test-porting/fgval/stage2.jsonnet new file mode 100644 index 000000000..b8cad5ffb --- /dev/null +++ b/clus/test/test-porting/fgval/stage2.jsonnet @@ -0,0 +1,244 @@ +local wc = import "wirecell.jsonnet"; +local g = import "pgraph.jsonnet"; +local f = import 'pgrapher/common/funcs.jsonnet'; +local params = import "pgrapher/experiment/uboone/simparams.jsonnet"; +local tools_maker = import 'pgrapher/common/tools.jsonnet'; +local tools = tools_maker(params); +local anodes = tools.anodes; +local clus = import "pgrapher/common/clus.jsonnet"; + +local cluster_source(fname) = g.pnode({ + type: "ClusterFileSource", + name: fname, + data: { + inname: fname, + anodes: [wc.tn(a) for a in anodes], + } +}, nin=0, nout=1, uses=anodes); + +function ( + active_clusters = "active-clusters-anode0.npz", + masked_clusters = "masked-clusters-anode0.npz", + output = "tensor-apa-uboone.tar.gz", + bee_dir = "data", + bee_zip = "mabc.zip", + initial_index = "0", + initial_runNo = "1", + 
initial_subRunNo = "1", + initial_eventNo = "1") + + local index = std.parseInt(initial_index); + + local LrunNo = std.parseInt(initial_runNo); + local LsubRunNo = std.parseInt(initial_subRunNo); + local LeventNo = std.parseInt(initial_eventNo); + + local active = cluster_source(active_clusters); + local masked = cluster_source(masked_clusters); + + // Note, the "sampler" must be unique to the "sampling". + local bs_live = { + type: "BlobSampler", + name: "bs_live", + data: { + drift_speed: 1.101 * wc.mm / wc.us, + time_offset: -1600 * wc.us + 6 * wc.mm/self.drift_speed, + strategy: [ + // "center", + // "corner", + // "edge", + // "bounds", + "stepped", + // {name:"grid", step:1, planes:[0,1]}, + // {name:"grid", step:1, planes:[1,2]}, + // {name:"grid", step:1, planes:[2,0]}, + // {name:"grid", step:2, planes:[0,1]}, + // {name:"grid", step:2, planes:[1,2]}, + // {name:"grid", step:2, planes:[2,0]}, + ], + // extra: [".*"] // want all the extra + extra: [".*wire_index", "wpid"] // + // extra: [] // + }}; + local bs_dead = { + type: "BlobSampler", + name: "bs_dead", + data: { + strategy: [ + "center", + ], + extra: [".*"] // want all the extra + }}; + + local detector_volumes = + { + type: "DetectorVolumes", + name: "", + data: { + anodes: [wc.tn(a) for a in anodes], + metadata: + {overall: { + FV_xmin: 1 * wc.cm, + FV_xmax: 255 * wc.cm, + FV_ymin: -99.5 * wc.cm, + FV_ymax: 101.5 * wc.cm, + FV_zmin: 15 * wc.cm, + FV_zmax: 1022 * wc.cm, + FV_xmin_margin: 2 * wc.cm, + FV_xmax_margin: 2 * wc.cm, + FV_ymin_margin: 2.5 * wc.cm, + FV_ymax_margin: 2.5 * wc.cm, + FV_zmin_margin: 3 * wc.cm, + FV_zmax_margin: 3 * wc.cm, + vertical_dir: [0,1,0], + beam_dir: [0,0,1] + }} + + { + [ "a" + std.toString(a.data.ident) + "f0pA" ]: { + drift_speed: 1.101 * wc.mm / wc.us, + tick: 0.5 * wc.us, // 0.5 mm per tick + tick_drift: self.drift_speed * self.tick, + time_offset: -1600 * wc.us + 6 * wc.mm/self.drift_speed, + nticks_live_slice: 4, + FV_xmin: 1 * wc.cm, + FV_xmax: 255 * wc.cm, + 
FV_xmin_margin: 2 * wc.cm, + FV_xmax_margin: 2 * wc.cm, + } for a in anodes + } + }, + }; + + local pctransforms = { + type: "PCTransformSet", + name: "", + data: { detector_volumes: wc.tn(detector_volumes) }, + uses: [detector_volumes] + }; + + + local ptb = g.pnode({ + type: "PointTreeBuilding", + name: "", + data: { + samplers: { + "3d": wc.tn(bs_live), + "dead": wc.tn(bs_dead), + }, + multiplicity: 2, + tags: ["live", "dead"], + anode: wc.tn(anodes[0]), + face: 0, + detector_volumes: wc.tn(detector_volumes), + } + }, nin=2, nout=1, uses=[bs_live, bs_dead, detector_volumes]); + + local front_end = g.intern( + innodes = [active, masked], + outnodes = [ptb], + edges = [ + g.edge(active, ptb, 0, 0), + g.edge(masked, ptb, 0, 1), + ], + name = "front-end"); + + local common_coords = ["x_t0cor", "y", "z"]; + // local common_coords = ["x", "y", "z"]; + + // nominal + local cm = clus.clustering_methods(detector_volumes=detector_volumes, + pc_transforms=pctransforms); + // alternative with a scope built from common_coords. 
+ local cm_com = clus.clustering_methods(detector_volumes=detector_volumes, + pc_transforms=pctransforms, + coords=common_coords); + local mabc_clustering = [ + + // cm.test(), + // cm.ctpointcloud(), + cm.switch_scope(), + cm_com.live_dead(), + cm_com.extend(flag=4, length_cut=60*wc.cm, num_try=0, length_2_cut=15*wc.cm, num_dead_try=1), + cm_com.regular("one", length_cut=60*wc.cm, flag_enable_extend=false), + cm_com.regular("two", length_cut=30*wc.cm, flag_enable_extend=true), + cm_com.parallel_prolong(length_cut=35*wc.cm), + cm_com.close(length_cut=1.2*wc.cm), + cm_com.extend_loop(num_try=3), + cm_com.separate(), + cm_com.connect1(), + cm_com.deghost(), + cm_com.examine_x_boundary(), + cm_com.protect_overclustering(), + cm_com.neutrino(), + cm_com.isolated(), + ]; + + + local mabc = g.pnode({ + type: "MultiAlgBlobClustering", + name: "", + data: { + inpath: "pointtrees/%d", + outpath: "pointtrees/%d", + perf: true, + bee_dir: bee_dir, // "data/0/0", + bee_zip: bee_zip, + initial_index: index, // New RSE configuration + use_config_rse: true, // Enable use of configured RSE + runNo: LrunNo, + subRunNo: LsubRunNo, + eventNo: LeventNo, + save_deadarea: true, + anodes: [wc.tn(a) for a in anodes], + detector_volumes: wc.tn(detector_volumes), + bee_points_sets: [ // New configuration for multiple bee points sets + { + name: "img", // Name of the bee points set + detector: "uboone", // Detector name + algorithm: "img", // Algorithm identifier + pcname: "3d", // Which scope to use + coords: ["x", "y", "z"], // Coordinates to use + individual: false // Whether to output as a whole or individual APA/Face + }, + { + name: "clustering", // Name of the bee points set + detector: "uboone", // Detector name + algorithm: "clustering", // Algorithm identifier + pcname: "3d", // Which scope to use + coords: ["x_t0cor", "y", "z"], // Coordinates to use + individual: true // Output individual APA/Face + } + ], + pipeline: [wc.tn(cmeth) for cmeth in mabc_clustering], + 
cluster_id_order: "size", // or "tree" for insertion order or nothing for no rewriting + } + }, nin=1, nout=1, uses=[detector_volumes, pctransforms]+mabc_clustering); + + local sink = g.pnode({ + type: "TensorFileSink", + name: output, + data: { + outname: output, + prefix: "clustering_", // json, numpy, dummy + dump_mode: true, + } + }, nin=1, nout=0); + + local graph = g.pipeline([front_end, mabc, sink]); + + local app = { + type: 'Pgrapher', //Pgrapher, TbbFlow + data: { + edges: g.edges(graph), + }, + }; + local cmdline = { + type: "wire-cell", + data: { + plugins: ["WireCellGen", "WireCellPgraph", /*"WireCellTbb",*/ + "WireCellSio", "WireCellSigProc", "WireCellRoot", "WireCellImg", "WireCellClus"], + apps: [wc.tn(app)] + }, + }; + + [cmdline] + g.uses(graph) + [app] diff --git a/clus/test/test-porting/pdhd/clus.jsonnet b/clus/test/test-porting/pdhd/clus.jsonnet new file mode 100644 index 000000000..be4b4810b --- /dev/null +++ b/clus/test/test-porting/pdhd/clus.jsonnet @@ -0,0 +1,487 @@ +local wc = import "wirecell.jsonnet"; +local g = import "pgraph.jsonnet"; +local f = import 'pgrapher/common/funcs.jsonnet'; +local clus = import "pgrapher/common/clus.jsonnet"; + + +local time_offset = -250 * wc.us; +local drift_speed = 1.6 * wc.mm / wc.us; +local bee_dir = "data"; +local bee_zip = "mabc.zip"; + +local initial_index = "0"; +local initial_runNo = "1"; +local initial_subRunNo = "1"; +local initial_eventNo = "1"; +local index = std.parseInt(initial_index); +local LrunNo = std.parseInt(initial_runNo); +local LsubRunNo = std.parseInt(initial_subRunNo); +local LeventNo = std.parseInt(initial_eventNo); + + +local common_coords = ["x", "y", "z"]; +local common_corr_coords = ["x_t0cor", "y", "z"]; + + +local dvm = { + overall: { + FV_xmin: -3579.85 * wc.mm, + FV_xmax: 3579.85 * wc.mm, + FV_ymin: 76.1 * wc.mm, + FV_ymax: 6060.0 * wc.mm, + FV_zmin: 2.34345 * wc.mm, + FV_zmax: 4622.97 * wc.mm, + FV_xmin_margin: 2 * wc.cm, + FV_xmax_margin: 2 * wc.cm, + FV_ymin_margin: 2.5 
* wc.cm, + FV_ymax_margin: 2.5 * wc.cm, + FV_zmin_margin: 3 * wc.cm, + FV_zmax_margin: 3 * wc.cm, + vertical_dir: [0,1,0], + beam_dir: [0,0,1] + }, + a0f0pA: { + drift_speed: drift_speed, + tick: 0.5 * wc.us, // 0.5 mm per tick + tick_drift: self.drift_speed * self.tick, + time_offset: time_offset, + nticks_live_slice: 4, + FV_xmin: -3579.85 * wc.mm, + FV_xmax: -25.4 * wc.mm, + FV_xmin_margin: 2 * wc.cm, + FV_xmax_margin: 2 * wc.cm, + }, + a0f1pA: $.a0f0pA + { + FV_xmin: -3579.85 * wc.mm, + FV_xmax: -3579.85 * wc.mm, + }, + a1f0pA: $.a0f0pA + { + FV_xmin: 3579.85 * wc.mm, + FV_xmax: 3579.85 * wc.mm, + }, + a1f1pA: $.a0f0pA + { + FV_xmin: 25.4 * wc.mm, + FV_xmax: 3579.85 * wc.mm, + }, + a2f0pA: $.a0f0pA, + a2f1pA: $.a0f1pA, + a3f0pA: $.a1f0pA, + a3f1pA: $.a1f1pA, +}; + +local anodes_name(anodes, face="") = + std.join("-", [std.toString(a.data.ident) for a in anodes]) + if face == "" then "" else "-" + std.toString(face); + + +local detector_volumes(anodes, face="") = { + "type": "DetectorVolumes", + "name": "dv-apa" + anodes_name(anodes, face), + "data": { + "anodes": [wc.tn(anode) for anode in anodes], + metadata: + {overall: dvm["overall"]} + + { + [ "a" + std.toString(a.data.ident) + "f0pA" ]: + dvm[ "a" + std.toString(a.data.ident) + "f0pA" ] + for a in anodes + } + + { + [ "a" + std.toString(a.data.ident) + "f1pA" ]: + dvm[ "a" + std.toString(a.data.ident) + "f1pA" ] + for a in anodes + } + }, + uses: anodes +}; + + +local pctransforms(dv) = { + type: "PCTransformSet", + name: dv.name, + data: { detector_volumes: wc.tn(dv) }, + uses: [dv] +}; + + + +local bs_live_face(apa, face) = { + type: "BlobSampler", + name: "live-%s-%d"%[apa, face], + data: { + drift_speed: drift_speed, + time_offset: time_offset, + strategy: ["stepped"], + extra: [".*wire_index", "wpid"] + } +}; +local bs_dead_face(apa, face) = { + type: "BlobSampler", + name: "dead-%s-%d"%[apa, face], + data: { + strategy: ["center"], + extra: [".*"] // want all the extra + } +}; +// The factory used to 
give blob samplers to ClusteringRetile ("rt"). +local bs_rt_face = bs_live_face; + + +local clus_per_face ( + anode, + face, + dump = true, + ) = +{ + + local dv = detector_volumes([anode], face), + local pcts = pctransforms(dv), + + + local cluster_scope_filter_live = g.pnode({ + type: "ClusterScopeFilter", + name: "csf-live-%s-%d"%[anode.name, face], + data: { + face_index: face, + } + }, nin=1, nout=1, uses=[]), + + local cluster_scope_filter_dead = g.pnode({ + type: "ClusterScopeFilter", + name: "csf-dead-%s-%d"%[anode.name, face], + data: { + face_index: face, + } + }, nin=1, nout=1, uses=[]), + + local bsl = bs_live_face(anode.name, face), + local bsd = bs_dead_face(anode.name, face), + + local ptb = g.pnode({ + type: "PointTreeBuilding", + name: "%s-%d"%[anode.name, face], + data: { + samplers: { + "3d": wc.tn(bsl), + "dead": wc.tn(bsd), + }, + multiplicity: 2, + tags: ["live", "dead"], + anode: wc.tn(anode), + face: face, + detector_volumes: wc.tn(dv), + } + }, nin=2, nout=1, uses=[bsl, bsd, dv]), + + local cluster2pct = g.intern( + innodes = [cluster_scope_filter_live, cluster_scope_filter_dead], + centernodes = [], + outnodes = [ptb], + edges = [ + g.edge(cluster_scope_filter_live, ptb, 0, 0), + g.edge(cluster_scope_filter_dead, ptb, 0, 1) + ] + ), + // local cluster2pct = ptb, + + local face_name = "%s-%d"%[anode.name, face], + + local cm = clus.clustering_methods(prefix=face_name, + detector_volumes=dv, + pc_transforms=pcts, + coords=common_coords), + local cm_pipeline = [ + cm.pointed(), + // cm.ctpointcloud(), + cm.live_dead(dead_live_overlap_offset=2), + cm.extend(flag=4, length_cut=60*wc.cm, num_try=0, length_2_cut=15*wc.cm, num_dead_try=1), + cm.regular(name="-one", length_cut=60*wc.cm, flag_enable_extend=false), + cm.regular(name="_two", length_cut=30*wc.cm, flag_enable_extend=true), + cm.parallel_prolong(length_cut=35*wc.cm), + cm.close(length_cut=1.2*wc.cm), + cm.extend_loop(num_try=3), + cm.separate(use_ctpc=true), + cm.connect1(), + // 
cm.isolated(), + ], + + local mabc = g.pnode({ + local name = "%s-%d"%[anode.name, face], + type: "MultiAlgBlobClustering", + name: name, + data: { + inpath: "pointtrees/%d", + outpath: "pointtrees/%d", + // grouping2file_prefix: "grouping%s-%d"%[anode.name, face], + perf: true, + bee_dir: bee_dir, // "data/0/0", // not used + bee_zip: "mabc-%s-face%d.zip"%[anode.name, face], + bee_detector: "sbnd", + initial_index: index, // New RSE configuration + use_config_rse: true, // Enable use of configured RSE + runNo: LrunNo, + subRunNo: LsubRunNo, + eventNo: LeventNo, + save_deadarea: true, + anodes: [wc.tn(anode)], + face: face, + detector_volumes: wc.tn(dv), + bee_points_sets: [ // New configuration for multiple bee points sets + { + name: "clustering", // Name of the bee points set + detector: "protodunehd", // Detector name + algorithm: "clustering", // Algorithm identifier + pcname: "3d", // Which scope to use + coords: ["x", "y", "z"], // Coordinates to use + individual: true // Output individual APA/Face + } + ], + pipeline: wc.tns(cm_pipeline), + } + }, nin=1, nout=1, uses=[dv, anode, pcts]+cm_pipeline), + + local sink = g.pnode({ + type: "TensorFileSink", + name: "clus_per_face-%s-%d"%[anode.name, face], + data: { + outname: "trash-%s-face%d.tar.gz"%[anode.name, face], + prefix: "clustering_", // json, numpy, dummy + dump_mode: true, + } + }, nin=1, nout=0), + + local end = if dump + then g.pipeline([mabc, sink]) + else g.pipeline([mabc]), + + ret :: g.pipeline([cluster2pct, end], "clus_per_face-%s-%d"%[anode.name, face]) +}.ret; + +local clus_per_apa ( + anode, + dump = true, + ) = +{ + local cfout_live = g.pnode({ + type:'ClusterFanout', + name: 'clus_per_apa-cfout_live-%s'%anode.name, + data: { + multiplicity: 2 + }}, nin=1, nout=2), + + local cfout_dead = g.pnode({ + type:'ClusterFanout', + name: 'clus_per_apa-cfout_dead-%s'%anode.name, + data: { + multiplicity: 2 + }}, nin=1, nout=2), + + local per_face_pipes = [ + clus_per_face(anode, face=0, dump=false), 
+ clus_per_face(anode, face=1, dump=false), + ], + + local pcmerging = g.pnode({ + type: "PointTreeMerging", + name: "%s"%[anode.name], + data: { + multiplicity: 2, + inpath: "pointtrees/%d", + outpath: "pointtrees/%d", + } + }, nin=2, nout=1), + + local dv = detector_volumes([anode]), + local pcts = pctransforms(dv), + + local cm = clus.clustering_methods(prefix=anode.name, + detector_volumes=dv, + pc_transforms=pcts, + coords=common_coords), + local cm_pipeline = [ + cm.deghost(), + cm.protect_overclustering(), + ], + + local mabc = g.pnode({ + local name = anode.name, + type: "MultiAlgBlobClustering", + name: "clus_per_apa-%s"%[name], + data: { + inpath: "pointtrees/%d", + outpath: "pointtrees/%d", + // grouping2file_prefix: "grouping%s-%d"%[anode.name, face], + perf: true, + bee_dir: bee_dir, // "data/0/0", // not used + bee_zip: "mabc-%s.zip"%[anode.name], + bee_detector: "sbnd", + initial_index: index, // New RSE configuration + use_config_rse: true, // Enable use of configured RSE + runNo: LrunNo, + subRunNo: LsubRunNo, + eventNo: LeventNo, + save_deadarea: true, + anodes: [wc.tn(anode)], + detector_volumes: wc.tn(dv), + pipeline: wc.tns(cm_pipeline), + } + }, nin=1, nout=1, uses=[anode, dv, pcts]+cm_pipeline), + + local sink = g.pnode({ + type: "TensorFileSink", + name: "clus_per_apa-%s"%[anode.name], + data: { + outname: "trash-%s.tar.gz"%[anode.name], + prefix: "clustering_", // json, numpy, dummy + dump_mode: true, + } + }, nin=1, nout=0), + + local end = if dump + then g.pipeline([mabc, sink]) + else g.pipeline([mabc]), + + ret :: g.intern( + innodes = [cfout_live, cfout_dead], + centernodes = per_face_pipes + [pcmerging], + outnodes = [end], + edges = [ + g.edge(cfout_live, per_face_pipes[0], 0, 0), + g.edge(cfout_dead, per_face_pipes[0], 0, 1), + g.edge(cfout_live, per_face_pipes[1], 1, 0), + g.edge(cfout_dead, per_face_pipes[1], 1, 1), + g.edge(per_face_pipes[0], pcmerging, 0, 0), + g.edge(per_face_pipes[1], pcmerging, 0, 1), + g.edge(pcmerging, end, 
0, 0), + ] + ), +}.ret; + +local clus_all_apa ( + anodes, + dump = true, + ) = { + local nanodes = std.length(anodes), + local pcmerging = g.pnode({ + type: "PointTreeMerging", + name: "clus_all_apa", + data: { + multiplicity: nanodes, + inpath: "pointtrees/%d", + outpath: "pointtrees/%d", + } + }, nin=nanodes, nout=1), + + local dv = detector_volumes(anodes), + local pcts = pctransforms(dv), + + + local cm_old = clus.clustering_methods(prefix="all", + detector_volumes=dv, + pc_transforms=pcts, + coords=common_coords), + + + local cm = clus.clustering_methods(prefix="all", + detector_volumes=dv, + pc_transforms=pcts, + coords=common_corr_coords), + + local retiler = cm.retiler(anodes=anodes, + cut_time_low=3*wc.us, + cut_time_high=5*wc.us, + samplers=[ + clus.sampler(bs_rt_face(0,0), apa=0, face=0), + clus.sampler(bs_rt_face(0,1), apa=0, face=1), + clus.sampler(bs_rt_face(1,0), apa=1, face=0), + clus.sampler(bs_rt_face(1,1), apa=1, face=1), + clus.sampler(bs_rt_face(2,0), apa=2, face=0), + clus.sampler(bs_rt_face(2,1), apa=2, face=1), + clus.sampler(bs_rt_face(3,0), apa=3, face=0), + clus.sampler(bs_rt_face(3,1), apa=3, face=1), + ]), + local cm_pipeline = [ + // cm_old.examine_x_boundary(), + cm_old.switch_scope(), + + cm.extend(flag=4, length_cut=60*wc.cm, num_try=0, length_2_cut=15*wc.cm, num_dead_try= 1), + cm.regular(name="1", length_cut=60*wc.cm, flag_enable_extend=false), + cm.regular(name="2", length_cut=30*wc.cm, flag_enable_extend=true), + cm.parallel_prolong(length_cut=35*wc.cm), + cm.close(length_cut=1.2*wc.cm), + cm.extend_loop(num_try=3), + cm.separate(use_ctpc=true), + cm.neutrino(), + cm.isolated(), + cm.examine_bundles(), + cm.retile(retiler=retiler), + ], + + local mabc = g.pnode({ + type: "MultiAlgBlobClustering", + name: "clus_all_apa", + data: { + inpath: "pointtrees/%d", + outpath: "pointtrees/%d", + // grouping2file_prefix: "grouping%s-%d"%[anode.name, face], + perf: true, + bee_dir: bee_dir, // "data/0/0", // not used + bee_zip: 
"mabc-all-apa.zip", + bee_detector: "sbnd", + initial_index: index, // New RSE configuration + use_config_rse: true, // Enable use of configured RSE + runNo: LrunNo, + subRunNo: LsubRunNo, + eventNo: LeventNo, + save_deadarea: true, + anodes: [wc.tn(a) for a in anodes], + detector_volumes: wc.tn(dv), + bee_points_sets: [ // New configuration for multiple bee points sets + // { + // name: "img", // Name of the bee points set + // detector: "protodunehd", // Detector name + // algorithm: "img", // Algorithm identifier + // pcname: "3d", // Which scope to use + // coords: ["x", "y", "z"], // Coordinates to use + // individual: false // Whether to output as a whole or individual APA/Face + // }, + { + name: "clustering", // Name of the bee points set + detector: "protodunehd", // Detector name + algorithm: "clustering", // Algorithm identifier + pcname: "3d", // Which scope to use + coords: ["x_t0cor", "y", "z"], // Coordinates to use + individual: false // Output individual APA/Face + } + ], + pipeline: wc.tns(cm_pipeline), + }, + }, nin=1, nout=1, uses=anodes+[dv, pcts]+cm_pipeline), + + local sink = g.pnode({ + type: "TensorFileSink", + name: "clus_all_apa", + data: { + outname: "trash-all-apa.tar.gz", + prefix: "clustering_", // json, numpy, dummy + dump_mode: true, + } + }, nin=1, nout=0), + local end = if dump + then g.pipeline([mabc, sink]) + else g.pipeline([mabc]), + ret :: g.intern( + innodes = [pcmerging], + centernodes = [], + outnodes = [end], + edges = [ + g.edge(pcmerging, end, 0, 0), + ] + ), +}.ret; + + +function () { + per_face(anode, face=0, dump=true) :: clus_per_face(anode, face=face, dump=dump), + per_apa(anode, dump=true) :: clus_per_apa(anode, dump=dump), + all_apa(anodes, dump=true) :: clus_all_apa(anodes, dump=dump), +} diff --git a/clus/test/test-porting/pdhd/main.jsonnet b/clus/test/test-porting/pdhd/main.jsonnet new file mode 100644 index 000000000..00ec3d10d --- /dev/null +++ b/clus/test/test-porting/pdhd/main.jsonnet @@ -0,0 +1,87 @@ 
+local g = import "pgraph.jsonnet"; +local f = import "pgrapher/common/funcs.jsonnet"; +local wc = import "wirecell.jsonnet"; + +local io = import 'pgrapher/common/fileio.jsonnet'; +local tools_maker = import 'pgrapher/common/tools.jsonnet'; + +function(input="") + + // local input = std.extVar('input'); + + local reality = 'data'; + local data_params = import 'pgrapher/experiment/pdhd/params.jsonnet'; + local simu_params = import 'pgrapher/experiment/pdhd/simparams.jsonnet'; + local params = if reality == 'data' then data_params else simu_params; + local tools_maker = import 'pgrapher/common/tools.jsonnet'; + local tools = tools_maker(params); + local anodes = tools.anodes; + local nanodes = std.length(tools.anodes); + + + local cluster_source(fname) = g.pnode({ + type: "ClusterFileSource", + name: fname, + data: { + inname: fname, + anodes: [wc.tn(a) for a in anodes], + } + }, nin=0, nout=1, uses=anodes); + local active_files = [ "%s/clusters-apa-apa%d-ms-active.tar.gz"%[input, a.data.ident] for a in anodes]; + local masked_files = [ "%s/clusters-apa-apa%d-ms-masked.tar.gz"%[input, a.data.ident] for a in anodes]; + local active_clusters = [cluster_source(f) for f in active_files]; + local masked_clusters = [cluster_source(f) for f in masked_files]; + + local clus = import 'clus.jsonnet'; + local clus_maker = clus(); + // local clus_pipes = [clus_maker.per_face(tools.anodes[n]) for n in std.range(0, std.length(tools.anodes) - 1)]; + local clus_pipes = [clus_maker.per_apa(tools.anodes[n], dump=false) for n in std.range(0, std.length(tools.anodes) - 1)]; + + local img_clus_pipe = [g.intern( + innodes = active_clusters + masked_clusters, + centernodes = [], + outnodes = [clus_pipes[n]], + edges = [ + g.edge(active_clusters[n], clus_pipes[n], 0, 0), + g.edge(masked_clusters[n], clus_pipes[n], 0, 1), + ] + ) + for n in std.range(0, std.length(tools.anodes) - 1)]; + + local clus_all_apa = clus_maker.all_apa(tools.anodes); + + local parallel_graph = + { + local begin = 
img_clus_pipe, + local end = clus_maker.all_apa(tools.anodes), + ret :: g.intern( + innodes=[begin], + outnodes=[end], + edges=[g.edge(begin, end, i, i) for i in std.range(0, nanodes-1)] + ), + }.ret; + + local graph = g.intern( + innodes=img_clus_pipe, + outnodes=[clus_all_apa], + edges=[g.edge(img_clus_pipe[i], clus_all_apa, 0, i) for i in std.range(0, nanodes-1)] + ); + + local app = { + type: 'Pgrapher', //Pgrapher, TbbFlow + data: { + edges: g.edges(graph), + }, + }; + + local cmdline = { + type: "wire-cell", + data: { + plugins: ["WireCellGen", "WireCellPgraph", "WireCellSio", "WireCellSigProc", "WireCellImg", "WireCellRoot", "WireCellTbb", "WireCellClus"], + apps: ["Pgrapher"] + } + }; + + [cmdline] + g.uses(graph) + [app] +// img_clus_pipe +// clus_maker.all_apa(tools.anodes) diff --git a/clus/test/test-porting/qlport/main.jsonnet b/clus/test/test-porting/qlport/main.jsonnet new file mode 100644 index 000000000..92fcb74db --- /dev/null +++ b/clus/test/test-porting/qlport/main.jsonnet @@ -0,0 +1,431 @@ +// This job inputs Uboone ROOT files (TC, etc), runs MABC, dumbs to bee. +// +// Use like: +// +// wire-cell -l stderr -L debug \ +// -A kind=live +// -A infiles=nuselEval_5384_137_6852.root \ +// clus/test/uboone-mabc.jsonnet +// +// The "kind" can be "live" or "both" (live and dead). + + +local wc = import "wirecell.jsonnet"; +local pg = import "pgraph.jsonnet"; + +//// Experimental new style +// local detector = "uboone"; +// local params = high.params(detector); +// local mid = high.api(detector, params); +// local anode = mid.anodes()[0]; +//// Old style: +local params = import "pgrapher/experiment/uboone/simparams.jsonnet"; +local tools_maker = import 'pgrapher/common/tools.jsonnet'; +local tools = tools_maker(params); +local anode = tools.anodes[0]; +local anodes = tools.anodes; +local clus = import "pgrapher/common/clus.jsonnet"; + + + + +// The TDM datapath to find point trees. This needs coordination between a few +// nodes. 
This provides the default but the construction functions allow +// override. +local pointtree_datapath = "pointtrees/%d"; + +// This object holds a bunch of functions that construct parts of the graph. We +// use this object at teh end to build the full graph. Many functions are not +// needed. This "ub" object could be shared more globally to assist in building +// novel uboone-specific graphs. +local ub = { + anode: anode, + + bs_live : { + type: "BlobSampler", + name: "live", + data: { + time_offset: -1600 * wc.us + 6 * wc.mm/self.drift_speed, + drift_speed: 1.101 * wc.mm / wc.us, + strategy: [ + "stepped", + ], + extra: [".*wire_index", "wpid"] // + } + }, + + bs_dead : { + type: "BlobSampler", + name: "dead", + data: { + strategy: [ + "center", + ], + extra: [".*"] // want all the extra + } + }, + + local pctransforms = { + type: "PCTransformSet", + name: "", + data: { detector_volumes: wc.tn(detector_volumes) }, + uses: [detector_volumes] + }, + + local detector_volumes = + { + type: "DetectorVolumes", + name: "", + data: { + anodes: [wc.tn(a) for a in tools.anodes], + metadata: + {overall: { + FV_xmin: 1 * wc.cm, + FV_xmax: 255 * wc.cm, + FV_ymin: -99.5 * wc.cm, + FV_ymax: 101.5 * wc.cm, + FV_zmin: 15 * wc.cm, + FV_zmax: 1022 * wc.cm, + FV_xmin_margin: 2 * wc.cm, + FV_xmax_margin: 2 * wc.cm, + FV_ymin_margin: 2.5 * wc.cm, + FV_ymax_margin: 2.5 * wc.cm, + FV_zmin_margin: 3 * wc.cm, + FV_zmax_margin: 3 * wc.cm, + vertical_dir: [0,1,0], + beam_dir: [0,0,1] + }} + + { + [ "a" + std.toString(a.data.ident) + "f0pA" ]: { + drift_speed: 1.101 * wc.mm / wc.us, + tick: 0.5 * wc.us, // 0.5 mm per tick + tick_drift: self.drift_speed * self.tick, + time_offset: -1600 * wc.us + 6 * wc.mm/self.drift_speed, + nticks_live_slice: 4, + FV_xmin: 1 * wc.cm, + FV_xmax: 255 * wc.cm, + FV_xmin_margin: 2 * wc.cm, + FV_xmax_margin: 2 * wc.cm, + } for a in tools.anodes + } + }, + }, + + UbooneBlobSource(fname, kind /*live or dead*/, views /* uvw, uv, vw, wu */) :: pg.pnode({ + type: 
'UbooneBlobSource', + name: kind+'-'+views, + data: { + input: fname, + anode: wc.tn(anode), + kind: kind, + views: views, + } + }, nin=0, nout=1, uses=[anode]), + + UbooneClusterSource(fname, sampler=$.bs_live, datapath=pointtree_datapath, optical=true, kind="live") :: pg.pnode({ + type: 'UbooneClusterSource', + name: sampler.name, + data: { + input: fname, // file name or list + anode: wc.tn(anode), + datapath: datapath + '/' + kind, // see issue #375 + sampler: wc.tn(sampler), + kind: kind, + } + if optical then { + light: "light", flash: "flash", flashlight: "flashlight" + } else {} + }, nin=1, nout=1, uses=[sampler, anode]), + + TensorSetFanin(multiplicity=2, tensor_order=[0,1]) :: pg.pnode({ + type: 'TensorSetFanin', + name: '', + data: { + multiplicity: multiplicity, + tensor_order: tensor_order, + } + }, nin=multiplicity, nout=1), + + ClusterFlashDump(datapath=pointtree_datapath, kind='live') :: pg.pnode({ + type: 'ClusterFlashDump', + name: "", + data: { + datapath: datapath + '/' + kind, // see issue #375 + }, + }, nin=1, nout=0), + + BlobSetMerge(kind, multiplicity) :: pg.pnode({ + type: "BlobSetMerge", + name: kind, + data: { multiplicity: multiplicity, }, + }, nin=multiplicity, nout=1), + + // Make one UbooneBlobSource for view and multiplex their output + multiplex_blob_views(iname, kind, views) :: + local nviews = std.length(views); + local srcs = [ $.UbooneBlobSource(iname, kind, view), for view in views ]; + local bsm = $.BlobSetMerge(kind, nviews); + pg.intern(innodes = srcs, outnodes=[bsm], + edges = [ pg.edge(srcs[ind], bsm, 0, ind), + for ind in std.range(0, nviews-1) ]), + + BlobClustering(name) :: pg.pnode({ + type: 'BlobClustering', + name: name, + data: { + policy: "uboone", + }, + }, nin=1, nout=1), + + ClusterFileSource(fname) :: pg.pnode({ + type: 'ClusterFileSource', + name: fname, + data: { + inname: fname, + anodes: [wc.tn(anode)], + }, + }, nin=0, nout=1, uses=[anode]), + + ClusterFileSink(fname) :: pg.pnode({ + type: 
'ClusterFileSink', + name: fname, + data: { + format: "numpy", + outname: fname, + }, + }, nin=1, nout=0), + + ProjectionDeghosting(name) :: pg.pnode({ + type: 'ProjectionDeghosting', + name: name, + data: {}, + }, nin=1, nout=1), + + InSliceDeghosting(name, round /*1,2,3*/) :: pg.pnode({ + type: "InSliceDeghosting", + name: name, + data: { + config_round: round, + } + }, nin=1, nout=1), + + BlobGrouping(name) :: pg.pnode({ + type: "BlobGrouping", + name: name, + data: { } + }, nin=1, nout=1), + + ChargeSolving(name, weighting /* uniform, uboone */) :: pg.pnode({ + type: "ChargeSolving", + name: name, + data: { + weighting_strategies: [weighting], + } + }, nin=1, nout=1), + + LocalGeomClustering(name) :: pg.pnode({ + type: "LocalGeomClustering", + name: name, + data: { }, + }, nin=1, nout=1), + + + GlobalGeomClustering(name, policy="uboone") :: pg.pnode({ + type: "GlobalGeomClustering", + name: name, + data: { + clustering_policy: policy, + }, + }, nin=1, nout=1), + + // PointTreeBuilding() :: pg.pnode({ + // type: "PointTreeBuilding", + // name: "", + // data: { + // samplers: { + // "3d": wc.tn($.bs_live), + // "dead": wc.tn($.bs_dead), + // }, + // multiplicity: 2, + // tags: ["live", "dead"], + // anode: wc.tn(anode), + // face: 0, + // detector_volumes: "DetectorVolumes", + // } + // }, nin=2, nout=1, uses=[$.bs_live, $.bs_dead, detector_volumes]), + + // point_tree_source(livefn, deadfn) :: + // local livesrc = $.ClusterFileSource(livefn); + // local deadsrc = $.ClusterFileSource(deadfn); + // local ptb = $.PointTreeBuilding(); + // pg.intern(innodes=[livesrc, deadsrc], outnodes=[ptb], + // edges=[ pg.edge(livesrc, ptb, 0, 0), + // pg.edge(deadsrc, ptb, 0, 1) ] + // ), + + BeeBlobSink(fname, sampler) :: pg.pnode({ + type: "BeeBlobSink", + name: fname, + data: { + geom: "uboone", + type: "wcp", + outname: fname, + samplers: wc.tn(sampler) + }, + }, nin=1, nout=0, uses=[sampler]), + + BeeBlobTap(fname) :: + local sink = $.BeeBlobSink(fname); + local fan = 
pg.pnode({ + type:'BlobSetFanout', + name:fname, + data: { multiplicity: 2 }, + }, nin=1, nout=2); + pg.intern(innodes=[fan], centernodes=[sink], + edges=[ pg.edge(fan, sink, 1, 0) ]), + + MultiAlgBlobClustering(beezip, datapath=pointtree_datapath, live_sampler=$.bs_live) :: + local cm = clus.clustering_methods(detector_volumes=detector_volumes, + pc_transforms=pctransforms); + local retiler = cm.retiler(anodes=anodes, + samplers=[clus.sampler(live_sampler, apa=0, face=0)], + cut_time_low=3*wc.us, cut_time_high=5*wc.us); + local cm_pipeline = [ + cm.examine_bundles(), + cm.retile(retiler=retiler), + ]; + pg.pnode({ + type: "MultiAlgBlobClustering", + name: "", + data: { + inpath: pointtree_datapath, + outpath: pointtree_datapath, + perf: true, + bee_zip: beezip, + initial_index: 0, + use_config_rse: true, // Enable use of configured RSE + runNo: 1, + subRunNo: 1, + eventNo: 1, + save_deadarea: true, + anodes: [wc.tn(a) for a in anodes], + detector_volumes: wc.tn(detector_volumes), + bee_points_sets: [ // New configuration for multiple bee points sets + { + name: "img", // Name of the bee points set + detector: "uboone", // Detector name + algorithm: "img", // Algorithm identifier + pcname: "3d", // Which scope to use + coords: ["x", "y", "z"], // Coordinates to use + individual: false // Whether to output as a whole or individual APA/Face + }, + { + name: "clustering", // Name of the bee points set + detector: "uboone", // Detector name + algorithm: "clustering", // Algorithm identifier + pcname: "3d", // Which scope to use + coords: ["x", "y", "z"], // Coordinates to use + individual: true // Output individual APA/Face + }, + { + name: "retiled", // Name of the bee points set + grouping: "shadow", + detector: "uboone", // Detector name + algorithm: "retiled", // Algorithm identifier + pcname: "3d", // Which scope to use + coords: ["x", "y", "z"], // Coordinates to use + individual: true // Output individual APA/Face + }, + { + name: "examine", // Name of the bee 
points set + visitor: "ClusteringExamineBundles", + detector: "uboone", // Detector name + algorithm: "examine", // Algorithm identifier + pcname: "3d", // Which scope to use + coords: ["x", "y", "z"], // Coordinates to use + individual: true // Output individual APA/Face + }, + ], + pipeline: wc.tns(cm_pipeline), + cluster_id_order: "size", // or "tree" for insertion order or nothing for no rewriting + } + }, nin=1, nout=1, uses=anodes + [detector_volumes] + cm_pipeline), + + + TensorFileSink(fname) :: pg.pnode({ + type: "TensorFileSink", + name: fname, + data: { + outname: fname, + prefix: "clustering_", + dump_mode: true, + } + }, nin=1, nout=0), + + main(graph, app='Pgrapher', extra_plugins = []) :: + local uses = pg.uses(graph); + local plugins = [ + "WireCellSio", "WireCellAux", + "WireCellGen", "WireCellSigProc", "WireCellImg", "WireCellClus", + "WireCellRoot", + "WireCellApps"] + { + 'TbbFlow': ["WireCellTbb"], + 'Pgrapher': ["WireCellPgraph"], + }[app]; + + local appcfg = { + type: app, + data: { + edges: pg.edges(graph) + }, + }; + local cmdline = { + type: "wire-cell", + data: { + plugins: plugins, + apps: [appcfg.type] + } + }; + [cmdline] + pg.uses(graph) + [appcfg], +}; + + + + +local ingraph_live(infiles, datapath=pointtree_datapath) = pg.pipeline([ + ub.multiplex_blob_views(infiles, 'live', ["uvw","uv","vw","wu"]), + ub.UbooneClusterSource(infiles, datapath=datapath, sampler=ub.bs_live, kind='live') +]); +local ingraph_dead(infiles, datapath=pointtree_datapath) = pg.pipeline([ + ub.multiplex_blob_views(infiles, 'dead', ["uv","vw","wu"]), + ub.UbooneClusterSource(infiles, datapath=datapath, sampler=ub.bs_dead, kind='dead', optical=false) +]); +local outgraph(beezip, datapath=pointtree_datapath) = pg.pipeline([ + ub.MultiAlgBlobClustering(beezip, datapath=datapath), + ub.ClusterFlashDump(datapath=datapath) +]); + + +local graphs = { + live :: function(infiles, beezip, datapath) + pg.pipeline([ingraph_live(infiles, datapath), outgraph(beezip, 
datapath)]), + + dead :: function(infiles, beezip, datapath) + pg.pipeline([ingraph_dead(infiles, datapath), outgraph(beezip, datapath)]), + + both :: function(infiles, beezip, datapath) + local live = ingraph_live(infiles, datapath); + local dead = ingraph_dead(infiles, datapath); + local out = outgraph(beezip, datapath); + local fanin = ub.TensorSetFanin(); + pg.intern(innodes=[live,dead], outnodes=[out], centernodes=[fanin], + edges=[ + pg.edge(live,fanin,0,0), + pg.edge(dead,fanin,0,1), + pg.edge(fanin,out,0,0)]) +}; + +local extra_plugins = ["WireCellAux", "WireCellRoot", "WireCellClus"]; + +// kind can be "live", "dead" or "both". +function(infiles="uboone.root", beezip="bee.zip", kind="live", datapath=pointtree_datapath) + ub.main(graphs[kind](infiles, beezip, datapath), "Pgrapher", extra_plugins) diff --git a/clus/test/test-porting/steiner/main.jsonnet b/clus/test/test-porting/steiner/main.jsonnet new file mode 100644 index 000000000..8915462a6 --- /dev/null +++ b/clus/test/test-porting/steiner/main.jsonnet @@ -0,0 +1,432 @@ +// This job inputs Uboone ROOT files (TC, etc), runs MABC, dumps to bee. +// +// It tests porting of Steiner-related WCP functionality. +// +// Use it from the corresponding bats script: +// +// $ bats-debug -f steiner clus/test/test-porting.bats +// $ ls tmp/steiner/ +// +// Hint: put test/scripts/ in your PATH to find bats-debug + +local wc = import "wirecell.jsonnet"; +local pg = import "pgraph.jsonnet"; + +local params = import "pgrapher/experiment/uboone/simparams.jsonnet"; +local tools_maker = import 'pgrapher/common/tools.jsonnet'; +local tools = tools_maker(params); +local anode = tools.anodes[0]; +local anodes = tools.anodes; +local clus = import "pgrapher/common/clus.jsonnet"; + + + +// The TDM datapath to find point trees. This needs coordination between a few +// nodes. This provides the default but the construction functions allow +// override. 
+local pointtree_datapath = "pointtrees/%d"; + +// This object holds a bunch of functions that construct parts of the graph. We +// use this object at teh end to build the full graph. Many functions are not +// needed. This "ub" object could be shared more globally to assist in building +// novel uboone-specific graphs. +local ub = { + anode: anode, + + bs_live : { + type: "BlobSampler", + name: "live", + data: { + time_offset: -1600 * wc.us + 6 * wc.mm/self.drift_speed, + drift_speed: 1.101 * wc.mm / wc.us, + strategy: [ + "stepped", + ], + extra: [".*wire_index", ".*charge.*", "wpid"] // + } + }, + + // Special for improvedCluster retiling + bs_live_no_dead_mix : { + type: "BlobSampler", + name: "live_no_dead_mix", + data: { + time_offset: -1600 * wc.us + 6 * wc.mm/self.drift_speed, + drift_speed: 1.101 * wc.mm / wc.us, + strategy: { + "name": "charge_stepped", + "disable_mix_dead_cell": false, // This is the key change + }, + extra: [".*wire_index", ".*charge.*", "wpid"] + } + }, + + bs_dead : { + type: "BlobSampler", + name: "dead", + data: { + strategy: [ + "center", + ], + extra: [".*"] // want all the extra + } + }, + + local pctransforms = { + type: "PCTransformSet", + name: "", + data: { detector_volumes: wc.tn(detector_volumes) }, + uses: [detector_volumes] + }, + + local detector_volumes = + { + type: "DetectorVolumes", + name: "", + data: { + anodes: [wc.tn(a) for a in tools.anodes], + metadata: + {overall: { + FV_xmin: 1 * wc.cm, + FV_xmax: 255 * wc.cm, + FV_ymin: -99.5 * wc.cm, + FV_ymax: 101.5 * wc.cm, + FV_zmin: 15 * wc.cm, + FV_zmax: 1022 * wc.cm, + FV_xmin_margin: 2 * wc.cm, + FV_xmax_margin: 2 * wc.cm, + FV_ymin_margin: 2.5 * wc.cm, + FV_ymax_margin: 2.5 * wc.cm, + FV_zmin_margin: 3 * wc.cm, + FV_zmax_margin: 3 * wc.cm, + vertical_dir: [0,1,0], + beam_dir: [0,0,1] + }} + + { + [ "a" + std.toString(a.data.ident) + "f0pA" ]: { + drift_speed: 1.101 * wc.mm / wc.us, + tick: 0.5 * wc.us, // 0.5 mm per tick + tick_drift: self.drift_speed * self.tick, + 
time_offset: -1600 * wc.us + 6 * wc.mm/self.drift_speed, + nticks_live_slice: 4, + FV_xmin: 1 * wc.cm, + FV_xmax: 255 * wc.cm, + FV_xmin_margin: 2 * wc.cm, + FV_xmax_margin: 2 * wc.cm, + } for a in tools.anodes + } + }, + }, + + UbooneBlobSource(fname, kind /*live or dead*/, views /* uvw, uv, vw, wu */) :: pg.pnode({ + type: 'UbooneBlobSource', + name: kind+'-'+views, + data: { + input: fname, + anode: wc.tn(anode), + kind: kind, + views: views, + } + }, nin=0, nout=1, uses=[anode]), + + UbooneClusterSource(fname, sampler=$.bs_live, datapath=pointtree_datapath, optical=true, kind="live") :: pg.pnode({ + type: 'UbooneClusterSource', + name: sampler.name, + data: { + input: fname, // file name or list + anode: wc.tn(anode), + datapath: datapath + '/' + kind, // see issue #375 + sampler: wc.tn(sampler), + kind: kind, + } + if optical then { + light: "light", flash: "flash", flashlight: "flashlight" + } else {} + }, nin=1, nout=1, uses=[sampler, anode]), + + TensorSetFanin(multiplicity=2, tensor_order=[0,1]) :: pg.pnode({ + type: 'TensorSetFanin', + name: '', + data: { + multiplicity: multiplicity, + tensor_order: tensor_order, + } + }, nin=multiplicity, nout=1), + + ClusterFlashDump(datapath=pointtree_datapath, kind='live') :: pg.pnode({ + type: 'ClusterFlashDump', + name: "", + data: { + datapath: datapath + '/' + kind, // see issue #375 + }, + }, nin=1, nout=0), + + BlobSetMerge(kind, multiplicity) :: pg.pnode({ + type: "BlobSetMerge", + name: kind, + data: { multiplicity: multiplicity, }, + }, nin=multiplicity, nout=1), + + // Make one UbooneBlobSource for view and multiplex their output + multiplex_blob_views(iname, kind, views) :: + local nviews = std.length(views); + local srcs = [ $.UbooneBlobSource(iname, kind, view), for view in views ]; + local bsm = $.BlobSetMerge(kind, nviews); + pg.intern(innodes = srcs, outnodes=[bsm], + edges = [ pg.edge(srcs[ind], bsm, 0, ind), + for ind in std.range(0, nviews-1) ]), + + BlobClustering(name) :: pg.pnode({ + type: 
'BlobClustering', + name: name, + data: { + policy: "uboone", + }, + }, nin=1, nout=1), + + ClusterFileSource(fname) :: pg.pnode({ + type: 'ClusterFileSource', + name: fname, + data: { + inname: fname, + anodes: [wc.tn(anode)], + }, + }, nin=0, nout=1, uses=[anode]), + + ClusterFileSink(fname) :: pg.pnode({ + type: 'ClusterFileSink', + name: fname, + data: { + format: "numpy", + outname: fname, + }, + }, nin=1, nout=0), + + ProjectionDeghosting(name) :: pg.pnode({ + type: 'ProjectionDeghosting', + name: name, + data: {}, + }, nin=1, nout=1), + + InSliceDeghosting(name, round /*1,2,3*/) :: pg.pnode({ + type: "InSliceDeghosting", + name: name, + data: { + config_round: round, + } + }, nin=1, nout=1), + + BlobGrouping(name) :: pg.pnode({ + type: "BlobGrouping", + name: name, + data: { } + }, nin=1, nout=1), + + ChargeSolving(name, weighting /* uniform, uboone */) :: pg.pnode({ + type: "ChargeSolving", + name: name, + data: { + weighting_strategies: [weighting], + } + }, nin=1, nout=1), + + LocalGeomClustering(name) :: pg.pnode({ + type: "LocalGeomClustering", + name: name, + data: { }, + }, nin=1, nout=1), + + + GlobalGeomClustering(name, policy="uboone") :: pg.pnode({ + type: "GlobalGeomClustering", + name: name, + data: { + clustering_policy: policy, + }, + }, nin=1, nout=1), + + // PointTreeBuilding() :: pg.pnode({ + // type: "PointTreeBuilding", + // name: "", + // data: { + // samplers: { + // "3d": wc.tn($.bs_live), + // "dead": wc.tn($.bs_dead), + // }, + // multiplicity: 2, + // tags: ["live", "dead"], + // anode: wc.tn(anode), + // face: 0, + // detector_volumes: "DetectorVolumes", + // } + // }, nin=2, nout=1, uses=[$.bs_live, $.bs_dead, detector_volumes]), + + // point_tree_source(livefn, deadfn) :: + // local livesrc = $.ClusterFileSource(livefn); + // local deadsrc = $.ClusterFileSource(deadfn); + // local ptb = $.PointTreeBuilding(); + // pg.intern(innodes=[livesrc, deadsrc], outnodes=[ptb], + // edges=[ pg.edge(livesrc, ptb, 0, 0), + // pg.edge(deadsrc, 
ptb, 0, 1) ] + // ), + + BeeBlobSink(fname, sampler) :: pg.pnode({ + type: "BeeBlobSink", + name: fname, + data: { + geom: "uboone", + type: "wcp", + outname: fname, + samplers: wc.tn(sampler) + }, + }, nin=1, nout=0, uses=[sampler]), + + BeeBlobTap(fname) :: + local sink = $.BeeBlobSink(fname); + local fan = pg.pnode({ + type:'BlobSetFanout', + name:fname, + data: { multiplicity: 2 }, + }, nin=1, nout=2); + pg.intern(innodes=[fan], centernodes=[sink], + edges=[ pg.edge(fan, sink, 1, 0) ]), + + MultiAlgBlobClustering(beezip, datapath=pointtree_datapath, live_sampler=$.bs_live) :: + local cm = clus.clustering_methods(detector_volumes=detector_volumes, + pc_transforms=pctransforms); + local retiler = cm.retiler(anodes=anodes, + samplers=[clus.sampler(live_sampler, apa=0, face=0)], + cut_time_low=3*wc.us, cut_time_high=5*wc.us); + + local improve_cluster_2 = cm.improve_cluster_2(anodes=anodes, + samplers=[clus.sampler($.bs_live_no_dead_mix, apa=0, face=0)], + verbose=true); + + local cm_pipeline = [ + cm.tagger_flag_transfer("tagger"), + cm.clustering_recovering_bundle("recover_bundle"), + cm.switch_scope(), + // cm.examine_bundles(), + // cm.retile(retiler=retiler), + cm.steiner(retiler=improve_cluster_2), + ]; + pg.pnode({ + type: "MultiAlgBlobClustering", + name: "", + data: { + inpath: pointtree_datapath, + outpath: pointtree_datapath, + perf: true, + bee_zip: beezip, + initial_index: 0, + use_config_rse: true, // Enable use of configured RSE + runNo: 1, + subRunNo: 1, + eventNo: 1, + save_deadarea: true, + anodes: [wc.tn(a) for a in anodes], + detector_volumes: wc.tn(detector_volumes), + bee_points_sets: [ // New configuration for multiple bee points sets + { + name: "regular", // Name of the bee points set + visitor: "CreateSteinerGraph", + detector: "uboone", // Detector name + algorithm: "regular", // Algorithm identifier + pcname: "3d", // Which scope to use + coords: ["x_t0cor", "y", "z"], // Coordinates to use + individual: false, // Output individual 
APA/Face + filter: 1 // 1 apply scope filter, 0 ignore scope filter, -1 apply inverse scope filter + }, + { + name: "steiner", // Name of the bee points set + visitor: "CreateSteinerGraph", + detector: "uboone", // Detector name + algorithm: "steiner", // Algorithm identifier + pcname: "steiner_pc", // Which scope to use + coords: ["x_t0cor", "y", "z"], // Coordinates to use + individual: false, // Output individual APA/Face + }, + ], + pipeline: wc.tns(cm_pipeline), + cluster_id_order: "size", // or "tree" for insertion order or nothing for no rewriting + } + }, nin=1, nout=1, uses=anodes + [detector_volumes] + cm_pipeline), + + + TensorFileSink(fname) :: pg.pnode({ + type: "TensorFileSink", + name: fname, + data: { + outname: fname, + prefix: "clustering_", + dump_mode: true, + } + }, nin=1, nout=0), + + main(graph, app='Pgrapher', extra_plugins = []) :: + local uses = pg.uses(graph); + local plugins = [ + "WireCellSio", "WireCellAux", + "WireCellGen", "WireCellSigProc", "WireCellImg", "WireCellClus", + "WireCellRoot", + "WireCellApps"] + { + 'TbbFlow': ["WireCellTbb"], + 'Pgrapher': ["WireCellPgraph"], + }[app]; + + local appcfg = { + type: app, + data: { + edges: pg.edges(graph) + }, + }; + local cmdline = { + type: "wire-cell", + data: { + plugins: plugins, + apps: [appcfg.type] + } + }; + [cmdline] + pg.uses(graph) + [appcfg], +}; + + + + +local ingraph_live(infiles, datapath=pointtree_datapath) = pg.pipeline([ + ub.multiplex_blob_views(infiles, 'live', ["uvw","uv","vw","wu"]), + ub.UbooneClusterSource(infiles, datapath=datapath, sampler=ub.bs_live, kind='live') +]); +local ingraph_dead(infiles, datapath=pointtree_datapath) = pg.pipeline([ + ub.multiplex_blob_views(infiles, 'dead', ["uv","vw","wu"]), + ub.UbooneClusterSource(infiles, datapath=datapath, sampler=ub.bs_dead, kind='dead', optical=false) +]); +local outgraph(beezip, datapath=pointtree_datapath) = pg.pipeline([ + ub.MultiAlgBlobClustering(beezip, datapath=datapath), + 
ub.ClusterFlashDump(datapath=datapath) +]); + + +local graphs = { + live :: function(infiles, beezip, datapath) + pg.pipeline([ingraph_live(infiles, datapath), outgraph(beezip, datapath)]), + + dead :: function(infiles, beezip, datapath) + pg.pipeline([ingraph_dead(infiles, datapath), outgraph(beezip, datapath)]), + + both :: function(infiles, beezip, datapath) + local live = ingraph_live(infiles, datapath); + local dead = ingraph_dead(infiles, datapath); + local out = outgraph(beezip, datapath); + local fanin = ub.TensorSetFanin(); + pg.intern(innodes=[live,dead], outnodes=[out], centernodes=[fanin], + edges=[ + pg.edge(live,fanin,0,0), + pg.edge(dead,fanin,0,1), + pg.edge(fanin,out,0,0)]) +}; + +local extra_plugins = ["WireCellAux", "WireCellRoot", "WireCellClus"]; + +// kind can be "live", "dead" or "both". +function(infiles="uboone.root", beezip="bee.zip", kind="live", datapath=pointtree_datapath) + ub.main(graphs[kind](infiles, beezip, datapath), "Pgrapher", extra_plugins) diff --git a/clus/test/test-porting/stm/main.jsonnet b/clus/test/test-porting/stm/main.jsonnet new file mode 100644 index 000000000..61897a3ce --- /dev/null +++ b/clus/test/test-porting/stm/main.jsonnet @@ -0,0 +1,443 @@ +// This job inputs Uboone ROOT files (TC, etc), runs MABC, dumps to bee. +// +// It tests porting of WCP up to and including "STM tagger" functionality. 
+// +// This is yet another extension to a growing, incremental chain: +// stm > steiner > qlport +// +// Use it from the corresponding bats script: +// +// $ bats-debug -f stm clus/test/test-porting.bats +// $ ls tmp/stm/ +// +// Hint: put test/scripts/ in your PATH to find bats-debug +// + +local wc = import "wirecell.jsonnet"; +local pg = import "pgraph.jsonnet"; + +local params = import "pgrapher/experiment/uboone/simparams.jsonnet"; +local tools_maker = import 'pgrapher/common/tools.jsonnet'; +local tools = tools_maker(params); +local anode = tools.anodes[0]; +local anodes = tools.anodes; +local clus = import "pgrapher/common/clus.jsonnet"; + + + +// The TDM datapath to find point trees. This needs coordination between a few +// nodes. This provides the default but the construction functions allow +// override. +local pointtree_datapath = "pointtrees/%d"; + +// This object holds a bunch of functions that construct parts of the graph. We +// use this object at the end to build the full graph. Many functions are not +// needed. This "ub" object could be shared more globally to assist in building +// novel uboone-specific graphs. 
+local ub = { + anode: anode, + + bs_live : { + type: "BlobSampler", + name: "live", + data: { + time_offset: -1600 * wc.us + 6 * wc.mm/self.drift_speed, + drift_speed: 1.101 * wc.mm / wc.us, + strategy: [ + "stepped", + ], + extra: [".*wire_index", ".*charge.*", "wpid"] // + } + }, + + // Special for improvedCluster retiling + bs_live_no_dead_mix : { + type: "BlobSampler", + name: "live_no_dead_mix", + data: { + time_offset: -1600 * wc.us + 6 * wc.mm/self.drift_speed, + drift_speed: 1.101 * wc.mm / wc.us, + strategy: { + "name": "charge_stepped", + "disable_mix_dead_cell": false, // This is the key change + }, + extra: [".*wire_index", ".*charge.*", "wpid"] + } + }, + + bs_dead : { + type: "BlobSampler", + name: "dead", + data: { + strategy: [ + "center", + ], + extra: [".*"] // want all the extra + } + }, + + local pctransforms = { + type: "PCTransformSet", + name: "", + data: { detector_volumes: wc.tn(detector_volumes) }, + uses: [detector_volumes] + }, + + local detector_volumes = + { + type: "DetectorVolumes", + name: "", + data: { + anodes: [wc.tn(a) for a in tools.anodes], + metadata: + {overall: { + FV_xmin: 1 * wc.cm, + FV_xmax: 255 * wc.cm, + FV_ymin: -99.5 * wc.cm, + FV_ymax: 101.5 * wc.cm, + FV_zmin: 15 * wc.cm, + FV_zmax: 1022 * wc.cm, + FV_xmin_margin: 2 * wc.cm, + FV_xmax_margin: 2 * wc.cm, + FV_ymin_margin: 2.5 * wc.cm, + FV_ymax_margin: 2.5 * wc.cm, + FV_zmin_margin: 3 * wc.cm, + FV_zmax_margin: 3 * wc.cm, + vertical_dir: [0,1,0], + beam_dir: [0,0,1] + }} + + { + [ "a" + std.toString(a.data.ident) + "f0pA" ]: { + drift_speed: 1.101 * wc.mm / wc.us, + tick: 0.5 * wc.us, // 0.5 mm per tick + tick_drift: self.drift_speed * self.tick, + time_offset: -1600 * wc.us + 6 * wc.mm/self.drift_speed, + nticks_live_slice: 4, + FV_xmin: 1 * wc.cm, + FV_xmax: 255 * wc.cm, + FV_xmin_margin: 2 * wc.cm, + FV_xmax_margin: 2 * wc.cm, + } for a in tools.anodes + } + }, + }, + + // DetectorVolumes component happens to ALSO implement IFiducial. 
+ // To start with, we will use it to provide that interface. + local fiducial = detector_volumes, + + UbooneBlobSource(fname, kind /*live or dead*/, views /* uvw, uv, vw, wu */) :: pg.pnode({ + type: 'UbooneBlobSource', + name: kind+'-'+views, + data: { + input: fname, + anode: wc.tn(anode), + kind: kind, + views: views, + } + }, nin=0, nout=1, uses=[anode]), + + UbooneClusterSource(fname, sampler=$.bs_live, datapath=pointtree_datapath, optical=true, kind="live") :: pg.pnode({ + type: 'UbooneClusterSource', + name: sampler.name, + data: { + input: fname, // file name or list + anode: wc.tn(anode), + datapath: datapath + '/' + kind, // see issue #375 + sampler: wc.tn(sampler), + kind: kind, + } + if optical then { + light: "light", flash: "flash", flashlight: "flashlight" + } else {} + }, nin=1, nout=1, uses=[sampler, anode]), + + TensorSetFanin(multiplicity=2, tensor_order=[0,1]) :: pg.pnode({ + type: 'TensorSetFanin', + name: '', + data: { + multiplicity: multiplicity, + tensor_order: tensor_order, + } + }, nin=multiplicity, nout=1), + + ClusterFlashDump(datapath=pointtree_datapath, kind='live') :: pg.pnode({ + type: 'ClusterFlashDump', + name: "", + data: { + datapath: datapath + '/' + kind, // see issue #375 + }, + }, nin=1, nout=0), + + BlobSetMerge(kind, multiplicity) :: pg.pnode({ + type: "BlobSetMerge", + name: kind, + data: { multiplicity: multiplicity, }, + }, nin=multiplicity, nout=1), + + // Make one UbooneBlobSource for view and multiplex their output + multiplex_blob_views(iname, kind, views) :: + local nviews = std.length(views); + local srcs = [ $.UbooneBlobSource(iname, kind, view), for view in views ]; + local bsm = $.BlobSetMerge(kind, nviews); + pg.intern(innodes = srcs, outnodes=[bsm], + edges = [ pg.edge(srcs[ind], bsm, 0, ind), + for ind in std.range(0, nviews-1) ]), + + BlobClustering(name) :: pg.pnode({ + type: 'BlobClustering', + name: name, + data: { + policy: "uboone", + }, + }, nin=1, nout=1), + + ClusterFileSource(fname) :: pg.pnode({ 
+ type: 'ClusterFileSource', + name: fname, + data: { + inname: fname, + anodes: [wc.tn(anode)], + }, + }, nin=0, nout=1, uses=[anode]), + + ClusterFileSink(fname) :: pg.pnode({ + type: 'ClusterFileSink', + name: fname, + data: { + format: "numpy", + outname: fname, + }, + }, nin=1, nout=0), + + ProjectionDeghosting(name) :: pg.pnode({ + type: 'ProjectionDeghosting', + name: name, + data: {}, + }, nin=1, nout=1), + + InSliceDeghosting(name, round /*1,2,3*/) :: pg.pnode({ + type: "InSliceDeghosting", + name: name, + data: { + config_round: round, + } + }, nin=1, nout=1), + + BlobGrouping(name) :: pg.pnode({ + type: "BlobGrouping", + name: name, + data: { } + }, nin=1, nout=1), + + ChargeSolving(name, weighting /* uniform, uboone */) :: pg.pnode({ + type: "ChargeSolving", + name: name, + data: { + weighting_strategies: [weighting], + } + }, nin=1, nout=1), + + LocalGeomClustering(name) :: pg.pnode({ + type: "LocalGeomClustering", + name: name, + data: { }, + }, nin=1, nout=1), + + + GlobalGeomClustering(name, policy="uboone") :: pg.pnode({ + type: "GlobalGeomClustering", + name: name, + data: { + clustering_policy: policy, + }, + }, nin=1, nout=1), + + // PointTreeBuilding() :: pg.pnode({ + // type: "PointTreeBuilding", + // name: "", + // data: { + // samplers: { + // "3d": wc.tn($.bs_live), + // "dead": wc.tn($.bs_dead), + // }, + // multiplicity: 2, + // tags: ["live", "dead"], + // anode: wc.tn(anode), + // face: 0, + // detector_volumes: "DetectorVolumes", + // } + // }, nin=2, nout=1, uses=[$.bs_live, $.bs_dead, detector_volumes]), + + // point_tree_source(livefn, deadfn) :: + // local livesrc = $.ClusterFileSource(livefn); + // local deadsrc = $.ClusterFileSource(deadfn); + // local ptb = $.PointTreeBuilding(); + // pg.intern(innodes=[livesrc, deadsrc], outnodes=[ptb], + // edges=[ pg.edge(livesrc, ptb, 0, 0), + // pg.edge(deadsrc, ptb, 0, 1) ] + // ), + + BeeBlobSink(fname, sampler) :: pg.pnode({ + type: "BeeBlobSink", + name: fname, + data: { + geom: 
"uboone", + type: "wcp", + outname: fname, + samplers: wc.tn(sampler) + }, + }, nin=1, nout=0, uses=[sampler]), + + BeeBlobTap(fname) :: + local sink = $.BeeBlobSink(fname); + local fan = pg.pnode({ + type:'BlobSetFanout', + name:fname, + data: { multiplicity: 2 }, + }, nin=1, nout=2); + pg.intern(innodes=[fan], centernodes=[sink], + edges=[ pg.edge(fan, sink, 1, 0) ]), + + MultiAlgBlobClustering(beezip, datapath=pointtree_datapath, live_sampler=$.bs_live) :: + local cm = clus.clustering_methods(detector_volumes=detector_volumes, + pc_transforms=pctransforms, + fiducial=fiducial); + local retiler = cm.retiler(anodes=anodes, + samplers=[clus.sampler(live_sampler, apa=0, face=0)], + cut_time_low=3*wc.us, cut_time_high=5*wc.us); + + local improve_cluster_2 = cm.improve_cluster_2(anodes=anodes, + samplers=[clus.sampler($.bs_live_no_dead_mix, apa=0, face=0)], + verbose=true); + + local cm_pipeline = [ + cm.tagger_flag_transfer("tagger"), + cm.clustering_recovering_bundle("recover_bundle"), + cm.switch_scope(), + // cm.examine_bundles(), + // cm.retile(retiler=retiler), + cm.steiner(retiler=improve_cluster_2), + cm.fiducialutils(), + // ... 
in future add stmtagger() or etc here + ]; + pg.pnode({ + type: "MultiAlgBlobClustering", + name: "", + data: { + inpath: pointtree_datapath, + outpath: pointtree_datapath, + perf: true, + bee_zip: beezip, + initial_index: 0, + use_config_rse: true, // Enable use of configured RSE + runNo: 1, + subRunNo: 1, + eventNo: 1, + save_deadarea: true, + anodes: [wc.tn(a) for a in anodes], + detector_volumes: wc.tn(detector_volumes), + bee_points_sets: [ // New configuration for multiple bee points sets + { + name: "regular", // Name of the bee points set + visitor: "CreateSteinerGraph", + detector: "uboone", // Detector name + algorithm: "regular", // Algorithm identifier + pcname: "3d", // Which scope to use + coords: ["x_t0cor", "y", "z"], // Coordinates to use + individual: false, // Output individual APA/Face + filter: 1 // 1 apply scope filter, 0 ignore scope filter, -1 apply inverse scope filter + }, + { + name: "steiner", // Name of the bee points set + visitor: "CreateSteinerGraph", + detector: "uboone", // Detector name + algorithm: "steiner", // Algorithm identifier + pcname: "steiner_pc", // Which scope to use + coords: ["x_t0cor", "y", "z"], // Coordinates to use + individual: false, // Output individual APA/Face + }, + ], + pipeline: wc.tns(cm_pipeline), + //cluster_id_order: "size", // or "tree" for insertion order or nothing for no rewriting + } + }, nin=1, nout=1, uses=anodes + [detector_volumes] + cm_pipeline), + + + TensorFileSink(fname) :: pg.pnode({ + type: "TensorFileSink", + name: fname, + data: { + outname: fname, + prefix: "clustering_", + dump_mode: true, + } + }, nin=1, nout=0), + + main(graph, app='Pgrapher', extra_plugins = []) :: + local uses = pg.uses(graph); + local plugins = [ + "WireCellSio", "WireCellAux", + "WireCellGen", "WireCellSigProc", "WireCellImg", "WireCellClus", + "WireCellRoot", + "WireCellApps"] + { + 'TbbFlow': ["WireCellTbb"], + 'Pgrapher': ["WireCellPgraph"], + }[app]; + + local appcfg = { + type: app, + data: { + edges: 
pg.edges(graph) + }, + }; + local cmdline = { + type: "wire-cell", + data: { + plugins: plugins, + apps: [appcfg.type] + } + }; + [cmdline] + pg.uses(graph) + [appcfg], +}; + + + + +local ingraph_live(infiles, datapath=pointtree_datapath) = pg.pipeline([ + ub.multiplex_blob_views(infiles, 'live', ["uvw","uv","vw","wu"]), + ub.UbooneClusterSource(infiles, datapath=datapath, sampler=ub.bs_live, kind='live') +]); +local ingraph_dead(infiles, datapath=pointtree_datapath) = pg.pipeline([ + ub.multiplex_blob_views(infiles, 'dead', ["uv","vw","wu"]), + ub.UbooneClusterSource(infiles, datapath=datapath, sampler=ub.bs_dead, kind='dead', optical=false) +]); +local outgraph(beezip, datapath=pointtree_datapath) = pg.pipeline([ + ub.MultiAlgBlobClustering(beezip, datapath=datapath), + ub.ClusterFlashDump(datapath=datapath) +]); + + +local graphs = { + live :: function(infiles, beezip, datapath) + pg.pipeline([ingraph_live(infiles, datapath), outgraph(beezip, datapath)]), + + dead :: function(infiles, beezip, datapath) + pg.pipeline([ingraph_dead(infiles, datapath), outgraph(beezip, datapath)]), + + both :: function(infiles, beezip, datapath) + local live = ingraph_live(infiles, datapath); + local dead = ingraph_dead(infiles, datapath); + local out = outgraph(beezip, datapath); + local fanin = ub.TensorSetFanin(); + pg.intern(innodes=[live,dead], outnodes=[out], centernodes=[fanin], + edges=[ + pg.edge(live,fanin,0,0), + pg.edge(dead,fanin,0,1), + pg.edge(fanin,out,0,0)]) +}; + +local extra_plugins = ["WireCellAux", "WireCellRoot", "WireCellClus"]; + +// kind can be "live", "dead" or "both". 
+function(infiles="uboone.root", beezip="bee.zip", kind="live", datapath=pointtree_datapath) + ub.main(graphs[kind](infiles, beezip, datapath), "Pgrapher", extra_plugins) diff --git a/clus/test/test_graph_edge_filter.cxx b/clus/test/test_graph_edge_filter.cxx new file mode 100644 index 000000000..52bb132c9 --- /dev/null +++ b/clus/test/test_graph_edge_filter.cxx @@ -0,0 +1,121 @@ +#include "WireCellUtil/Logging.h" + + +// Boost Graph Library includes +#include +#include // For print_graph +#include // For filtered_graph + +#include +#include +#include // For std::function + + +using spdlog::debug; + +// --- Your Boost Graph Related Types --- +using edge_weight_type = double; +using Graph = boost::adjacency_list< + boost::vecS, // vertices + boost::vecS, // edges + boost::undirectedS, // edge direction (none) + boost::property, + boost::property + >; +using graph_type = Graph; +using vertex_type = boost::graph_traits::vertex_descriptor; +using edge_type = boost::graph_traits::edge_descriptor; + +// A set of unique vertices or edges; +using vertex_set = std::set; +using edge_set = std::set; + +// Filtered graphs and their predicates. +using vertex_predicate = std::function; +using edge_predicate = std::function; +using filtered_graph_type = boost::filtered_graph; + +// --- Your reduce_edges Function --- +filtered_graph_type reduce_edges(const graph_type& graph, const edge_set& edges, bool accept) +{ + // The 'filter' lambda captures 'edges' and 'accept' by reference. + // This lambda will be called for each edge when iterating the filtered graph. + auto filter = [&](edge_type edge) { + // This debug statement will only print if the predicate is actually invoked. 
+ debug(" [Predicate Call] Checking edge: {} -- {}", + boost::source(edge, graph), boost::target(edge, graph)); + return accept == (edges.count(edge) > 0); + }; + + edge_predicate epred = filter; + vertex_predicate vpred = boost::keep_all(); // Keep all vertices regardless of edges + + // Construct the filtered graph view. + return filtered_graph_type(graph, epred, vpred); +} + +int main() +{ + // Create a sample graph + graph_type g(5); // 5 vertices (0, 1, 2, 3, 4) + + // Add some edges + edge_type e01, e02, e12, e13, e24; + bool b; + + boost::tie(e01, b) = add_edge(0, 1, 1.0, g); + boost::tie(e02, b) = add_edge(0, 2, 2.0, g); + boost::tie(e12, b) = add_edge(1, 2, 3.0, g); + boost::tie(e13, b) = add_edge(1, 3, 4.0, g); + boost::tie(e24, b) = add_edge(2, 4, 5.0, g); + + std::cout << "Original Graph Edges:" << std::endl; + // Iterate and print edges of the original graph to confirm setup + auto edge_it_orig = boost::edges(g); + for (; edge_it_orig.first != edge_it_orig.second; ++edge_it_orig.first) { + std::cout << " " << boost::source(*edge_it_orig.first, g) << " -- " + << boost::target(*edge_it_orig.first, g) << std::endl; + } + std::cout << "Number of edges in original graph: " << boost::num_edges(g) << std::endl << std::endl; + + + // --- Test Case 1: Accept only a specific set of edges --- + edge_set edges_to_retain; + edges_to_retain.insert(e01); + edges_to_retain.insert(e13); + + std::cout << "Reducing edges (accepting e01, e13):" << std::endl; + filtered_graph_type fg_accept = reduce_edges(g, edges_to_retain, true); + + // ************* IMPORTANT: THIS IS WHERE THE PREDICATE IS INVOKED ************* + // Iterate over the edges of the filtered graph to trigger predicate calls + std::cout << "Edges in filtered graph (accepting):" << std::endl; + auto edge_it_filtered_accept = boost::edges(fg_accept); + for (; edge_it_filtered_accept.first != edge_it_filtered_accept.second; ++edge_it_filtered_accept.first) { + std::cout << " " << 
boost::source(*edge_it_filtered_accept.first, fg_accept) << " -- " + << boost::target(*edge_it_filtered_accept.first, fg_accept) << std::endl; + } + std::cout << "Number of edges in filtered graph (accepting): " << boost::num_edges(fg_accept) << std::endl << std::endl; + + + // --- Test Case 2: Exclude a specific set of edges --- + edge_set edges_to_exclude; + edges_to_exclude.insert(e01); // Exclude 0-1 + edges_to_exclude.insert(e24); // Exclude 2-4 + + std::cout << "Reducing edges (excluding e01, e24):" << std::endl; + filtered_graph_type fg_exclude = reduce_edges(g, edges_to_exclude, false); + + // ************* IMPORTANT: THIS IS WHERE THE PREDICATE IS INVOKED ************* + // Iterate over the edges of the filtered graph to trigger predicate calls + std::cout << "Edges in filtered graph (excluding):" << std::endl; + auto edge_it_filtered_exclude = boost::edges(fg_exclude); + for (; edge_it_filtered_exclude.first != edge_it_filtered_exclude.second; ++edge_it_filtered_exclude.first) { + std::cout << " " << boost::source(*edge_it_filtered_exclude.first, fg_exclude) << " -- " + << boost::target(*edge_it_filtered_exclude.first, fg_exclude) << std::endl; + } + std::cout << "Number of edges in filtered graph (excluding): " << boost::num_edges(fg_exclude) << std::endl << std::endl; + + return 0; +} + diff --git a/clus/test/test_wpid.cxx b/clus/test/test_wpid.cxx new file mode 100644 index 000000000..e09cc2a32 --- /dev/null +++ b/clus/test/test_wpid.cxx @@ -0,0 +1,46 @@ + +#include "WireCellIface/WirePlaneId.h" + +#include +#include +#include + +using namespace WireCell; + +int main () +{ + WireCell::WirePlaneId a5f0pA(kAllLayers, 0, 5); + WireCell::WirePlaneId a5f1pA(kAllLayers, 1, 5); + std::cout << "a5f0pA: " << a5f0pA.name() << " ident " << a5f0pA.ident() << std::endl; + std::cout << "a5f1pA: " << a5f1pA.name() << " ident " << a5f1pA.ident() << std::endl; + std::cout << " a5f0pA < a5f1pA? " << (a5f0pA < a5f1pA) << std::endl; + std::cout << " a5f1pA < a5f0pA? 
" << (a5f1pA < a5f0pA) << std::endl; + std::cout << " a5f0pA == a5f1pA? " << (a5f0pA == a5f1pA) << std::endl; + std::cout << " a5f0pA != a5f1pA? " << (a5f0pA != a5f1pA) << std::endl; + + { + std::set wpid_set; + wpid_set.insert(a5f0pA); + wpid_set.insert(a5f1pA); + for (auto wpid : wpid_set) { + std::cout << "wpid_set [0, 1]: " << wpid.name() << std::endl; + } + } + { + std::set wpid_set; + wpid_set.insert(a5f1pA); + wpid_set.insert(a5f0pA); + for (auto wpid : wpid_set) { + std::cout << "wpid_set [1, 0]: " << wpid.name() << std::endl; + } + } + { + std::set int_set; + int_set.insert(a5f1pA.ident()); + int_set.insert(a5f0pA.ident()); + for (auto wpid_ident : int_set) { + std::cout << "int_set: " << WirePlaneId(wpid_ident).name() << std::endl; + } + } + return 0; +} \ No newline at end of file diff --git a/clus/test/uboone-mabc.jsonnet b/clus/test/uboone-mabc.jsonnet index 06a714fa4..fa74c04a4 100644 --- a/clus/test/uboone-mabc.jsonnet +++ b/clus/test/uboone-mabc.jsonnet @@ -7,7 +7,7 @@ // -A infiles=nuselEval_5384_137_6852.root \ // clus/test/uboone-mabc.jsonnet // -// The "kind" can be "live" or "both" (live and dead - the default). +// The "kind" can be "live" or "both" (live and dead). 
local wc = import "wirecell.jsonnet"; @@ -23,6 +23,9 @@ local params = import "pgrapher/experiment/uboone/simparams.jsonnet"; local tools_maker = import 'pgrapher/common/tools.jsonnet'; local tools = tools_maker(params); local anode = tools.anodes[0]; +local anodes = tools.anodes; +local clus = import "pgrapher/common/clus.jsonnet"; + @@ -44,10 +47,26 @@ local ub = { data: { time_offset: -1600 * wc.us + 6 * wc.mm/self.drift_speed, drift_speed: 1.101 * wc.mm / wc.us, - strategy: [ - "stepped", - ], - extra: [".*wire_index"] // + strategy: { + "name": "charge_stepped", + "disable_mix_dead_cell": true, + }, + extra: [".*wire_index", ".*charge.*", "wpid"] // + } + }, + + // Special for improvedCluster retiling + bs_live_no_dead_mix : { + type: "BlobSampler", + name: "live_no_dead_mix", + data: { + time_offset: -1600 * wc.us + 6 * wc.mm/self.drift_speed, + drift_speed: 1.101 * wc.mm / wc.us, + strategy: { + "name": "charge_stepped", + "disable_mix_dead_cell": false, // This is the key change + }, + extra: [".*wire_index", ".*charge.*", "wpid"] } }, @@ -61,6 +80,52 @@ local ub = { extra: [".*"] // want all the extra } }, + + local pctransforms = { + type: "PCTransformSet", + name: "", + data: { detector_volumes: wc.tn(detector_volumes) }, + uses: [detector_volumes] + }, + + local detector_volumes = + { + type: "DetectorVolumes", + name: "", + data: { + anodes: [wc.tn(a) for a in tools.anodes], + metadata: + {overall: { + FV_xmin: 1 * wc.cm, + FV_xmax: 255 * wc.cm, + FV_ymin: -99.5 * wc.cm, + FV_ymax: 101.5 * wc.cm, + FV_zmin: 15 * wc.cm, + FV_zmax: 1022 * wc.cm, + FV_xmin_margin: 2 * wc.cm, + FV_xmax_margin: 2 * wc.cm, + FV_ymin_margin: 2.5 * wc.cm, + FV_ymax_margin: 2.5 * wc.cm, + FV_zmin_margin: 3 * wc.cm, + FV_zmax_margin: 3 * wc.cm, + vertical_dir: [0,1,0], + beam_dir: [0,0,1] + }} + + { + [ "a" + std.toString(a.data.ident) + "f0pA" ]: { + drift_speed: 1.101 * wc.mm / wc.us, + tick: 0.5 * wc.us, // 0.5 mm per tick + tick_drift: self.drift_speed * self.tick, + 
time_offset: -1600 * wc.us + 6 * wc.mm/self.drift_speed, + nticks_live_slice: 4, + FV_xmin: 1 * wc.cm, + FV_xmax: 255 * wc.cm, + FV_xmin_margin: 2 * wc.cm, + FV_xmax_margin: 2 * wc.cm, + } for a in tools.anodes + } + }, + }, UbooneBlobSource(fname, kind /*live or dead*/, views /* uvw, uv, vw, wu */) :: pg.pnode({ type: 'UbooneBlobSource', @@ -78,13 +143,14 @@ local ub = { name: sampler.name, data: { input: fname, // file name or list + anode: wc.tn(anode), datapath: datapath + '/' + kind, // see issue #375 sampler: wc.tn(sampler), kind: kind, } + if optical then { light: "light", flash: "flash", flashlight: "flashlight" } else {} - }, nin=1, nout=1, uses=[sampler]), + }, nin=1, nout=1, uses=[sampler, anode]), TensorSetFanin(multiplicity=2, tensor_order=[0,1]) :: pg.pnode({ type: 'TensorSetFanin', @@ -187,63 +253,30 @@ local ub = { }, }, nin=1, nout=1), - - SimpleClusGeomHelper() :: { - type: "SimpleClusGeomHelper", - name: "uboone", - data: { - a0f0: { - pitch_u: 3 * wc.mm, - pitch_v: 3 * wc.mm, - pitch_w: 3 * wc.mm, - angle_u: 1.0472, // 60 degrees - angle_v: -1.0472, // -60 degrees - angle_w: 0, // 0 degrees - drift_speed: 1.101 * wc.mm / wc.us, - tick: 0.5 * wc.us, // 0.5 mm per tick - tick_drift: self.drift_speed * self.tick, - time_offset: -1600 * wc.us + 6 * wc.mm/self.drift_speed, - nticks_live_slice: 4, - FV_xmin: 1 * wc.cm, - FV_xmax: 255 * wc.cm, - FV_ymin: -99.5 * wc.cm, - FV_ymax: 101.5 * wc.cm, - FV_zmin: 15 * wc.cm, - FV_zmax: 1022 * wc.cm, - FV_xmin_margin: 2 * wc.cm, - FV_xmax_margin: 2 * wc.cm, - FV_ymin_margin: 2.5 * wc.cm, - FV_ymax_margin: 2.5 * wc.cm, - FV_zmin_margin: 3 * wc.cm, - FV_zmax_margin: 3 * wc.cm - }, - } - }, - - PointTreeBuilding(geom_helper = $.SimpleClusGeomHelper()) :: pg.pnode({ - type: "PointTreeBuilding", - name: "", - data: { - samplers: { - "3d": wc.tn($.bs_live), - "dead": wc.tn($.bs_dead), - }, - multiplicity: 2, - tags: ["live", "dead"], - anode: wc.tn(anode), - face: 0, - geom_helper: wc.tn(geom_helper), - } - }, nin=2, 
nout=1, uses=[$.bs_live, $.bs_dead, geom_helper]), - - point_tree_source(livefn, deadfn) :: - local livesrc = $.ClusterFileSource(livefn); - local deadsrc = $.ClusterFileSource(deadfn); - local ptb = $.PointTreeBuilding(); - pg.intern(innodes=[livesrc, deadsrc], outnodes=[ptb], - edges=[ pg.edge(livesrc, ptb, 0, 0), - pg.edge(deadsrc, ptb, 0, 1) ] - ), + // PointTreeBuilding() :: pg.pnode({ + // type: "PointTreeBuilding", + // name: "", + // data: { + // samplers: { + // "3d": wc.tn($.bs_live), + // "dead": wc.tn($.bs_dead), + // }, + // multiplicity: 2, + // tags: ["live", "dead"], + // anode: wc.tn(anode), + // face: 0, + // detector_volumes: "DetectorVolumes", + // } + // }, nin=2, nout=1, uses=[$.bs_live, $.bs_dead, detector_volumes]), + + // point_tree_source(livefn, deadfn) :: + // local livesrc = $.ClusterFileSource(livefn); + // local deadsrc = $.ClusterFileSource(deadfn); + // local ptb = $.PointTreeBuilding(); + // pg.intern(innodes=[livesrc, deadsrc], outnodes=[ptb], + // edges=[ pg.edge(livesrc, ptb, 0, 0), + // pg.edge(deadsrc, ptb, 0, 1) ] + // ), BeeBlobSink(fname, sampler) :: pg.pnode({ type: "BeeBlobSink", @@ -266,8 +299,30 @@ local ub = { pg.intern(innodes=[fan], centernodes=[sink], edges=[ pg.edge(fan, sink, 1, 0) ]), - MultiAlgBlobClustering(beezip, datapath=pointtree_datapath, live_sampler=$.bs_live, - geom_helper = $.SimpleClusGeomHelper()) :: pg.pnode({ + MultiAlgBlobClustering(beezip, datapath=pointtree_datapath, live_sampler=$.bs_live, + index=0, runNo=1, subRunNo=1, eventNo=1) :: + local cm = clus.clustering_methods(detector_volumes=detector_volumes, + pc_transforms=pctransforms); + local retiler = cm.retiler(anodes=anodes, + samplers=[clus.sampler(live_sampler, apa=0, face=0)], + cut_time_low=3*wc.us, cut_time_high=5*wc.us); + + local improve_cluster_2 = cm.improve_cluster_2(anodes=anodes, + samplers=[clus.sampler($.bs_live_no_dead_mix, apa=0, face=0)], + verbose=true); + + + local cm_pipeline = [ + cm.tagger_flag_transfer("tagger"), + 
cm.clustering_recovering_bundle("recover_bundle"), + cm.switch_scope(), + // cm.examine_bundles(), + // cm.retile(retiler=retiler), + cm.steiner(retiler=improve_cluster_2), + cm.tagger_check_stm(), + cm.do_tracking("","multiple"), + ]; + pg.pnode({ type: "MultiAlgBlobClustering", name: "", data: { @@ -275,37 +330,75 @@ local ub = { outpath: pointtree_datapath, perf: true, bee_zip: beezip, - dump_json: false, // true to produce summary of groupings in JSON for debugging. - initial_index: 0, + initial_index: index, use_config_rse: true, // Enable use of configured RSE - runNo: 1, - subRunNo: 1, - eventNo: 1, + runNo: runNo, + subRunNo: subRunNo, + eventNo: eventNo, save_deadarea: true, - anode: wc.tn(anode), - face: 0, // FIXME: take an IAnodeFace! - geom_helper: wc.tn(geom_helper), - func_cfgs: [ - // {name: "clustering_ctpointcloud"}, - // {name: "clustering_live_dead", dead_live_overlap_offset: 2}, - // {name: "clustering_extend", flag: 4, length_cut: 60 * wc.cm, num_try: 0, length_2_cut: 15 * wc.cm, num_dead_try: 1}, - // {name: "clustering_regular", length_cut: 60*wc.cm, flag_enable_extend: false}, - // {name: "clustering_regular", length_cut: 30*wc.cm, flag_enable_extend: true}, - // {name: "clustering_parallel_prolong", length_cut: 35*wc.cm}, - // {name: "clustering_close", length_cut: 1.2*wc.cm}, - // {name: "clustering_extend_loop", num_try: 3}, - // {name: "clustering_separate", use_ctpc: true}, - // {name: "clustering_connect1"}, - // {name: "clustering_deghost"}, - // {name: "clustering_examine_x_boundary"}, - // {name: "clustering_protect_overclustering"}, - // {name: "clustering_neutrino"}, - // {name: "clustering_isolated"}, - {name: "clustering_examine_bundles"}, - {name: "clustering_retile", sampler: wc.tn(live_sampler), anode: wc.tn(anode), cut_time_low: 3*wc.us, cut_time_high: 5*wc.us}, + anodes: [wc.tn(a) for a in anodes], + detector_volumes: wc.tn(detector_volumes), + bee_points_sets: [ // New configuration for multiple bee points sets + //{ + // 
name: "img", // Name of the bee points set + // detector: "uboone", // Detector name + // algorithm: "img", // Algorithm identifier + // pcname: "3d", // Which scope to use + // coords: ["x", "y", "z"], // Coordinates to use + // individual: false // Whether to output as a whole or individual APA/Face + //}, + //{ + // name: "clustering", // Name of the bee points set + // detector: "uboone", // Detector name + // algorithm: "clustering", // Algorithm identifier + // pcname: "3d", // Which scope to use + // coords: ["x", "y", "z"], // Coordinates to use + // individual: true // Output individual APA/Face + //}, + //{ + // name: "retiled", // Name of the bee points set + // grouping: "shadow", + // detector: "uboone", // Detector name + // algorithm: "retiled", // Algorithm identifier + // pcname: "3d", // Which scope to use + // coords: ["x", "y", "z"], // Coordinates to use + // individual: true // Output individual APA/Face + //}, + //{ + // name: "examine", // Name of the bee points set + // visitor: "ClusteringExamineBundles", + // detector: "uboone", // Detector name + // algorithm: "examine", // Algorithm identifier + // pcname: "3d", // Which scope to use + // coords: ["x", "y", "z"], // Coordinates to use + // individual: true // Output individual APA/Face + //}, + { + name: "regular", // Name of the bee points set + visitor: "CreateSteinerGraph", + detector: "uboone", // Detector name + algorithm: "regular", // Algorithm identifier + pcname: "3d", // Which scope to use + coords: ["x_t0cor", "y", "z"], // Coordinates to use + individual: false, // Output individual APA/Face + filter: 1 // 1 apply scope filter, 0 ignore scope filter, -1 apply inverse scope filter + }, + { + name: "steiner", // Name of the bee points set + visitor: "CreateSteinerGraph", + detector: "uboone", // Detector name + algorithm: "steiner", // Algorithm identifier + pcname: "steiner_pc", // Which scope to use + coords: ["x_t0cor", "y", "z"], // Coordinates to use + individual: false, 
// Output individual APA/Face + }, + ], + pipeline: wc.tns(cm_pipeline), + // cluster_id_order: "size", // or "tree" for insertion order or nothing for no rewriting } - }, nin=1, nout=1, uses=[geom_helper, live_sampler, anode]), + }, nin=1, nout=1, uses=anodes + [detector_volumes] + cm_pipeline), + TensorFileSink(fname) :: pg.pnode({ type: "TensorFileSink", @@ -355,23 +448,29 @@ local ingraph_dead(infiles, datapath=pointtree_datapath) = pg.pipeline([ ub.multiplex_blob_views(infiles, 'dead', ["uv","vw","wu"]), ub.UbooneClusterSource(infiles, datapath=datapath, sampler=ub.bs_dead, kind='dead', optical=false) ]); -local outgraph(beezip, datapath=pointtree_datapath) = pg.pipeline([ - ub.MultiAlgBlobClustering(beezip, datapath=datapath), +local outgraph(beezip, datapath=pointtree_datapath, index=0, runNo=1, subRunNo=1, eventNo=1) = pg.pipeline([ + ub.MultiAlgBlobClustering(beezip, datapath=datapath, index=index, runNo=runNo, subRunNo=subRunNo, eventNo=eventNo), ub.ClusterFlashDump(datapath=datapath) ]); +//local outgraph(beezip, datapath=pointtree_datapath) = pg.pipeline([ +// ub.MultiAlgBlobClustering(beezip, datapath=datapath), +// ub.ClusterFlashDump(datapath=datapath) +//]); local graphs = { - live :: function(infiles, beezip, datapath) - pg.pipeline([ingraph_live(infiles, datapath), outgraph(beezip, datapath)]), + live :: function(infiles, beezip, datapath, index=0, runNo=1, subRunNo=1, eventNo=1) + pg.pipeline([ingraph_live(infiles, datapath), + outgraph(beezip, datapath, index, runNo, subRunNo, eventNo)]), - dead :: function(infiles, beezip, datapath) - pg.pipeline([ingraph_dead(infiles, datapath), outgraph(beezip, datapath)]), + dead :: function(infiles, beezip, datapath, index=0, runNo=1, subRunNo=1, eventNo=1) + pg.pipeline([ingraph_dead(infiles, datapath), + outgraph(beezip, datapath, index, runNo, subRunNo, eventNo)]), - both :: function(infiles, beezip, datapath) + both :: function(infiles, beezip, datapath, index=0, runNo=1, subRunNo=1, eventNo=1) local 
live = ingraph_live(infiles, datapath); local dead = ingraph_dead(infiles, datapath); - local out = outgraph(beezip, datapath); + local out = outgraph(beezip, datapath, index, runNo, subRunNo, eventNo); local fanin = ub.TensorSetFanin(); pg.intern(innodes=[live,dead], outnodes=[out], centernodes=[fanin], edges=[ @@ -380,8 +479,40 @@ local graphs = { pg.edge(fanin,out,0,0)]) }; +//local graphs = { +// live :: function(infiles, beezip, datapath) +// pg.pipeline([ingraph_live(infiles, datapath), outgraph(beezip, datapath)]), +// +// dead :: function(infiles, beezip, datapath) +// pg.pipeline([ingraph_dead(infiles, datapath), outgraph(beezip, datapath)]), +// +// both :: function(infiles, beezip, datapath) +// local live = ingraph_live(infiles, datapath); +// local dead = ingraph_dead(infiles, datapath); +// local out = outgraph(beezip, datapath); +// local fanin = ub.TensorSetFanin(); +// pg.intern(innodes=[live,dead], outnodes=[out], centernodes=[fanin], +// edges=[ +// pg.edge(live,fanin,0,0), +// pg.edge(dead,fanin,0,1), +// pg.edge(fanin,out,0,0)]) +//}; + local extra_plugins = ["WireCellAux", "WireCellRoot", "WireCellClus"]; // kind can be "live", "dead" or "both". 
-function(infiles="uboone.root", beezip="bee.zip", kind="both", datapath=pointtree_datapath) - ub.main(graphs[kind](infiles, beezip, datapath), "Pgrapher", extra_plugins) +function(infiles="uboone.root", beezip="bee.zip", kind="live", datapath=pointtree_datapath, + initial_index="0", initial_runNo="1", initial_subRunNo="1", initial_eventNo="1") + + // Parse the integer values from strings + local index = std.parseInt(initial_index); + local runNo = std.parseInt(initial_runNo); + local subRunNo = std.parseInt(initial_subRunNo); + local eventNo = std.parseInt(initial_eventNo); + + // Use these parameters in the main graph + ub.main(graphs[kind](infiles, beezip, datapath, index, runNo, subRunNo, eventNo), + "Pgrapher", extra_plugins) + +//function(infiles="uboone.root", beezip="bee.zip", kind="live", datapath=pointtree_datapath) +// ub.main(graphs[kind](infiles, beezip, datapath), "Pgrapher", extra_plugins) diff --git a/gen/docs/GaussianDiffusion.md b/gen/docs/GaussianDiffusion.md deleted file mode 100644 index 8c688de05..000000000 --- a/gen/docs/GaussianDiffusion.md +++ /dev/null @@ -1,123 +0,0 @@ -# GaussianDiffusion Class Analysis - -## Core Components - -### 1. GausDesc Structure -- Represents a Gaussian distribution with two key parameters: - - `center`: The mean/center of the Gaussian distribution - - `sigma`: The standard deviation of the distribution - -### 2. GaussianDiffusion Class -- Main purpose: Models the diffusion of charge deposits in a wire chamber detector -- Key members: - - `m_deposition`: Pointer to the original charge deposition - - `m_time_desc`: Gaussian description for time dimension - - `m_pitch_desc`: Gaussian description for spatial/pitch dimension - - `m_patch`: 2D array storing the diffused charge distribution - - `m_qweights`: Vector storing weights for charge interpolation - - `m_toffset_bin`, `m_poffset_bin`: Offset bins for time and pitch dimensions - -## Key Algorithms - -### 1. 
Gaussian Sampling (GausDesc::sample) -```cpp -std::vector sample(double start, double step, int nsamples) const { - if (!sigma) { - // Handle point source case - return {1.0}; - } - // Sample Gaussian at regular intervals - for (int ind = 0; ind < nsamples; ++ind) { - const double rel = (start + ind * step - center) / sigma; - ret[ind] = exp(-0.5 * rel * rel); - } -} -``` - -### 2. Bin Integration (GausDesc::binint) -- Uses error function (erf) to compute integrated charge in each bin -- More accurate than simple sampling for charge conservation -```cpp -std::vector binint(double start, double step, int nbins) const { - if (!sigma) { - return {1.0}; // Point source case - } - // Compute erf differences for bin integration - const double sqrt2 = sqrt(2.0); - for (int ind = 0; ind <= nbins; ++ind) { - double x = (start + step * ind - center) / (sqrt2 * sigma); - erfs[ind] = 0.5 * std::erf(x); - } - // Calculate bin contents - for (int ibin = 0; ibin < nbins; ++ibin) { - bins[ibin] = erfs[ibin + 1] - erfs[ibin]; - } -} -``` - -### 3. Weight Calculation Algorithm (GausDesc::weight) - -The weight calculation is a sophisticated algorithm designed to handle linear charge interpolation between impact positions. Here's the detailed breakdown: - -#### Purpose -- Provides weights for linear interpolation of charge between adjacent wire positions -- Accounts for the continuous nature of the charge distribution - -#### Algorithm Steps -1. For each bin: - ```cpp - double x2 = start; - double x1 = 0; - double gaus2 = exp(-0.5 * (start - center) / sigma * (start - center) / sigma); - double gaus1 = 0; - - for (int ind = 0; ind < nbins; ind++) { - x1 = x2; - x2 = x1 + step; - double rel = (x2 - center) / sigma; - gaus1 = gaus2; - gaus2 = exp(-0.5 * rel * rel); - ``` - -2. 
Weight Calculation Formula: - ```cpp - wt[ind] = -1.0 * sigma / (x1 - x2) * (gaus2 - gaus1) / sqrt(2.0 * pi) / pvec[ind] - + (center - x2) / (x1 - x2); - ``` - -#### Mathematical Explanation -The weight calculation combines two components: -1. Gaussian derivative term: `-1.0 * sigma / (x1 - x2) * (gaus2 - gaus1) / sqrt(2.0 * pi) / pvec[ind]` - - Represents the rate of change of the Gaussian distribution - - Normalized by the bin's total charge (pvec[ind]) - -2. Linear position term: `(center - x2) / (x1 - x2)` - - Provides linear interpolation based on position relative to bin edges - -### 4. Diffusion Sampling (set_sampling method) - -The `set_sampling` method combines all these components to create the final diffusion model: - -1. Calculate time and pitch ranges based on number of sigmas -2. Sample or integrate both dimensions -3. Create 2D charge distribution patch -4. Apply optional charge fluctuations -5. Normalize to preserve total charge - -## Implementation Details - -### Charge Conservation -- The implementation carefully preserves total charge through normalization -- Both bin integration and sampling methods are normalized -- Fluctuations (if applied) maintain the total charge through renormalization - -### Coordinate Systems -- Uses two coordinate systems: - 1. Absolute coordinates (center, time) - 2. Bin-relative coordinates (offsets) -- Transforms between these systems using offset bins (m_toffset_bin, m_poffset_bin) - -### Performance Considerations -- Pre-calculates error functions for efficiency -- Uses vectorized operations where possible -- Caches results in m_patch to avoid recalculation \ No newline at end of file diff --git a/gen/docs/GaussianDiffusion_weight.md b/gen/docs/GaussianDiffusion_weight.md deleted file mode 100644 index 648171859..000000000 --- a/gen/docs/GaussianDiffusion_weight.md +++ /dev/null @@ -1,196 +0,0 @@ -# Mathematical Derivation of Gaussian Weight Calculation - -## 1. 
Basic Setup and Goal - -Induced current of drifted charge is computed by convolution of continuous charge distribution and discrete field responses. -The fundamental problem is to determine how to distribute charge between two adjacent impact wire positions (where the field response is simulated; usually every 1/10th of the actual wire pitch), in order to effectively apply a linear interpolation of the field responses between the two impact positions, thus maintaining the continuity and accuracy of the simulated waveforms across impact positions along wire pitch orientation for any given charge. -Check page 38 of arXiv:1802.08709 for more detail. - -### Initial Conditions: -- Gaussian distribution centered at μ with standard deviation σ -- Two adjacent impact wire positions at x₁ and x₂ -- Total charge Q in the region between x₁ and x₂ - -## 2. Mathematical Framework - -### The Gaussian Distribution: -``` -G(x) = (1/√(2πσ²)) * exp(-(x-μ)²/(2σ²)) -``` - -### Linear Interpolation Position: -For any point x between two wires: -``` -u_linear(x) = (x₂ - x)/(x₂ - x₁) [for x₁ impact position] -u_linear(x) = (x - x₁)/(x₂ - x₁) [for x₂ impact position] -``` - -## 3. Weight Derivation Steps - -### Step 1: Consider Charge Conservation -The total charge Q between x₁ and x₂ is: -``` -Q = ∫(x₁ to x₂) G(x) dx -``` - -### Step 2: Center of Charge -The average position of charge (first moment) is: -``` -x_avg = (1/Q) * ∫(x₁ to x₂) x*G(x) dx -``` - -### Step 3: Weight Formula Derivation -The weight formula: -``` -w₁ = σ²/(x₁-x₂) * (G(x₁)-G(x₂)) / Q + (μ-x₂)/(x₁-x₂) [for x₁ impact position] -w₂ = 1 - w₁ [for x₂ impact position] -``` - -This comes from combining: -1. The normalized derivative of the Gaussian (rate of change term) -2. Constant term based on the distance from the central/peak position of the charge - -## 4. Physical Interpretation - -The weight calculation balances two physical aspects: -1. The shape of the charge distribution (Gaussian term) -2. 
The geometric position of the charge relative to the wires (linear term) - - -```svg - - - - - - - - 1. Basic Setup - - - - - - - - - - - - x₁ - x₂ - - - - μ - - - - - - - 2. Charge Integration - - - - - - - - - - - - - Q = ∫G(x)dx - - - - - - - 3. Weight Components - - - - - - - - Gaussian derivative term - - - - Linear position term - - - - w(x) = σ²/(x₁-x₂) * (G(x₁)-G(x₂)) / Q + (μ-x₂)/(x₁-x₂) - - - - -``` - -The mathematical interpretation of the weight calculation can be understood in three key parts: - -1. **Gaussian Charge Distribution** - - The fundamental distribution is Gaussian, representing the diffused charge - - This accounts for the physical nature of charge diffusion in the detector - - The distribution has a center μ and width σ - -2. **Charge Integration** - - Between any two wires (x₁ and x₂), we have a total charge Q - - This charge must be conserved in our weighting scheme - - The integral of the Gaussian between the wires gives us Q: - ``` - Q = ∫(x₁ to x₂) G(x) dx - ``` - -3. **Weight Calculation Components** - The weight formula combines two essential physical aspects: - - a) **Gaussian Derivative Term**: `σ²/(x₁-x₂) * (G(x₁)-G(x₂)) / Q` - - Represents how quickly the charge distribution changes - - Accounts for the shape of the Gaussian - - Normalized by total charge Q to maintain conservation - - b) **Linear Position Term**: `(μ-x₂)/(x₁-x₂)` - - Provides basic geometric interpolation - - Ensures smooth transition between wires - - Based on the center position relative to wire positions - -The key insights behind this formulation are: - -1. **Physical Motivation**: - - The charge distribution is continuous but must be measured at discrete points - - The weighting should reflect both the distribution shape and position - - Total charge must be conserved - -2. 
**Mathematical Properties**: - - Smooth transition between wires - - Proper handling of both narrow and wide distributions - - Conservation of total charge - - Correct limiting behavior for point-like deposits - -3. **Practical Implementation**: - - Computationally efficient - - Numerically stable - - Handles edge cases appropriately - -The diagrams [need to be fixed] show: -1. The basic setup with the Gaussian distribution and wire positions -2. The charge integration between wires -3. The components of the weight calculation - diff --git a/gen/inc/WireCellGen/RecombinationModels.h b/gen/inc/WireCellGen/RecombinationModels.h index 6cb045934..49cdf8945 100644 --- a/gen/inc/WireCellGen/RecombinationModels.h +++ b/gen/inc/WireCellGen/RecombinationModels.h @@ -18,6 +18,7 @@ namespace WireCell { MipRecombination(double Rmip = 0.7, double Wi = 23.6 * units::eV / (-1 * units::eplus)); virtual ~MipRecombination(); virtual double operator()(double dE, double dX = 0.0); + virtual double dE(double dQ, double dX); virtual void configure(const WireCell::Configuration& config); virtual WireCell::Configuration default_configuration() const; }; @@ -36,6 +37,7 @@ namespace WireCell { double Wi = 23.6 * units::eV / (-1 * units::eplus)); virtual ~BirksRecombination(); virtual double operator()(double dE, double dX = 0.0); + virtual double dE(double dQ, double dX); virtual void configure(const WireCell::Configuration& config); virtual WireCell::Configuration default_configuration() const; }; @@ -53,6 +55,7 @@ namespace WireCell { double Wi = 23.6 * units::eV / (-1 * units::eplus)); virtual ~BoxRecombination(); virtual double operator()(double dE, double dX = 0.0); + virtual double dE(double dQ, double dX); virtual void configure(const WireCell::Configuration& config); virtual WireCell::Configuration default_configuration() const; }; diff --git a/gen/inc/WireCellGen/Scaler.h b/gen/inc/WireCellGen/Scaler.h index 31a3432b2..74371968a 100644 --- a/gen/inc/WireCellGen/Scaler.h +++ 
b/gen/inc/WireCellGen/Scaler.h @@ -31,7 +31,7 @@ namespace WireCell { private: std::vector m_boxes; - std::size_t m_count{0}; + //std::size_t m_count{0}; double bin_width; double tpc_width; diff --git a/gen/src/AnodePlane.cxx b/gen/src/AnodePlane.cxx index 04eb277a9..e9659b3f7 100644 --- a/gen/src/AnodePlane.cxx +++ b/gen/src/AnodePlane.cxx @@ -233,7 +233,8 @@ void Gen::AnodePlane::configure(const WireCell::Configuration& cfg) const double pitchmax = wire_pitch_dirs.second.dot(wires[nwires - 1]->center() - plane_center); const Vector pimpos_origin(response_x, plane_center.y(), plane_center.z()); - log->debug("face:{}, plane:{}, origin:{} mm", iface, iplane, pimpos_origin / units::mm); + log->debug("face:{}, plane:{}, origin:{} mm, wpid:{}", + iface, iplane, pimpos_origin / units::mm, wire_plane_id); Pimpos* pimpos = new Pimpos(nwires, pitchmin, pitchmax, wire_pitch_dirs.first, wire_pitch_dirs.second, pimpos_origin, nimpacts); diff --git a/gen/src/DepoFluxSplat.cxx b/gen/src/DepoFluxSplat.cxx index 21412be44..296dcd4af 100644 --- a/gen/src/DepoFluxSplat.cxx +++ b/gen/src/DepoFluxSplat.cxx @@ -413,6 +413,8 @@ bool Gen::DepoFluxSplat::operator()(const input_pointer& in, output_pointer& out log->debug("splat {} ndepos={}/{}/[{}] ntraces={}", out->ident(), ndepos_seen, in->depos()->size(), nplanes_skipped, accum->ntraces()); ++m_count; + + (void)ndepos_skipped; return true; } diff --git a/gen/src/DepoSplat.cxx b/gen/src/DepoSplat.cxx index 313a95902..6eeb5aecf 100644 --- a/gen/src/DepoSplat.cxx +++ b/gen/src/DepoSplat.cxx @@ -371,7 +371,7 @@ ITrace::vector Gen::DepoSplat::process_face(IAnodeFace::pointer face, const IDep l->debug("DepoSplat: plane {} " "dropped {} (time) and {} (pitch) from {} total", iplane, t_dropped, p_dropped, depos.size()); - + (void)idepo; } // make output traces @@ -382,5 +382,6 @@ ITrace::vector Gen::DepoSplat::process_face(IAnodeFace::pointer face, const IDep auto trace = std::make_shared(chid, 0, chv); traces.push_back(trace); } + return 
traces; } diff --git a/gen/src/Fourdee.cxx b/gen/src/Fourdee.cxx index 394484df5..f252f1451 100644 --- a/gen/src/Fourdee.cxx +++ b/gen/src/Fourdee.cxx @@ -471,4 +471,6 @@ void Gen::Fourdee::execute_old() } bail: // what's this weird syntax? What is this, BASIC? cerr << em.summary() << endl; + +(void)ndepos; } diff --git a/gen/src/RecombinationModels.cxx b/gen/src/RecombinationModels.cxx index f75f350ca..233150c4b 100644 --- a/gen/src/RecombinationModels.cxx +++ b/gen/src/RecombinationModels.cxx @@ -23,6 +23,7 @@ Gen::MipRecombination::MipRecombination(double Rmip, double Wi) } Gen::MipRecombination::~MipRecombination() {} double Gen::MipRecombination::operator()(double dE, double dX) { return m_rmip * dE / m_wi; } +double Gen::MipRecombination::dE(double dQ, double dX) { return dQ * m_wi / m_rmip; } void Gen::MipRecombination::configure(const WireCell::Configuration& config) { m_rmip = get(config, "Rmip", m_rmip); @@ -53,6 +54,12 @@ double Gen::BirksRecombination::operator()(double dE, double dX) const double R = m_a3t / (1 + (dE / dX) * m_k3t / (m_efield * m_rho)); return R * dE / m_wi; } +double Gen::BirksRecombination::dE(double dQ, double dX) +{ + const double numerator = dQ; + const double denominator = m_a3t/m_wi - dQ/dX * m_k3t/(m_efield*m_rho); + return numerator / denominator; +} void Gen::BirksRecombination::configure(const WireCell::Configuration& config) { m_a3t = get(config, "A3t", m_a3t); @@ -90,6 +97,14 @@ double Gen::BoxRecombination::operator()(double dE, double dX) const double R = std::log(m_a + tmp) / tmp; return R * dE / m_wi; } +double Gen::BoxRecombination::dE(double dQ, double dX) +{ + const double coeff = m_b / (m_efield * m_rho); + const double a_exp = std::exp(dQ/dX * coeff * m_wi); + const double numerator = (a_exp - m_a)*dX; + const double denominator = coeff; + return numerator / denominator; +} void Gen::BoxRecombination::configure(const WireCell::Configuration& config) { m_efield = get(config, "Efield", m_efield); diff --git 
a/gen/src/TrackDepos.cxx b/gen/src/TrackDepos.cxx index 3002a9765..677c6f59b 100644 --- a/gen/src/TrackDepos.cxx +++ b/gen/src/TrackDepos.cxx @@ -103,6 +103,7 @@ void Gen::TrackDepos::add_track(double time, const WireCell::Ray& ray, double ch // earliest first std::sort(m_depos.begin(), m_depos.end(), ascending_time); log->debug("depos: {} over {}mm", m_depos.size(), length / units::mm); + (void)count; // unused, but useful for debugging } bool Gen::TrackDepos::operator()(output_pointer& out) diff --git a/iface/docs/iface-guidlines.org b/iface/docs/iface-guidlines.org new file mode 100644 index 000000000..fd3680043 --- /dev/null +++ b/iface/docs/iface-guidlines.org @@ -0,0 +1,54 @@ +#+title: Guidelines for defining WCT interfaces + +* Overview + +A fundamental aspect of the toolkit is the ~Interface~ class. A WCT *interface* is +a pure abstract base class (ABC) inheriting from ~Interface~ and defining a small +number of pure virtual methods. A WCT *component* is a class that inherits from +one or more interface classes and implements the pure virtual methods. Any code +may then retrieve an *instance* of a *component* class by its "type/name" string and +the *interface* class in order to then execute the methods. + +This retrieval is a run-time operation. There is no compile-time dependency +between retriever and retrieved. The application that will retrieve a +component instance must arrange for the plugin library that provides the +component class to have been loaded and for the instance to have been created. +This is typically assured via WCT's ~Main~ and driven by user configuration. + +The WCT ~iface/~ subpackage provides a "zoo" of subclasses to ~Interface~ and is +intended to collect all interfaces that do not require packages outside the WCT +core dependencies. Users are of course free to define their own interfaces in +their own packages but their use will be limited. + +The rest of this document provides guidance on defining interfaces.
+ +* Scope of an interface + +TBD. Balance between number of methods and number of interfaces. Separation of concern. + +* Inheritance structure + +TBD. ~INode~, ~IData~, when to use it, when to avoid it. + +* Method names + +Define method names assuming a flat method namespace. + +A user typically accesses a component by an interface type. This limits the set +of methods the user may call and thus the method namespace is limited and there +is no concern for name collision. However, take some care to balance brevity +and verbosity in choosing a name. + +#+begin_src c++ + // not great + virtual float value() const = 0; + + // better + virtual float drift_speed() const = 0; +#+end_src + +Take some care to choose method names that are unique across known interfaces +and especially between interfaces that likely may be implemented by a single +component. + + diff --git a/iface/inc/WireCellIface/IDetectorVolumes.h b/iface/inc/WireCellIface/IDetectorVolumes.h new file mode 100644 index 000000000..a40f8f0a2 --- /dev/null +++ b/iface/inc/WireCellIface/IDetectorVolumes.h @@ -0,0 +1,78 @@ +/** A view of the basic spatial composition of an entire detector. + + The information returned by implementations of this interface MUST adhere to + the "wire schema" and its convention. See the util/docs/wire-schema.org. + + */ +#ifndef WIRECELL_IDETECTORVOLUMES +#define WIRECELL_IDETECTORVOLUMES + +#include "WireCellUtil/IComponent.h" +#include "WireCellUtil/Point.h" +#include "WireCellUtil/Configuration.h" +#include "WireCellUtil/BoundingBox.h" +#include "WireCellIface/WirePlaneId.h" +#include "WireCellIface/IAnodePlane.h" + + +namespace WireCell { + + class IDetectorVolumes : public IComponent { + public: + virtual ~IDetectorVolumes() {} + + /// Return the wpid for the detector unit that contains the point in + /// its sensitive volume. The wpid evaluates to false if the point + /// is not contained in any sensitive volume.
The Point must be + /// provided in the global spatial coordinate system (that which is + /// defined by "wires"). + virtual WirePlaneId contained_by(const Point& point) const = 0; + + /// Return the sign (+/-1) of the direction along the global X + /// coordinate direction in which the sensitive face of the detector + /// volume is "looking". Note, this is always opposite of the nominal + /// direction of electron drift toward that face. If wpid is illegal or + /// unknown, 0 is returned. + virtual int face_dirx(WirePlaneId wpid) const = 0; + + /// Return a unit vector along the "wire" (or strip) direction for the + /// plane. If wpid is illegal or unknown, a zero vector is returned. + virtual Vector wire_direction(WirePlaneId wpid) const = 0; + + /// Return a vector with magnitude equal to the pitch between "wires" + /// (or strips) and with a direction that is perpendicular to the "wire" + /// direction and along increasing pitch. The "layer" component of the + /// wpid must be well determined. If wpid is illegal or unknown, a zero + /// vector is returned. + virtual Vector pitch_vector(WirePlaneId wpid) const = 0; + + /// Return the largest possible bounding box that is fully inside the + /// detector volume. + /// + /// If the wpid is not known, the returned BoundingBox::empty() will + /// return true. + virtual BoundingBox inner_bounds(WirePlaneId wpid) const = 0; + + /// Return the smallest possible bounding box that contains the detector + /// volume. For detector volumes that are themselves boxed shaped, this + /// bounding box should be identical to the one from inner_bounds(). + /// Implementations need only implement this method for non-box shaped + /// detector volumes. + /// + /// If the wpid is not known, the returned BoundingBox::empty() will + /// return true. + virtual BoundingBox outer_bounds(WirePlaneId wpid) const { return inner_bounds(wpid); } + + + /// Forward any user-provided, application specific metadata for a + /// particular wpid. 
+ virtual Configuration metadata(WirePlaneId wpid) const = 0; + + /// Return a map of all the faces in the detector volume. The key is + /// the WirePlaneId::ident() of the face. + virtual const std::map& wpident_faces() const = 0; + + }; + +} +#endif diff --git a/iface/inc/WireCellIface/IFiducial.h b/iface/inc/WireCellIface/IFiducial.h new file mode 100644 index 000000000..9465969f1 --- /dev/null +++ b/iface/inc/WireCellIface/IFiducial.h @@ -0,0 +1,20 @@ +/** Determine if a point is inside a closed 3D region */ +#ifndef WIRECELL_IFIDUCIAL +#define WIRECELL_IFIDUCIAL + +#include "WireCellUtil/IComponent.h" +#include "WireCellUtil/Point.h" + + +namespace WireCell { + + class IFiducial : public IComponent { + public: + virtual ~IFiducial() {} + + /// Return true if the point is inside the region, else false. + virtual bool contained(const Point& point) const = 0; + }; + +} +#endif diff --git a/iface/inc/WireCellIface/IPCTreeMutate.h b/iface/inc/WireCellIface/IPCTreeMutate.h new file mode 100644 index 000000000..793892bac --- /dev/null +++ b/iface/inc/WireCellIface/IPCTreeMutate.h @@ -0,0 +1,27 @@ +#ifndef WIRECELL_IPCTREEMUTATE +#define WIRECELL_IPCTREEMUTATE + +#include "WireCellUtil/IComponent.h" +#include "WireCellUtil/PointTree.h" + +namespace WireCell { + + /** An IPCTreeMutate accepts a point cloud tree node, may mutate it and may + * produces a new one. + * + * Caller takes ownership of the returned node via unique pointer. 
+ * + */ + class IPCTreeMutate : public IComponent { + public: + virtual ~IPCTreeMutate() {} + + using node_t = PointCloud::Tree::Points::node_t; + + virtual std::unique_ptr mutate(node_t& node) const = 0; + + }; + +} // namespace WireCell + +#endif diff --git a/iface/inc/WireCellIface/IPCTreeTransform.h b/iface/inc/WireCellIface/IPCTreeTransform.h new file mode 100644 index 000000000..4b45ab923 --- /dev/null +++ b/iface/inc/WireCellIface/IPCTreeTransform.h @@ -0,0 +1,29 @@ +#ifndef WIRECELL_IPCTREETRANSFORM +#define WIRECELL_IPCTREETRANSFORM + +#include "WireCellUtil/IComponent.h" +#include "WireCellUtil/PointTree.h" + +namespace WireCell { + + /** An IPCTreeTransform accepts a point cloud tree node and produces a new + * one. + * + * Caller takes ownership of the returned node via unique pointer. + * + * Implementations should provide a contract as to what, if any, node facade + * is assumed for both input and produced output nodes. + */ + class IPCTreeTransform : public IComponent { + public: + virtual ~IPCTreeTransform() {} + + using node_t = PointCloud::Tree::Points::node_t; + + virtual std::unique_ptr transform(const node_t& node) const = 0; + + }; + +} // namespace WireCell + +#endif diff --git a/iface/inc/WireCellIface/IRecombinationModel.h b/iface/inc/WireCellIface/IRecombinationModel.h index 195f87c75..2aa78a566 100644 --- a/iface/inc/WireCellIface/IRecombinationModel.h +++ b/iface/inc/WireCellIface/IRecombinationModel.h @@ -11,6 +11,9 @@ namespace WireCell { // Convert a point or step to ionized charge virtual double operator()(double dE, double dX = 0.0) = 0; + + // Convert the dQ and dx to dE + virtual double dE(double dQ, double dX) = 0; }; } // namespace WireCell diff --git a/iface/inc/WireCellIface/IScalarFunction.h b/iface/inc/WireCellIface/IScalarFunction.h new file mode 100644 index 000000000..a8a35c2e8 --- /dev/null +++ b/iface/inc/WireCellIface/IScalarFunction.h @@ -0,0 +1,25 @@ +#ifndef WIRECELL_ISCALARFUNCTION +#define 
WIRECELL_ISCALARFUNCTION + +#include "WireCellUtil/IComponent.h" + +namespace WireCell { + + + /** A scalar function maps R -> R. + * + * See also IWaveform which provides a similar concept but for a regularly + * sampled function. + */ + class IScalarFunction : public IComponent { + public: + + virtual ~IScalarFunction() {}; + + /// Implementation provides the function. + virtual double scalar_function(double x) = 0; + + }; +} + +#endif diff --git a/iface/inc/WireCellIface/IWireParameters.h b/iface/inc/WireCellIface/IWireParameters.h index 1f947d743..76848c0e3 100644 --- a/iface/inc/WireCellIface/IWireParameters.h +++ b/iface/inc/WireCellIface/IWireParameters.h @@ -99,7 +99,7 @@ namespace WireCell { return pitchV(); case kWlayer: return pitchW(); - case kUnknownLayer: + default: return bogus; } return bogus; diff --git a/iface/inc/WireCellIface/WirePlaneId.h b/iface/inc/WireCellIface/WirePlaneId.h index 687f9e39f..21352bcc8 100644 --- a/iface/inc/WireCellIface/WirePlaneId.h +++ b/iface/inc/WireCellIface/WirePlaneId.h @@ -9,8 +9,15 @@ namespace WireCell { - /// Enumerate layer IDs. These are not indices! - enum WirePlaneLayer_t { kUnknownLayer = 0, kUlayer = 1, kVlayer = 2, kWlayer = 4 }; + /// Enumerate layer IDs. These are not indices but are masks! A wpid can + /// have a "layer" that is multiple layers. + enum WirePlaneLayer_t { + kUnknownLayer = 0, + kUlayer = 1, + kVlayer = 2, + kWlayer = 4, + kAllLayers=7 // represents anode+face context + }; const WirePlaneLayer_t iplane2layer[3] = {kUlayer, kVlayer, kWlayer}; class WirePlaneId { @@ -27,7 +34,7 @@ namespace WireCell { /// Layer as integer (not index!) int ilayer() const; - /// Layer as index number (0,1 or 2). -1 if unknown + /// Layer as index number (0,1 or 2). -1 is returned when the layer is not well defined. int index() const; /// per-Anode face index NOT ident! 
@@ -36,15 +43,38 @@ namespace WireCell { /// APA number int apa() const; - /// return true if valid - // operator bool() const; + /// Return true if apa, face and layer are all valid numbers. bool valid() const; - bool operator==(const WirePlaneId& rhs); + /// Return true if the wpid has only legal values for apa, face and + /// layer. Layer must be well defined as a single layer (u,v,w) or as + /// "all" layers which then represents the anode+face context. + // operator bool() const; + + bool operator==(const WirePlaneId& rhs) const; + + bool operator!=(const WirePlaneId& rhs) const; + + bool operator<(const WirePlaneId& rhs) const; + + /// Return a new wpid defined with the given layer value but same apa/face. + WirePlaneId to_layer(WirePlaneLayer_t layer) const; + + /// Return a new wpid with a well defined plane but same apa/face. + WirePlaneId to_u() const; + WirePlaneId to_v() const; + WirePlaneId to_w() const; - bool operator!=(const WirePlaneId& rhs); + /// Return a new wpid broadcast to apply to all planes but same apa/face. + WirePlaneId to_all() const; - bool operator<(const WirePlaneId& rhs); + /// Return a standardized name of the form: + /// + /// "a{apa()}f{face()}p{layer()}" + /// + /// Eg, "a2f1pU" is the U-plane of face index 1 on anode ident 2. Layer + /// letter may be "A" for all or "?" for unknown. + std::string name() const; private: int m_pack; diff --git a/iface/src/WirePlaneId.cxx b/iface/src/WirePlaneId.cxx index e180dc366..f0efb0ed6 100644 --- a/iface/src/WirePlaneId.cxx +++ b/iface/src/WirePlaneId.cxx @@ -13,9 +13,6 @@ WireCell::WirePlaneId::WirePlaneId(WirePlaneLayer_t layer, int face, int apa) WireCell::WirePlaneId::WirePlaneId(int packed) : m_pack(packed) { - // It is very dubious that I allow this constructor. I do it for - // reading WireSchema files where the packing is done by the user. - // Very dubious indeed.
} int WireCell::WirePlaneId::ident() const { return m_pack; } @@ -31,7 +28,7 @@ int WireCell::WirePlaneId::index() const return 1; case kWlayer: return 2; - case kUnknownLayer: + default: return -1; } return -1; @@ -41,36 +38,87 @@ int WireCell::WirePlaneId::apa() const { return m_pack >> apa_shift; } bool WireCell::WirePlaneId::valid() const { - int ind = index(); + if (apa() < 0) return false; + if (face() < 0) return false; + if (layer() == kAllLayers) return true; + const int ind = index(); return 0 <= ind && ind < 3; } -bool WireCell::WirePlaneId::operator==(const WirePlaneId& rhs) { return m_pack == rhs.m_pack; } +// WireCell::WirePlaneId::operator bool() const +// { +// if (apa() < 0) return false; +// if (face() < 0) return false; +// if (layer() == kAllLayers) return true; +// const int ind = index(); +// return 0 <= ind && ind < 3; +// } + +WirePlaneId WirePlaneId::to_layer(WirePlaneLayer_t layer) const +{ + return WirePlaneId(layer, face(), apa()); +} +WirePlaneId WirePlaneId::to_u() const +{ + return to_layer(kUlayer); +} +WirePlaneId WirePlaneId::to_v() const +{ + return to_layer(kVlayer); +} +WirePlaneId WirePlaneId::to_w() const +{ + return to_layer(kWlayer); +} +WirePlaneId WirePlaneId::to_all() const +{ + return to_layer(kAllLayers); +} +std::string WirePlaneId::name() const +{ + std::stringstream ss; + ss << "a" << apa() << "f" << face() << "p" << layer(); + return ss.str(); +} + -bool WireCell::WirePlaneId::operator!=(const WirePlaneId& rhs) { return !(*this == rhs); } +bool WireCell::WirePlaneId::operator==(const WirePlaneId& rhs) const { return m_pack == rhs.m_pack; } -bool WireCell::WirePlaneId::operator<(const WirePlaneId& rhs) +bool WireCell::WirePlaneId::operator!=(const WirePlaneId& rhs) const { return !(*this == rhs); } + +bool WireCell::WirePlaneId::operator<(const WirePlaneId& rhs) const { - if (!this->valid() || !rhs.valid()) { - return false; - } + return m_pack < rhs.m_pack; + // if (!this->valid() || !rhs.valid()) { + // return 
false; + // } - if (apa() == rhs.apa()) { - if (face() == rhs.face()) { - return index() < rhs.index(); - } - return face() < rhs.face(); - } - return apa() < rhs.apa(); + // if (apa() == rhs.apa()) { + // if (face() == rhs.face()) { + // return index() < rhs.index(); + // } + // return face() < rhs.face(); + // } + // return apa() < rhs.apa(); } std::ostream& WireCell::operator<<(std::ostream& o, const WireCell::WirePlaneId& wpid) { - o << "[WirePlaneId " << wpid.ident() << " ind:" << wpid.index() << " layer:" << wpid.layer() + o << "[WirePlaneId \"" << wpid.name() << "\" ident=" << wpid.ident() + << " ind:" << wpid.index() << " layer:" << wpid.layer() << " apa:" << wpid.apa() << " face:" << wpid.face(); - if (!wpid.valid()) { + if (wpid.valid()) { + o << " valid"; + } + else { o << " bogus"; } + // if (wpid) { + // o << " true"; + // } + // else { + // o << " false"; + // } o << "]"; return o; } @@ -79,16 +127,19 @@ std::ostream& WireCell::operator<<(std::ostream& o, const WireCell::WirePlaneLay { switch (layer) { case WireCell::kUlayer: - o << ""; + o << "U"; break; case WireCell::kVlayer: - o << ""; + o << "V"; break; case WireCell::kWlayer: - o << ""; + o << "W"; + break; + case WireCell::kAllLayers: + o << "A"; break; default: - o << ""; + o << "?"; break; } return o; diff --git a/iface/test/doctest_wireplaneid.cxx b/iface/test/doctest_wireplaneid.cxx new file mode 100644 index 000000000..182f5dae2 --- /dev/null +++ b/iface/test/doctest_wireplaneid.cxx @@ -0,0 +1,22 @@ +#include "WireCellIface/WirePlaneId.h" +#include "WireCellUtil/doctest.h" + +using namespace WireCell; + +TEST_CASE("iface wireplaneid") { + + WirePlaneId wpid(WirePlaneLayer_t::kAllLayers, 0, 0); + CHECK(wpid.layer() == WirePlaneLayer_t::kAllLayers); + // CHECK(wpid == true); + CHECK(wpid.name() == "a0f0pA"); + + + auto wpid_u = wpid.to_u(); + CHECK(wpid_u.index() == 0); + CHECK(wpid_u.valid()); + // CHECK(wpid_u == true); + CHECK(wpid_u.name() == "a0f0pU"); + + CHECK(wpid_u.to_all().name() 
== "a0f0pA"); + CHECK(wpid_u.to_layer(WirePlaneLayer_t::kUnknownLayer).name() == "a0f0p?"); +} diff --git a/iface/test/test_wireplaneid.cxx b/iface/test/test_wireplaneid.cxx index d7b01310d..0255de325 100644 --- a/iface/test/test_wireplaneid.cxx +++ b/iface/test/test_wireplaneid.cxx @@ -73,9 +73,15 @@ int main() std::vector packed = {0, 8, 32, 56, 80, 88}; for (auto p : packed) { WirePlaneId wpid(p); - cerr << "wpid=" << wpid.ident() << " packed=" << p << " layer=" << wpid.layer() << " face=" << wpid.face() << " apa=" << wpid.apa() << "\n"; + cerr << "wpid=" << wpid << endl; + cerr << "wpid=" << wpid.ident() << " packed=" << p << " layer=" << wpid.layer() << " face=" << wpid.face() << " apa=" << wpid.apa() << " valid=" << wpid.valid() << "\n"; Assert(p == wpid.ident()); } + { + WirePlaneId wpid(kAllLayers,0,0); + cerr << "wpid=" << wpid << endl; + cerr << "packed=" << wpid.ident() << " layer=" << wpid.layer() << " face=" << wpid.face() << " apa=" << wpid.apa() << " valid=" << wpid.valid() << "\n"; + } } return 0; } diff --git a/img/docs/BlobClustering.md b/img/docs/BlobClustering.md deleted file mode 100644 index e49d7a315..000000000 --- a/img/docs/BlobClustering.md +++ /dev/null @@ -1,171 +0,0 @@ -# BlobClustering Class Documentation - -## Overview - -The BlobClustering class is a key component in the Wire-Cell Toolkit's image processing framework for particle physics detector data. It takes collections of "blobs" (spatial regions of charge) and organizes them into clusters based on spatial and temporal relationships. This clustering is a crucial step in particle track reconstruction. - -## Purpose - -The main purpose of BlobClustering is to: - -1. Assemble blobs from different time slices into coherent particle tracks -2. Establish relationships between detector components (slices, blobs, wires, channels) -3. 
Apply geometric clustering to connect blobs that are likely from the same particle - -## Key Concepts - -### Blob - -A blob represents a region of charge in 3D space, detected by wire planes. It has: -- A shape (geometric boundaries) -- Associated slice (time information) -- Connection to a specific detector face -- A charge value and uncertainty - -### Detector Geometry - -The detector hierarchy is: -- **Anode Plane**: The overall detector component - - **Faces**: Sub-sections of the anode plane - - **Wire Planes**: Layers of wires at different angles - - **Wires**: Individual sensing elements - - **Channels**: Electronics connected to wires - -### Clustering - -Clustering establishes relationships between: -- Slices and blobs (temporal connection) -- Blobs and wires (spatial connection) -- Wires and channels (detector structure) -- Blobs and blobs (spatial proximity) - -## Implementation Details - -### Class Definition - -```cpp -class blobclustering : public aux::logger, public iclustering, public iconfigurable { -public: - blobclustering(); - virtual ~blobclustering(); - virtual void configure(const wirecell::configuration& cfg); - virtual wirecell::configuration default_configuration() const; - virtual bool operator()(const input_pointer& blobset, output_queue& clusters); - -private: - std::string m_policy{"uboone"}; - iblobset::vector m_cache; - void flush(output_queue& clusters); - bool graph_bs(const input_pointer& newbs); - bool new_frame(const input_pointer& newbs) const; - int cur_ident() const; - int m_count{0}; -}; -``` - -### Key Methods - -#### `operator()` - -This is the main processing function that gets called when new blob sets arrive: -- Handles EOS (End Of Stream) signals -- Detects when a new frame begins -- Caches blob sets until a frame is complete -- Calls `flush()` to process accumulated blob sets - -#### `flush()` - -Processes all cached blob sets: -1. Sorts blob sets by time -2. 
Creates graph connections between slices, blobs, wires, and channels -3. Performs geometric clustering to connect related blobs -4. Produces a cluster and clears the cache - -#### `add_blobs()` (Helper Function) - -Sets up the graph structure: -- Connects slices to blobs -- Connects blobs to wires based on their geometry and the face they belong to -- Establishes the basic topology for clustering - -#### `geom_clustering()` (Helper Function) - -Creates blob-to-blob connections based on geometric proximity: -- Uses the configured policy (e.g., "simple", "uboone") -- Applies different tolerances for connecting blobs based on their time separation -- Considers spatial overlap in wire plane coordinates - -## Configuration - -The class accepts these configuration parameters: - -| Parameter | Type | Description | Default | -|-----------|------|-------------|---------| -| `policy` | string | Geometric clustering policy (e.g., "simple", "uboone") | "uboone" | - -## Usage Example - -In a Wire-Cell Toolkit configuration: - -```jsonnet -local bc = { - type: "BlobClustering", - data: { - policy: "uboone", - } -}; - -// Pipeline components -local input = {...}; // source of blob sets -local clustering = bc; -local output = {...}; // next processing step - -local pipeline = [ - input, - clustering, - output -]; -``` - -## The Role of Anode Face - -The anode face is a critical concept for BlobClustering because: - -1. **Coordinate System**: Each face provides its own coordinate system via the `raygrid` object -2. **Wire Planes**: The face organizes multiple wire planes that detect charge from different angles -3. **Containment**: Blobs are associated with a specific face, keeping clustering local to detector sections -4. 
**Geometric Relationships**: Blob-to-wire mapping is done through the face's geometry - -When processing blobs, the code accesses the face via `iblob->face()` and uses it to: -- Get the wire planes: `auto wire_planes = iface->planes()` -- Map blob strips to wires in these planes -- Establish connections in the graph - -## Typical Processing Flow - -1. Blob sets are received, each associated with a time slice -2. Sets are cached until a frame boundary is detected -3. When a frame is complete: - - Slice-blob-wire-channel connections are established - - Geometric clustering identifies blob-blob connections - - A cluster is formed and output -4. The process repeats for the next frame - -## Debugging - -The class provides detailed debug logging: -- Reports blob counts and graph statistics -- Logs when flushing occurs and how many clusters are produced -- Helps trace the flow of data through the clustering process - -## Common Issues - -1. **Missing Slice References**: Blobs must have valid slice references -2. **Inconsistent Face References**: Blobs should reference consistent detector faces -3. **Configuration Mismatch**: The clustering policy should match detector geometry - -## Relationship with Other Components - -- **Upstream**: Receives blob sets from blobbing algorithms -- **Downstream**: Provides clusters to later analysis stages -- **Related Classes**: Works with `GeomClusteringUtil` for blob-blob connections \ No newline at end of file diff --git a/img/docs/GeomClusteringUtil.md b/img/docs/GeomClusteringUtil.md deleted file mode 100644 index 868f5b2cb..000000000 --- a/img/docs/GeomClusteringUtil.md +++ /dev/null @@ -1,142 +0,0 @@ -# GeomClusteringUtil in Wire-Cell Toolkit - -## Overview - -GeomClusteringUtil is a component in the Wire-Cell Toolkit's imaging module (`WireCellImg`) responsible for grouping blobs together based on their spatial and temporal relationships. 
This document explains its core functionality, the concept of AnodePlane faces, and how the clustering mechanism works. - -## Key Files - -- `inc/WireCellImg/GeomClusteringUtil.h` - Header file defining the interfaces -- `src/GeomClusteringUtil.cxx` - Implementation file with the core algorithms - -## Core Functions - -### `geom_clustering()` - -This function establishes geometric connections between blobs across adjacent time slices: - -```cpp -void wirecell::img::geom_clustering( - cluster_indexed_graph_t& grind, - iblobset::vector::iterator beg, - iblobset::vector::iterator end, - std::string policy -) -``` - -Parameters: -- `grind` - The indexed graph that will store connections between blobs -- `beg` and `end` - Iterators defining a range of blob sets to process -- `policy` - String specifying which clustering policy to use - -### `grouped_geom_clustering()` - -This function is similar to `geom_clustering()` but respects pre-established blob groupings: - -```cpp -void wirecell::img::grouped_geom_clustering( - cluster_graph_t& cg, - std::string policy, - const std::unordered_map groups -) -``` - -Parameters: -- `cg` - The cluster graph to update with new blob-blob connections -- `policy` - String specifying which clustering policy to use -- `groups` - Map of blob vertex descriptors to group IDs. Only blobs in the same group can be connected. - -## Clustering Policies - -The code supports several pre-defined clustering policies: - -1. **"simple"** - - Maximum relative time difference: 1 time slice - - Grid tolerance: 0 (no tolerance for spatial overlap) - - Used for basic clustering with strict requirements - -2. **"uboone"** - - Maximum relative time difference: 2 time slices - - Grid tolerance: {1→2, 2→1} (more tolerance for adjacent time slices) - - Optimized for MicroBooNE detector characteristics - -3. 
**"uboone_local"** - - Maximum relative time difference: 2 time slices - - Grid tolerance: {1→2, 2→2} (consistent higher tolerance) - - Requires time slices to be adjacent in the ordered set of slice times - -4. **"dead_clus"** - - Special handling for regions with dead channels - - Uses `adjacent_dead()` function to determine time adjacency - - Grid tolerance: {0→1, 1→1} (tolerance even for same-time slices) - -## The "Face" Concept in Wire-Cell Toolkit - -### What is a Face? - -In the Wire-Cell Toolkit, an AnodePlane represents a physical detector plane, and each AnodePlane can have multiple "faces": - -1. A face represents one side of a detector plane where charge can be collected -2. Typical TPCs have two faces (front and back) -3. Each face contains multiple wire planes (typically 3 planes: U, V, and W views) - -### Face Structure - -A face provides: - -1. A coordinate system via its `raygrid` -2. Access to the wire planes through `iface->planes()` -3. A unique identifier via `iface->ident()` - -### How Faces Impact Clustering - -Faces are fundamental to blob clustering because: - -1. Each blob is associated with a specific face -2. The face's coordinate system (raygrid) is used to determine spatial relationships -3. Clustering only occurs between blobs on the same face -4. Wire indices and channel numbers are face-specific - -In the code, you can see face-specific handling in the `grouped_geom_clustering()` function where blobs from different slices are connected based on their face-specific coordinates. 
- -## Blob Association & Overlap - -The core mechanism for determining if blobs should be connected uses a "tolerant visitor" pattern: - -```cpp -struct tolerantvisitor { - raygrid::grid_index_t tolerance{0}; - bool verbose{false}; - raygrid::blobvec_t operator()(const raygrid::blobref_t& blob, - const raygrid::blobproj_t& proj, - raygrid::layer_index_t layer) { - return raygrid::overlap(blob, proj, layer, tolerance, verbose); - } -}; -``` - -This visitor is used with `raygrid::associate()` to find overlapping blobs and create edges between them. - -The overlap calculation takes into account: -1. The wire index bounds in each plane -2. The time relationship between slices -3. The policy-specific tolerance for gaps - -## Practical Example - -When processing detector data: - -1. Charge depositions are first converted to "blobs" associated with specific detector faces -2. GeomClusteringUtil connects these blobs across time slices based on their spatial overlap -3. The resulting clusters represent potential particle tracks or showers -4. Different policies allow tuning for different detector configurations or reconstruction goals - -## Usage in the Wire-Cell Processing Chain - -GeomClusteringUtil is typically used in these components: - -1. `LocalGeomClustering` - Groups blobs within local regions -2. `GlobalGeomClustering` - Connects clusters across larger regions -3. `InSliceDeghosting` - Uses clustering to identify and remove "ghost" hits - -The output is a graph where vertices represent blobs and edges represent their geometric connections, forming the basis for further analysis and particle identification. 
\ No newline at end of file diff --git a/img/docs/GlobalGeomClustering.md b/img/docs/GlobalGeomClustering.md deleted file mode 100644 index eaa15c782..000000000 --- a/img/docs/GlobalGeomClustering.md +++ /dev/null @@ -1,127 +0,0 @@ -# GlobalGeomClustering - -## Overview - -`GlobalGeomClustering` is a component in Wire-Cell Toolkit's image processing framework that performs geometric clustering of blobs based on their spatial proximity. The class is designed to create connections between charge blobs that likely belong to the same physical particle track or shower. - -## Purpose - -The primary purpose of this class is to: - -1. Take existing clusters of charge blobs -2. Remove existing blob-to-blob connections -3. Create new connections based on geometric proximity criteria -4. Return an updated cluster with improved blob connectivity - -## Class Definition - -```cpp -class globalgeomclustering : public aux::logger, - public iclusterfilter, - public iconfigurable { -public: - globalgeomclustering(); - virtual ~globalgeomclustering(); - virtual void configure(const wirecell::configuration& cfg); - virtual wirecell::configuration default_configuration() const; - virtual bool operator()(const input_pointer& in, output_pointer& out); -private: - std::string m_clustering_policy{"uboone"}; -}; -``` - -## Key Concepts - -### Anode Plane and Faces - -In Wire-Cell, detector geometry is represented by: - -- **AnodePlane**: An abstraction of the physical wire planes in a Time Projection Chamber (TPC) detector -- **Face**: A subset of the anode that collects charge drifting from a particular direction - - In a single-sided TPC: one face - - In a double-sided TPC: two faces (front and back) - - Each face has multiple wire planes (typically U, V, W in LArTPCs) at different angles - -The `GlobalGeomClustering` operates on blobs within the same face. It does not connect blobs across different faces, as these would represent charges originating from opposite directions. 
- -### Blobs and Clusters - -- **Blob**: A contiguous region of charge deposition in 2D (wire-time) space -- **Cluster**: A collection of blobs with various connection types: - - blob-to-slice: connecting blobs to their time slices - - wire-to-channel: connecting wires to readout channels - - blob-to-blob: connecting blobs that are likely part of the same physical object - -This class specifically modifies the blob-to-blob connections based on geometric proximity. - -## Configuration Parameters - -| Parameter | Description | Default Value | -|-----------|-------------|---------------| -| `clustering_policy` | Determines the algorithm and parameters for establishing blob-to-blob connections | "uboone" | - -### Available Clustering Policies - -1. **"uboone"**: Parameters tuned for the MicroBooNE detector - - Maximum relative time difference between slices: 2 ticks - - Gap tolerance for adjacent slices: {1:2, 2:1} (time diff : wire tolerance) - -2. **"simple"**: A simpler clustering algorithm - - Maximum relative time difference: 1 tick - - Gap tolerance: {1:0} - -3. **"uboone_local"**: Similar to "uboone" but with modified wire tolerance - - Maximum relative time difference: 2 ticks - - Gap tolerance: {1:2, 2:2} - -## Implementation Details - -1. The `operator()` method takes an input cluster and produces an output cluster: - - First, it examines blob-blob connectivity in the original cluster - - It creates a filtered graph that excludes blob-blob edges - - It calls `grouped_geom_clustering()` to create new edges based on geometry - - It returns a new cluster with the updated connectivity - -2. The geometric clustering considers: - - Spatial proximity of blobs - - Time proximity of slices - - Wire plane topology - -3. Only blobs within the same face can be connected, as cross-face connections would require additional transformation logic. 
- -## Usage Example - -```cpp -// Configuration JSON -{ - "clustering_policy": "uboone" -} - -// In a WCT configuration file -local clusterer = { - type: "GlobalGeomClustering", - data: { - clustering_policy: "uboone" - } -} -``` - -## Internal Operation Flow - -1. Receives an input cluster -2. Filters the cluster to remove existing blob-blob edges -3. Creates a new graph without these edges -4. Calls `grouped_geom_clustering()` with the specified policy -5. Returns the cluster with new edges based on geometric proximity - -## Limitations - -- Only operates on blobs within the same face -- Does not connect blobs across different faces -- Uses predefined policies with fixed parameters -- Does not handle time transformations between faces - -## References - -1. Wire-Cell Toolkit: [https://github.com/WireCell/wire-cell-toolkit](https://github.com/WireCell/wire-cell-toolkit) -2. MicroBooNE Experiment: [https://microboone.fnal.gov/](https://microboone.fnal.gov/) \ No newline at end of file diff --git a/img/docs/InSliceDeghosting.md b/img/docs/InSliceDeghosting.md deleted file mode 100644 index 080eec531..000000000 --- a/img/docs/InSliceDeghosting.md +++ /dev/null @@ -1,129 +0,0 @@ -# Understanding InSliceDeghosting in Wire-Cell Toolkit - -## Overview - -The `InSliceDeghosting` class is a component of the Wire-Cell Toolkit designed to identify and remove "ghost" signals in Time Projection Chamber (TPC) detectors. This document explains the purpose, methodology, and implementation of this class. - -## What is Ghosting? - -In wire chamber detectors like Liquid Argon Time Projection Chambers (LArTPCs), multiple wire planes are used to detect charge signals and reconstruct 3D positions. Each plane provides a 2D projection of the charge deposits. When multiple unrelated charge deposits occur in different locations, their wire plane projections can create false intersections, leading to "ghost" signals. 
- -![Ghost Signals Illustration](https://raw.githubusercontent.com/WireCell/wire-cell-graphics/master/ghosting.png) - -*Note: Image shows how multiple real charge deposits (green) can create false intersections (red) in the wire plane readouts.* - -## Class Purpose - -`InSliceDeghosting` performs "in-slice de-ghosting," which: - -1. Identifies high-confidence "good" blobs based on charge thresholds -2. Assigns quality tags to blobs (good, bad, potential_good, potential_bad, to_be_removed) -3. Uses geometric consistency to determine which ambiguous blobs are likely ghosts -4. Removes identified ghost blobs from the cluster -5. Creates new blob-to-blob connections between remaining blobs - -## Key Concepts - -### Wire-Cell Detector Geometry - -- **AnodePlane**: Represents the entire detection system -- **Face**: A region of the anode plane, typically with multiple wire planes -- **Wire Planes**: Each face has multiple wire planes (typically 3 in U, V, W orientations) -- **Blobs**: Detector response regions representing charge deposits -- **Clusters**: Groups of related blobs - -### Quality Tags - -The class uses a bitwise tagging system to mark blobs: - -```cpp -enum blob_quality_bitpos { - good, // High confidence real blob - bad, // High confidence ghost blob - potential_good, // Likely real blob - potential_bad, // Likely ghost blob - to_be_removed // Marked for removal -}; -``` - -## Algorithm Steps - -The de-ghosting algorithm follows these general steps: - -1. **Initial Quality Identification** (`blob_quality_ident`): - - Tags blobs with charge above threshold as `good` and `potential_good` - - Considers blob-to-blob connections to enhance quality assessment - -2. **Local De-ghosting** (`local_deghosting` or `local_deghosting1`): - - Groups blobs by number of active wire planes they appear in - - Uses wire channel consistency across planes to identify ghosts - - Tags low-confidence blobs for removal - -3. 
**Geometric Clustering**: - - Removes tagged ghost blobs - - Creates new blob-to-blob edges within quality groups - - Uses ray grid overlap logic to determine proximity - -## Configuration Parameters - -| Parameter | Type | Description | -|-----------|------|-------------| -| `dryrun` | bool | If true, outputs original clusters without changes | -| `good_blob_charge_th` | double | Charge threshold for identifying "good" blobs | -| `clustering_policy` | string | Policy used for geometric clustering ("uboone", "simple", etc.) | -| `config_round` | int | Which algorithm variant to use (1, 2, or 3) | -| `deghost_th` | float | Threshold for de-ghosting decision logic | -| `deghost_th1` | float | Alternative threshold for second de-ghosting algorithm | - -## Usage Example - -```jsonnet -local wc = import "wirecell.jsonnet"; - -local deghosting = { - type: "InSliceDeghosting", - data: { - good_blob_charge_th: 300.0, - clustering_policy: "uboone", - config_round: 1, - deghost_th: 0.75, - deghost_th1: 0.5, - } -}; -``` - -## Implementation Details - -### Wire Plane Consistency - -The algorithm uses wire channel crossings to determine when a blob is likely a ghost: - -``` -For each two-wire-plane blob: - Check if adjacent to any good three-wire-plane blob - If not adjacent to at least two good blobs: - Calculate "score" based on wire channel consistency - If score below threshold, mark as ghost -``` - -### Face and Wire Planes - -The algorithm extensively uses the "face" concept: - -1. Blobs store which face they belong to -2. Each face has wire planes (typically U, V, W) -3. Channels are organized by plane within each face -4. 
Spatial consistency is checked using the face's coordinate system - -## Performance Considerations - -- The algorithm uses bitwise operations for efficient tagging -- Connected component analysis identifies blob groups -- Geometric clustering operations can be computationally intensive -- The algorithm can be configured for different detector geometries - -## References - -- [Wire-Cell Documentation](https://wirecell.github.io/) -- [Cluster Shadow Documentation](https://github.com/wirecell/wire-cell-toolkit/blob/master/aux/docs/cluster-shadow.org) -- [LArTPC Reconstruction Techniques](https://arxiv.org/abs/1804.02583) \ No newline at end of file diff --git a/img/docs/LCBlobRemoval.md b/img/docs/LCBlobRemoval.md deleted file mode 100644 index 783a9dde5..000000000 --- a/img/docs/LCBlobRemoval.md +++ /dev/null @@ -1,200 +0,0 @@ -# LCBlobRemoval Class Documentation - -## Overview - -The `LCBlobRemoval` class is a component of the Wire-Cell Toolkit's image processing module (`WireCellImg`). It functions as a filter for removing low-charge blobs from a cluster, effectively reducing noise and simplifying downstream processing. - -## Class Definition - -```cpp -class lcblobremoval : public aux::logger, public iclusterfilter, public iconfigurable { -public: - lcblobremoval(); - virtual ~lcblobremoval(); - virtual void configure(const wirecell::configuration& cfg); - virtual wirecell::configuration default_configuration() const; - virtual bool operator()(const input_pointer& in, output_pointer& out); -private: - // used to hold measurement and blob values - // (central+uncertainty). - using value_t = islice::value_t; - - // config: blob_{value,error}_threshold. blob - // central value less dropped. - // uncertainty currently considered. 
- value_t m_blob_thresh{0,1}; -}; -``` - -## Class Inheritance - -- **aux::logger**: Provides logging capabilities -- **iclusterfilter**: Defines the interface for filters that process clusters -- **iconfigurable**: Allows the class to be configured via JSON configuration - -## Key Components - -### Configuration Parameters - -The class accepts the following configuration parameters: - -- **blob_value_threshold**: The minimum charge value a blob must have to be kept (default: 0) -- **blob_error_threshold**: Related to uncertainty of measurements (default: 1, but not directly used in the filtering logic) - -Example configuration: - -```json -{ - "blob_value_threshold": 300, - "blob_error_threshold": 1 -} -``` - -### Default Configuration - -```cpp -wirecell::configuration img::lcblobremoval::default_configuration() const { - wirecell::configuration cfg; - cfg["blob_value_threshold"] = m_blob_thresh.value(); - cfg["blob_error_threshold"] = m_blob_thresh.uncertainty(); - return cfg; -} -``` - -### Main Operation - -The main operation is performed in the `operator()` method: - -```cpp -bool img::lcblobremoval::operator()(const input_pointer& in, output_pointer& out) { - out = nullptr; - if(!in) { - log->debug("eos"); - return true; - } - - const auto in_graph = in->graph(); - dump_cg(in_graph, log); - - auto out_graph = prune(in_graph, m_blob_thresh.value()); - dump_cg(out_graph, log); - - out = std::make_shared(out_graph, in->ident()); - return true; -} -``` - -The key processing is in the `prune` function: - -```cpp -cluster_graph_t prune(const cluster_graph_t& cg, float threshold) { - cluster_graph_t cg_out; - size_t nblobs = 0; - std::unordered_map old2new; - - // Iterate through all vertices - for(const auto& vtx : mir(boost::vertices(cg))) { - const auto& node = cg[vtx]; - if(node.code() == 'b') { - const auto iblob = get(node.ptr); - auto bval = iblob->value(); - if(bval < threshold) continue; // Skip low-charge blobs - } - ++nblobs; - old2new[vtx] = 
boost::add_vertex(node, cg_out); - } - - // Copy edges between remaining vertices - for(auto edge : mir(boost::edges(cg))) { - auto old_tail = boost::source(edge, cg); - auto old_head = boost::target(edge, cg); - auto old_tit = old2new.find(old_tail); - if(old_tit == old2new.end()) { - continue; - } - auto old_hit = old2new.find(old_head); - if(old_hit == old2new.end()) { - continue; - } - boost::add_edge(old_tit->second, old_hit->second, cg_out); - } - - return cg_out; -} -``` - -## Cluster Graph and Faces in Wire-Cell - -The `LCBlobRemoval` class operates within the Wire-Cell Toolkit's graph-based representation of detector data: - -### Cluster Graph Structure - -- **Vertices**: Represent different entities including: - - Blobs ('b'): 2D projections of charge deposits - - Slices ('s'): Time windows - - Wires ('w'): Individual detector wires - - Channels ('c'): Electronics channels - - Measures ('m'): Measurement nodes - -- **Edges**: Represent relationships between entities: - - Blob-Slice: A blob exists within a time slice - - Blob-Wire: A blob covers certain wires - - Wire-Channel: Wires connect to channels - - Blob-Blob: Spatial or temporal relationships between blobs - -### The "Face" Concept - -Although `LCBlobRemoval` doesn't directly manipulate face information, understanding faces is important: - -1. **Anode Plane Assembly (APA)**: - - Physical detector component with sensing surfaces - -2. **Face**: - - A specific sensing surface of an APA - - Contains multiple wire planes (typically U, V, W) - - Each blob is associated with a specific face - -3. **How Faces Are Used**: - - Blobs are associated with faces: `blob->face()` - - This association is preserved during filtering - - The face determines the coordinate system of the blob - - Different faces might have different geometries or calibrations - -## Usage in Processing Pipeline - -The `LCBlobRemoval` class is typically used in a processing pipeline where: - -1. 
Detector data is converted to clusters of blobs -2. `LCBlobRemoval` filters out low-charge blobs -3. Subsequent components process the filtered clusters - -Example configuration in a pipeline: - -```json -{ - "configs": [ - { - "type": "LCBlobRemoval", - "data": { - "blob_value_threshold": 300 - } - } - ], - "connections": [ - { - "input": "SomeBlobGenerator:output", - "output": "LCBlobRemoval:input" - }, - { - "input": "LCBlobRemoval:output", - "output": "NextProcessor:input" - } - ] -} -``` - -## Summary - -The `LCBlobRemoval` class serves as a simple but essential filter in the Wire-Cell Toolkit's image processing module. It removes blobs with charge values below a configurable threshold, thereby reducing noise and simplifying downstream processing. - -While the class itself doesn't directly interact with the "face" concept, it preserves the face associations of blobs that pass the filter. Understanding the role of faces in the Wire-Cell framework is important for comprehending how blob filtering fits into the overall data processing pipeline. \ No newline at end of file diff --git a/img/docs/ProjectionDeghosting.md b/img/docs/ProjectionDeghosting.md deleted file mode 100644 index 4da68b80f..000000000 --- a/img/docs/ProjectionDeghosting.md +++ /dev/null @@ -1,184 +0,0 @@ -# Understanding ProjectionDeghosting in Wire-Cell Toolkit - -## Introduction - -The `ProjectionDeghosting` class is a critical component in the Wire-Cell Toolkit, designed to address a fundamental challenge in 3D reconstruction for Liquid Argon Time Projection Chambers (LArTPCs). This document explains the purpose, functionality, and implementation details of this class to help developers and physicists understand how it contributes to the overall reconstruction chain. - -## The Ghosting Problem - -In LArTPCs, charged particles create ionization tracks in liquid argon. These ionization electrons drift to wire planes that are oriented at different angles. 
Each wire plane provides a 2D projection of the particle trajectory from a different perspective. When attempting to reconstruct 3D positions from these 2D projections, ambiguities can arise: - -- Multiple possible 3D points may be consistent with the same set of wire signals -- These ambiguities create "ghost" hits - false intersections that don't correspond to actual energy deposits -- Ghosts can significantly degrade reconstruction quality and create spurious features in event displays - -![Ghosting Illustration](https://i.imgur.com/Lm1YLFD.png) -*Conceptual illustration: Left - true particle trajectory. Right - ambiguous reconstructions including ghosts.* - -## How ProjectionDeghosting Works - -The `ProjectionDeghosting` class implements a sophisticated algorithm to identify and remove ghost hits by analyzing the 2D projections of reconstructed 3D clusters. Its operation can be broken down into these key stages: - -### 1. Blob and Cluster Shadows - -The algorithm first constructs specialized graph representations: - -- **BlobShadow Graph**: Represents relationships between "blobs" (3D reconstructed charge deposits) -- **ClusterShadow Graph**: Represents connections between clusters of blobs - -These graphs capture how blobs and clusters relate to each other across different wire plane views. - -### 2. 2D Projections - -For each cluster, the algorithm creates 2D projections for each wire plane layer: - -```cpp -projection2d::layerprojection2dmap& proj_cluster = get_projection( - id2lproj, cs_cluster, in_graph, b_cluster, m_nchan, m_nslice, m_uncer_cut, m_dead_default_charge); -``` - -These projections represent how each 3D cluster would appear when viewed from each wire plane's perspective. - -### 3. 
Coverage Analysis - -The algorithm then analyzes the "coverage" between different projections: - -```cpp -coverage_alt = projection2d::judge_coverage_alt(proj2d_comp_3dclus, - proj2d_clust3d, m_judge_alt_cut_values, m_uncer_cut); -``` - -Coverage analysis examines how projections from different clusters overlap or contain each other. Specific patterns of coverage provide strong indicators of whether a cluster is real or a ghost. - -### 4. Ghost Identification - -Based on the coverage analysis, charge distribution, and other metrics, the algorithm makes decisions about which clusters are likely to be ghosts: - -```cpp -if(sqrt(pow(n_timeslices / m_global_deghosting_cut_values.at(0), 2) + - pow(min_charge / n_blobs / m_global_deghosting_cut_values.at(1), 2)) < 1 || - min_charge / n_blobs / m_global_deghosting_cut_values.at(2) < 1.) { - saved = 0; // Mark as ghost -} else { - saved = 1; // Keep -} -``` - -The algorithm applies configurable thresholds to make these determinations, considering: -- Time extent of the cluster -- Charge distribution -- Projection coverage patterns -- Blob count and density - -### 5. Cluster Pruning - -Finally, the algorithm removes clusters identified as ghosts: - -```cpp -auto out_graph = remove_blobs(in_graph, tagged_bs, true); -``` - -This produces a refined cluster graph with ghost hits removed, resulting in a more accurate 3D reconstruction. - -## Key Concepts - -### AnodePlane and Face - -In Wire-Cell: -- An `AnodePlane` represents a physical detector component with multiple wire planes -- Each `AnodePlane` can have one or more "faces" (typically two, one on each side) -- A "face" (`ianodeface`) is a collection of wire planes that share the same sensitive volume - -The "face" concept is crucial for deghosting because: -1. Each face contains wire planes at different angles (typically U, V, and W views) -2. Particles passing through the detector create signals on wires from each plane of the same face -3. 
Ghosts can only form between wires from the same face - -The algorithm tracks which face each blob belongs to: -```cpp -auto iblob = get(cg[vtx].ptr); -face2blobs[iblob->face()].push_back(vdesc); -``` - -### Projections and Coverage - -The algorithm uses several types of coverage relationships between projections: -- **ref_covers_tar**: One projection fully covers another -- **tar_covers_ref**: One projection is fully covered by another -- **ref_eq_tar**: Projections are equivalent -- **both_empty**: Both projections are empty -- **other**: Other relationship patterns - -These coverage relationships form distinctive patterns that help distinguish real tracks from ghost hits. - -## Configuration Parameters - -The `ProjectionDeghosting` class uses several configurable parameters: - -```cpp -cfg["verbose"] = m_verbose; -cfg["nchan"] = (unsigned int) m_nchan; -cfg["nslice"] = (unsigned int) (m_nslice); -cfg["dryrun"] = m_dryrun; -cfg["global_deghosting_cut_nparas"] = m_global_deghosting_cut_nparas; -``` - -Key parameters include: -- **nchan/nslice**: Dimensions of the detector readout -- **global_deghosting_cut_values**: Values controlling ghost identification thresholds -- **judge_alt_cut_values**: Parameters for the coverage judgment algorithm -- **uncer_cut**: Threshold for handling measurement uncertainties -- **dead_default_charge**: Value to use for dead/inactive channels - -## Integration with Wire-Cell - -The `ProjectionDeghosting` class integrates with the broader Wire-Cell reconstruction chain: - -1. It implements the `iclusterfilter` interface, allowing it to be used as a filter in reconstruction pipelines -2. It takes cluster graphs as input and produces filtered cluster graphs as output -3. It can be configured through Wire-Cell's JSON configuration system -4. 
It's typically positioned in the reconstruction chain after initial clustering but before 3D point extraction - -## Performance Considerations - -The effectiveness of projection deghosting depends on several factors: - -- **Detector geometry**: The angles between wire planes affect ghosting patterns -- **Signal quality**: Cleaner signals lead to better disambiguation -- **Tuning parameters**: The thresholds need to be tuned for specific detectors -- **Computational cost**: The algorithm performs significant graph operations and may be computationally intensive - -## Example Usage - -In a Wire-Cell configuration: - -```json -{ - "configs": [ - { - "data": { - "verbose": false, - "nchan": 8256, - "nslice": 9592, - "global_deghosting_cut_nparas": 3, - "global_deghosting_cut_values": [3.0, 3000.0, 2000.0, 8.0, 8000.0, 4000.0, 8.0, 8000.0, 6000.0], - "judge_alt_cut_values": [0.05, 0.33, 0.15, 0.33] - }, - "name": "ProjectionDeghosting" - } - ] -} -``` - -## Summary - -The `ProjectionDeghosting` class provides a sophisticated solution to the ghosting problem in LArTPC reconstruction. By analyzing 2D projections of 3D clusters and applying geometric constraints, it can effectively distinguish real particle trajectories from ghost hits, leading to significantly improved 3D reconstruction quality. - -Understanding this class is essential for anyone working with Wire-Cell reconstruction, particularly when dealing with complex event topologies where ghosting can severely impact reconstruction performance. - -## References - -1. Wire-Cell Toolkit documentation -2. LArTPC reconstruction techniques -3. MicroBooNE and DUNE reconstruction papers -4. 
Wire-Cell developer notes on clustering algorithms \ No newline at end of file diff --git a/img/docs/talks/imaging_porting_summary.pptx b/img/docs/talks/imaging_porting_summary.pptx new file mode 100644 index 000000000..45ffa19b7 Binary files /dev/null and b/img/docs/talks/imaging_porting_summary.pptx differ diff --git a/img/inc/WireCellImg/ClusterScopeFilter.h b/img/inc/WireCellImg/ClusterScopeFilter.h new file mode 100644 index 000000000..05ad397a8 --- /dev/null +++ b/img/inc/WireCellImg/ClusterScopeFilter.h @@ -0,0 +1,40 @@ +/** + * ClusterScopeFilter + */ +#ifndef WIRECELL_CLUSTERSCOPEFILTER_H +#define WIRECELL_CLUSTERSCOPEFILTER_H + +#include "WireCellIface/IClusterFilter.h" +#include "WireCellIface/IConfigurable.h" +#include "WireCellAux/Logger.h" + +namespace WireCell { + + namespace Img { + + class ClusterScopeFilter : public Aux::Logger, public IClusterFilter, public IConfigurable { + public: + /// TODO: what is needed here + /// FIXME: bit operation would be better + enum BLOB_QUALITY_BITPOS { GOOD, BAD, POTENTIAL_GOOD, POTENTIAL_BAD, TO_BE_REMOVED }; + + using vertex_tags_t = std::unordered_map; + // using vertex_tagging_t = std::function; + + ClusterScopeFilter(); + virtual ~ClusterScopeFilter(); + + virtual void configure(const WireCell::Configuration& cfg); + virtual WireCell::Configuration default_configuration() const; + + virtual bool operator()(const input_pointer& in, output_pointer& out); + + private: + int m_face_index{-1}; + }; + + } // namespace Img + +} // namespace WireCell + +#endif /* WIRECELL_CLUSTERSCOPEFILTER_H */ diff --git a/img/src/ChargeSolving.cxx b/img/src/ChargeSolving.cxx index 5bd478e75..73f1efd3a 100644 --- a/img/src/ChargeSolving.cxx +++ b/img/src/ChargeSolving.cxx @@ -132,6 +132,7 @@ void blob_weight_uboone(const cluster_graph_t& cgraph, graph_t& csg) // TODO remove this // std::cout << String::format("cent_time: %f next_con: %d, prev_con: %d, weight: %d", cent_time, next_con, prev_con, weight) << std::endl; } + 
(void)nblobs; // unused, but useful for debugging // TODO remove this // std::cout << String::format("nblobs: %d", nblobs) << std::endl; } diff --git a/img/src/ClusterScopeFilter.cxx b/img/src/ClusterScopeFilter.cxx new file mode 100644 index 000000000..698b1f656 --- /dev/null +++ b/img/src/ClusterScopeFilter.cxx @@ -0,0 +1,78 @@ +#include "WireCellImg/ClusterScopeFilter.h" +#include "WireCellImg/CSGraph.h" +#include "WireCellImg/GeomClusteringUtil.h" +#include "WireCellAux/SimpleCluster.h" +#include "WireCellAux/ClusterHelpers.h" +#include "WireCellUtil/GraphTools.h" + +#include "WireCellUtil/NamedFactory.h" +#include "WireCellUtil/Logging.h" +#include "WireCellUtil/String.h" +#include "WireCellUtil/Exceptions.h" +#include "WireCellUtil/TimeKeeper.h" + +#include +#include +#include + +WIRECELL_FACTORY(ClusterScopeFilter, WireCell::Img::ClusterScopeFilter, WireCell::INamed, WireCell::IClusterFilter, + WireCell::IConfigurable) + +using namespace WireCell; +using namespace WireCell::Img; +using namespace WireCell::Aux; + +Img::ClusterScopeFilter::ClusterScopeFilter() + : Aux::Logger("ClusterScopeFilter", "img") +{ +} + +Img::ClusterScopeFilter::~ClusterScopeFilter() {} + +WireCell::Configuration Img::ClusterScopeFilter::default_configuration() const +{ + WireCell::Configuration cfg; + return cfg; +} + +void Img::ClusterScopeFilter::configure(const WireCell::Configuration& cfg) +{ + m_face_index = get(cfg, "face_index", m_face_index); + + log->debug("{}", cfg); +} + +bool ClusterScopeFilter::operator()(const input_pointer& in, output_pointer& out) +{ + out = nullptr; + if (!in) { + log->debug("EOS"); + return true; + } + + TimeKeeper tk(fmt::format("ClusterScopeFilter")); + + const auto in_graph = in->graph(); + log->debug("in_graph: {}", dumps(in_graph)); + + log->debug(tk(fmt::format("start delete some blobs"))); + using VFiltered = + typename boost::filtered_graph >; + VFiltered filtered_graph(in_graph, {}, [&](auto vtx) { + if (in_graph[vtx].code() != 'b') return 
true; + const auto iblob = std::get(in_graph[vtx].ptr); + if (iblob->face()->which() == m_face_index) return true; + return false; + }); + + WireCell::cluster_graph_t out_graph; + boost::copy_graph(filtered_graph, out_graph); + log->debug("out_graph: {}", dumps(out_graph)); + + /// output + log->debug("in_graph: {}", dumps(in_graph)); + log->debug("out_graph: {}", dumps(out_graph)); + + out = std::make_shared(out_graph, in->ident()); + return true; +} diff --git a/img/src/GeomClusteringUtil.cxx b/img/src/GeomClusteringUtil.cxx index 993cfa139..bf01a585c 100644 --- a/img/src/GeomClusteringUtil.cxx +++ b/img/src/GeomClusteringUtil.cxx @@ -275,9 +275,9 @@ void WireCell::Img::grouped_geom_clustering(cluster_graph_t& cg, std::string pol if (map_gap_tol.find(rel_diff) == map_gap_tol.end()) continue; if (policy == "uboone_local" && !adjacent(slice_times, islice1->start(), islice2->start())) continue; if (policy == "dead_clus") { - std::cout << "adjacent_dead: " - << islice1->start() << " " << islice1->span() << " " - << islice2->start() << " " << islice2->span() << std::endl; + // std::cout << "adjacent_dead: " + // << islice1->start() << " " << islice1->span() << " " + // << islice2->start() << " " << islice2->span() << std::endl; if (!adjacent_dead(islice1, islice2)) continue; rel_diff = 0; // use 1 as wire offset } diff --git a/img/src/InSliceDeghosting.cxx b/img/src/InSliceDeghosting.cxx index 0794488d6..ff981c9c2 100644 --- a/img/src/InSliceDeghosting.cxx +++ b/img/src/InSliceDeghosting.cxx @@ -142,56 +142,56 @@ namespace { // } // } - bool adjacent(std::unordered_map>& cid1, - std::unordered_map>& cid2) + bool adjacent(const std::unordered_map>& cid1, + const std::unordered_map>& cid2) { - std::map map_plane_score; - - // Initialize score map - for (auto it = cid1.begin(); it != cid1.end(); it++) { - map_plane_score[it->first] = 0; - } - int sum_score = 0; - for (auto it = map_plane_score.begin(); it != map_plane_score.end(); it++) { - WireCell::WirePlaneLayer_t 
plane = it->first; - - // Skip if this plane doesn't exist in both maps - if (cid1.find(plane) == cid1.end() || cid2.find(plane) == cid2.end()) { - continue; - } - const std::set& set1 = cid1[plane]; - const std::set& set2 = cid2[plane]; + // Iterate through all planes in cid1 + for (const auto& [plane, channels1] : cid1) { + // Skip if this plane isn't in cid2 + auto it2 = cid2.find(plane); + if (it2 == cid2.end()) continue; + + const auto& channels2 = it2->second; - // Check for overlap between sets + // Check for overlap and adjacency in a single pass bool has_overlap = false; - for (int ch1 : set1) { - if (set2.find(ch1) != set2.end()) { + bool is_adjacent = false; + + // Iterate through the smaller set for efficiency + const auto& smaller = (channels1.size() <= channels2.size()) ? channels1 : channels2; + const auto& larger = (channels1.size() <= channels2.size()) ? channels2 : channels1; + + for (int ch : smaller) { + // Check for overlap + if (larger.find(ch) != larger.end()) { has_overlap = true; - break; + if (is_adjacent) break; // If we already found adjacency, we can stop } - } - - // Check for adjacency (difference of 1) between any elements - bool is_adjacent = false; - for (int ch1 : set1) { - if (set2.find(ch1 + 1) != set2.end() || set2.find(ch1 - 1) != set2.end()) { + + // Check for adjacency + if (larger.find(ch + 1) != larger.end() || larger.find(ch - 1) != larger.end()) { is_adjacent = true; - break; + if (has_overlap) break; // If we already found overlap, we can stop } } - // Apply the same scoring logic + // Apply scoring logic + int score = 0; if (is_adjacent && !has_overlap) { - map_plane_score[plane] = 1; - } - else if (has_overlap) { - map_plane_score[plane] = 2; + score = 1; + } else if (has_overlap) { + score = 2; } - - if (map_plane_score[plane] == 0) return false; - sum_score += map_plane_score[plane]; + + // For planes that exist in both maps, a score of 0 means immediate failure + if (score == 0) return false; + + sum_score += 
score; + + // Early exit if the sum is already >= 5 + if (sum_score >= 5) return true; } return sum_score >= 5; diff --git a/img/src/ProjectionDeghosting.cxx b/img/src/ProjectionDeghosting.cxx index d9a91002d..9891d1ab1 100644 --- a/img/src/ProjectionDeghosting.cxx +++ b/img/src/ProjectionDeghosting.cxx @@ -143,6 +143,7 @@ namespace { } boost::add_edge(old_tit->second, old_hit->second, cg_out); } + (void)nblobs; // std::cout << "boost::num_vertices(cg_out): " << boost::num_vertices(cg_out) << std::endl; return cg_out; } diff --git a/patrec/README.org b/patrec/README.org deleted file mode 100644 index a53f23943..000000000 --- a/patrec/README.org +++ /dev/null @@ -1,20 +0,0 @@ -#+title: Wire-Cell Pattern Recognition -#+include: ../docs/include-readme.org - -This sub-package holds Wire-Cell toolkit pattern recognition components. - -* PAAL - -A major part forms Steiner tree using the PAAL package from https://paal.mimuw.edu.pl/ and the headers of which are interned into [[inc/WireCellPatRec/paal/]]. - -When developing ~patrec~, it is best not to directly include files from there but instead use: - -#+begin_src c++ - #include "WireCellPatRec/paal.h" -#+end_src - -This header takes care of some compiler warnings mostly from the ~#include~ of Boost headers. However, it only includes select headers. More may be added unless up to that becoming a bottleneck to compilation speed. - -The copy of PAAL is "pristine" from 2017-01-30 git hash ~e537b58d50e93d4a72709821b9ea413008970c6b~. Nominally it was taken from ~git clone http://siekiera.mimuw.edu.pl:8082/paal~ however that server was non-responsive and so the copy here was cloned from ~git clone https://salsa.debian.org/maxzor/paal~. - -It has some fixes are applied to the pristine copy to get it to compile. 
diff --git a/patrec/docs/QLBundles/Bundle.md b/patrec/docs/QLBundles/Bundle.md deleted file mode 100644 index 9fd99f2e8..000000000 --- a/patrec/docs/QLBundles/Bundle.md +++ /dev/null @@ -1,212 +0,0 @@ -## Describe FlashTPCBundle in WCP - -See also the [OpFlash documentation](https://github.com/BNLIF/wire-cell-data/blob/master/docs/FlashTPCBundle.md) for specifics about that data product. - -## Example prototype jobs or files ... - -A WCP rootfile can be found @ [this link](https://www.phy.bnl.gov/xqian/talks/wire-cell-porting/nuselEval_5384_137_6852.root) - -Bundle information are saved in -```cpp - TTree *T_match1 = new TTree("T_match","T_match"); - T_match1->SetDirectory(file1); - Int_t ncluster; - T_match1->Branch("tpc_cluster_id",&ncluster,"tpc_cluster_id/I"); // TPC parent cluster id (see TC tree for more explainations) - T_match1->Branch("flash_id",&flash_id,"flash_id/I"); // PMT Flash ID, see flash_id for more information - T_match1->Branch("event_type",&event_type,"event_type/I"); // this is to save the event tagger information (saved as bits in WCP) - Double_t flash_time; - T_match1->Branch("flash_time",&flash_time,"flash_time/D"); // Flash time - cluster_length = 0; - T_match1->Branch("cluster_length",&cluster_length,"cluster_length/D"); // cluster length for main cluster -``` - - -TPC Blob information are saved in -```cpp - - // load mcell - TTree *TC = (TTree*)file->Get("TC"); - std::vector *cluster_id_vec = new std::vector; - std::vector *parent_cluster_id = new std::vector; - std::vector *time_slice_vec = new std::vector; - std::vector *q_vec = new std::vector; - std::vector *uq_vec = new std::vector; - std::vector *vq_vec = new std::vector; - std::vector *wq_vec = new std::vector; - std::vector *udq_vec = new std::vector; - std::vector *vdq_vec = new std::vector; - std::vector *wdq_vec = new std::vector; - - std::vector *nwire_u_vec = new std::vector; - std::vector *nwire_v_vec = new std::vector; - std::vector *nwire_w_vec = new std::vector; - 
std::vector *flag_u_vec = new std::vector; - std::vector *flag_v_vec = new std::vector; - std::vector *flag_w_vec = new std::vector; - - std::vector> *wire_index_u_vec = new std::vector>; - std::vector> *wire_index_v_vec = new std::vector>; - std::vector> *wire_index_w_vec = new std::vector>; - std::vector> *wire_charge_u_vec = new std::vector>; - std::vector> *wire_charge_v_vec = new std::vector>; - std::vector> *wire_charge_w_vec = new std::vector>; - std::vector> *wire_charge_err_u_vec = new std::vector>; - std::vector> *wire_charge_err_v_vec = new std::vector>; - std::vector> *wire_charge_err_w_vec = new std::vector>; - - TC->SetBranchAddress("cluster_id",&cluster_id_vec); // actual cluster id where this blob belons - TC->SetBranchAddress("parent_cluster_id",&parent_cluster_id); // main cluster id that is used in T_match bundle - TC->SetBranchAddress("time_slice",&time_slice_vec); - TC->SetBranchAddress("q",&q_vec); - TC->SetBranchAddress("uq",&uq_vec); - TC->SetBranchAddress("vq",&vq_vec); - TC->SetBranchAddress("wq",&wq_vec); - TC->SetBranchAddress("udq",&udq_vec); - TC->SetBranchAddress("vdq",&vdq_vec); - TC->SetBranchAddress("wdq",&wdq_vec); - TC->SetBranchAddress("nwire_u",&nwire_u_vec); - TC->SetBranchAddress("nwire_v",&nwire_v_vec); - TC->SetBranchAddress("nwire_w",&nwire_w_vec); - TC->SetBranchAddress("flag_u",&flag_u_vec); - TC->SetBranchAddress("flag_v",&flag_v_vec); - TC->SetBranchAddress("flag_w",&flag_w_vec); - TC->SetBranchAddress("wire_index_u",&wire_index_u_vec); - TC->SetBranchAddress("wire_index_v",&wire_index_v_vec); - TC->SetBranchAddress("wire_index_w",&wire_index_w_vec); - TC->SetBranchAddress("wire_charge_u",&wire_charge_u_vec); - TC->SetBranchAddress("wire_charge_v",&wire_charge_v_vec); - TC->SetBranchAddress("wire_charge_w",&wire_charge_w_vec); - TC->SetBranchAddress("wire_charge_err_u",&wire_charge_err_u_vec); - TC->SetBranchAddress("wire_charge_err_v",&wire_charge_err_v_vec); - 
TC->SetBranchAddress("wire_charge_err_w",&wire_charge_err_w_vec); -``` - - -Opflash are saved in -```cpp - TTree *T_flash = (TTree*)file->Get("T_flash"); - Double_t time; - Int_t type; - Int_t flash_id; - Int_t temp_run_no, temp_subrun_no, temp_event_no; - T_flash->SetBranchAddress("runNo",&temp_run_no); - T_flash->SetBranchAddress("subRunNo",&temp_subrun_no); - T_flash->SetBranchAddress("eventNo",&temp_event_no); - T_flash->SetBranchAddress("time",&time); - T_flash->SetBranchAddress("type",&type); // flash type, full waveform or Cosmic mode, two different types in MicroBooNE - T_flash->SetBranchAddress("flash_id",&flash_id); // this id is useful for matching with TPC object in bundle - Double_t low_time, high_time, total_PE; - Double_t temp_PE[32], temp_PE_err[32]; - std::vector *fired_channels = new std::vector; - std::vector *l1_fired_time = new std::vector; - std::vector *l1_fired_pe = new std::vector; - T_flash->SetBranchAddress("low_time",&low_time); // start time of flash - T_flash->SetBranchAddress("high_time",&high_time); // end time of flash - T_flash->SetBranchAddress("total_PE",&total_PE); // total PE - T_flash->SetBranchAddress("PE",temp_PE); // PE for each PMT - T_flash->SetBranchAddress("PE_err",temp_PE_err); // PE_err for each PMT - T_flash->SetBranchAddress("fired_channels",&fired_channels); // which channel are included in flash - T_flash->SetBranchAddress("l1_fired_time",&l1_fired_time); // advanced flash info - T_flash->SetBranchAddress("l1_fired_pe",&l1_fired_pe); // advanced flash info -``` - -## Describe WCT version - -### Review uboone blob loading - -The `Trun`, `TC` and `TDC` ROOT trees may be loaded into WCT via [`UbooneBlobSource`](../../../root/src/UbooneBlobSource.cxx) to produce an `IBlobSet` of either "live" or "dead" blobs. Two sources are needed to load both "live" and "dead" blobs simultaneously. 
- -The [`uboone-blobs.smake` Snakemake workflow](../../../root/test/uboone-blobs.smake) runs `wire-cell` on [`uboone-blobs.jsonnet`](../../../root/test/uboone-blobs.jsonnet) in three kinds of modes: "live", "dead" and "clus". - -The "live" and "dead" modes each produce a *WCT cluster file* with a graph like: - -``` -UbooneBlobSources -> BlobClustering -> GlobalGeomClustering -> ClusterFileSink -``` - -The "live" mode has four sources `live-{uvw,uv,vw,wu}` and "dead" has three sources `dead-{uv,vw,wu}`. - -The "clus" job is like: - -``` -ClusterFileSources -> PointTreeBuilding -> MultiAlgBlobClustering -> TensorFileSink -``` -There are two sources here loading each of "live" and "dead" cluster files. - -The output of ["MABC"](../../../clus/src/MultiAlgBlobClustering.cxx) is a point-cloud tree in *WCT tensor data model* representation (`ITensorSet`). - -:warning: This workflow uses uboone blobs but ignores uboone cluster info (`cluster_id`). - -### Uboone cluster and flash loading - -In order to consider `cluster_id` when loading `TC` and `T_Flash` information into WCT requires a complex procedure. This is in large part due to the fact that WCT clusters are not "identified" per se but rather are emergent from the "connected components" graph operation. After the operation we may identify a cluster by its ID in the "connected components" array which is effectively arbitrary. Thus we must form WCT clusters in a context that still retains the association between blobs and their `cluster_id`. But further, to represent the cluster-flash associations requires point-cloud level data tier. And this requires blob sampling. - -We will call this new component `UbooneClusterSource` which supplies these operations: - -- Job has the usual 4 `UbooneBlobSource`'s to get `IBlobSet`'s spanning uvw/uv/vw/wu view cases. - - The `IBLobs` carry the ROOT `TTree::entry` number for their `TC` origin giving "blob order". -- A `BlobSetMerge` follows to provide 4-to-1 fan-in. 
- -Then comer `UbooneClusterSource`. -- Each input `IBlobSet` is buffered (reminder: it is per-slice). -- Produces an output queue of `ITensorSet`. -- The output queue is expected to hold either zero or one `ITensorSet`. -- On EOS or change in frame ident, buffered `IBlobs` are converted and flushed to `ITensorSet`. - - This is same behavior as `BlobClustering` - -The flushing entails: - -- Loads `TC` from file to get blob info including `cluster_id` -- Build CLUSTER ID ORDER as ordered `cluster_id` set and map to index. -- Copy-paste parts of `PointTreeBuilding::sample_live()` and use to produce initial pc-tree. - - We must define clusters based on `cluster_id` and not "connected components". - - Cluster nodes are added to the grouping node in CLUSTER ID ORDER. - - Map `cluster_id` to cluster `node_t*`. - - Walk blobs, using their `tpc_cluster_id` to find the cluster node to receive them. -- Load `T_flash` ROOT TTree to produce **light**, **flash** and **flashlight** data as described in the tensor-data-model. - - Store each of these three as arrays in a "local" PC on the "grouping" pc-tree node. -- Load `T_match` to get cluster-flash associations. - - Form map from `cluster_id` to INDEX of **flash** array. - - Store this index as a scalar `flash` array in a `cluster_scalar` local PC. - - Flash-less clusters get index of -1. - - Also store `cluster_id` in this `cluster_scalar` cluster-local PC. -- Convert pc-tree to `ITensorSet` and output. - - Reminder, all local PCs of the same name must be the same "shape". Blob-nodes have a `scalar` local PC thus the name difference with `cluster_scaler`. - -Overall graph configuration constraints: - -- User **must** configure `Uboone*Source` to use the same stream of ROOT files. - - - -## Example WCT jobs or files ... - -See [uboone.org](../../../root/docs/uboone.org). 
In particular: - -``` -wire-cell -l stderr -L debug -A infiles=nuselEval_5384_137_6852.root root/test/uboone-clusters.jsonnet -``` - -And see `aux/src/ClusterFlashDump.cxx` as an example of a starting point for a real next-stage. - -Some comments on next stage: - -- The `uboone-clusters.jsonnet` graph represents a single face pipeline (that's all of uboone). -- To do cluster-flash matching on other dets, we must merge across face+APAs. - - This merge needs w.rt. optical `light/flash/flashlight` arrays. - - On one hand, these are common across all face+APAs in which case we just take one. - - OTOH, if they differ they must be appended **and** we must rewrite the `flash` scalar array in the `cluster_scalar` PCs. - - So, the merge needs to know what previous people did to prepare the data. - -- The `uboone-clusters.jsonnet` ends with `ClusterFlashDump`. - - Obviously, replaced this with an `ITensorSet -> ITensorSet` filter is needed for cluster-flash making or other refinement. - - This replacement node likely succeed in the graph the "big merge" node described above. - - This new node type should probably look similar to `MABC` in that it allows a pc-tree to be passed through a pipeline of functions without requiring I/O through the TDM. - - This pipeline could be hoisted up to the flow graph by making a new `IData` that passes a pc-tree w/out requiring a round trip through the TDM. - -## WCP's requirements - -N/A - -## WCT's questions to confirm functionality - diff --git a/patrec/docs/QLBundles/Opflash.md b/patrec/docs/QLBundles/Opflash.md deleted file mode 100644 index 79c9b4f0e..000000000 --- a/patrec/docs/QLBundles/Opflash.md +++ /dev/null @@ -1,51 +0,0 @@ -## Describe OpFlash in WCP - -For more detailed information, please refer to the [OpFlash documentation](https://github.com/BNLIF/wire-cell-data/blob/master/docs/OpFlash.md). - -## Example prototype jobs or files ... 
- -A WCP rootfile can be found @ [this link](https://www.phy.bnl.gov/xqian/talks/wire-cell-porting/nuselEval_5384_137_6852.root) - - -Opflash are saved in -```cpp - TTree *T_flash = (TTree*)file->Get("T_flash"); - Double_t time; - Int_t type; - Int_t flash_id; - Int_t temp_run_no, temp_subrun_no, temp_event_no; - T_flash->SetBranchAddress("runNo",&temp_run_no); - T_flash->SetBranchAddress("subRunNo",&temp_subrun_no); - T_flash->SetBranchAddress("eventNo",&temp_event_no); - T_flash->SetBranchAddress("time",&time); - T_flash->SetBranchAddress("type",&type); // flash type, full waveform or Cosmic mode, two different types in MicroBooNE - T_flash->SetBranchAddress("flash_id",&flash_id); // this id is useful for matching with TPC object in bundle - Double_t low_time, high_time, total_PE; - Double_t temp_PE[32], temp_PE_err[32]; - std::vector *fired_channels = new std::vector; - std::vector *l1_fired_time = new std::vector; - std::vector *l1_fired_pe = new std::vector; - T_flash->SetBranchAddress("low_time",&low_time); // start time of flash - T_flash->SetBranchAddress("high_time",&high_time); // end time of flash - T_flash->SetBranchAddress("total_PE",&total_PE); // total PE - T_flash->SetBranchAddress("PE",temp_PE); // PE for each PMT - T_flash->SetBranchAddress("PE_err",temp_PE_err); // PE_err for each PMT - T_flash->SetBranchAddress("fired_channels",&fired_channels); // which channel are included in flash - T_flash->SetBranchAddress("l1_fired_time",&l1_fired_time); // advanced flash info - T_flash->SetBranchAddress("l1_fired_pe",&l1_fired_pe); // advanced flash info -``` - -## Describe WCT version - -See "light" and "flash" sections of the [WCT tensor data model document](../../../aux/docs/tensor-data-model.org). - -More discussion is found in the [`Bundle`](./Bundle.md) document. - -## Example WCT jobs or files ... 
- -## WCP's requirements - -N/A - -## WCT's questions to confirm functionality - diff --git a/patrec/docs/tjft/wcp-data.svg b/patrec/docs/tjft/wcp-data.svg deleted file mode 100644 index 7da5c3d4b..000000000 --- a/patrec/docs/tjft/wcp-data.svg +++ /dev/null @@ -1,274 +0,0 @@ - - - - - - -wcpdata - - - -cluster_vertex_graph - -Graph: -cluster-vertex - - - -PR3DCluster - -PR3DCluster - - - -cluster_vertex_graph->PR3DCluster - - -has - - - -ProtoVertex - -ProtoVertex - - - -cluster_vertex_graph->ProtoVertex - - -has - - - -cluster_segment_graph - -Graph: -cluster-segment - - - -cluster_segment_graph->PR3DCluster - - -has - - - -ProtoSegment - -ProtoSegment - - - -cluster_segment_graph->ProtoSegment - - -has - - - -shower_vertex_segment_graph - -Graph: -shower-vertex-segment - - - -WCShower - -WCShower - - - -shower_vertex_segment_graph->WCShower - - -has - - - -shower_vertex_segment_graph->ProtoSegment - - -has - - - -shower_vertex_segment_graph->ProtoVertex - - -has - - - -segment_vertex_graph - -Graph: -segement-vertex - - - -segment_vertex_graph->ProtoSegment - - -has - - - -segment_vertex_graph->ProtoVertex - - -has - - - -NeutrinoID - -NeutrinoID - - - -NeutrinoID->cluster_vertex_graph - - -has - - - -NeutrinoID->cluster_segment_graph - - -has - - - -NeutrinoID->shower_vertex_segment_graph - - -has - - - -WCShower->segment_vertex_graph - - -has - - - -start_segment - -start_segment - - - -WCShower->start_segment - - -has - - - -start_vertex - -start_vertex - - - -WCShower->start_vertex - - -has - - - -segment_points - -segment_points - - - -ProtoSegment->segment_points - - -has - - - -cluster_id - -cluster_id - - - -ProtoVertex->cluster_id - - -has - - - -vertex_point - -vertex_point - - - -ProtoVertex->vertex_point - - -has - - - -Point - -Point - - - -wire-cell-xxx-main - -wire-cell-xxx-main - - - -wire-cell-xxx-main->NeutrinoID - - -has - - - -segment_points->Point - - -is - - - -vertex_point->Point - - -is - - - -start_segment->ProtoSegment - - -is - - - 
-start_vertex->ProtoVertex - - -is - - - diff --git a/patrec/inc/WireCellPatRec/paal.h b/patrec/inc/WireCellPatRec/paal.h deleted file mode 100644 index 80eb34d64..000000000 --- a/patrec/inc/WireCellPatRec/paal.h +++ /dev/null @@ -1,19 +0,0 @@ -// This collects the few #includes of PAAL that are actually used by -// WireCellPatRec so that we can protect against warnings->errors due to their -// inclusion of boost. - -#ifndef WIRECELLPATREC_PAAL -#define WIRECELLPATREC_PAAL - -// These provide pragmas to tell the compiler to ignore warnings in boost. -#include "WireCellUtil/MultiArray.h" -#include "WireCellUtil/Graph.h" - -// The actual few headers PatRec needs. -#include "WireCellPatRec/paal/data_structures/metric/graph_metrics.hpp" -#include "WireCellPatRec/paal/data_structures/metric/euclidean_metric.hpp" -#include "WireCellPatRec/paal/utils/irange.hpp" - -#include "WireCellPatRec/paal/iterative_rounding/steiner_tree/steiner_tree.hpp" - -#endif diff --git a/patrec/inc/WireCellPatRec/paal/auctions/auction_components.hpp b/patrec/inc/WireCellPatRec/paal/auctions/auction_components.hpp deleted file mode 100644 index f842edf18..000000000 --- a/patrec/inc/WireCellPatRec/paal/auctions/auction_components.hpp +++ /dev/null @@ -1,313 +0,0 @@ -//======================================================================= -// Copyright (c) -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file auction_components.hpp - * @brief - * @author Robert Rosolek - * @version 1.0 - * @date 2014-01-07 - */ -#ifndef PAAL_AUCTION_COMPONENTS_HPP -#define PAAL_AUCTION_COMPONENTS_HPP - -#include "paal/data_structures/components/component_traits.hpp" -#include "paal/data_structures/components/components.hpp" -#include "paal/data_structures/components/components_join.hpp" -#include "paal/utils/concepts.hpp" -#include "paal/utils/functors.hpp" -#include "paal/utils/type_functions.hpp" - -#include -#include -#include - -#include -#include -#include - -namespace paal { -/// Auctions namespace -namespace auctions { - - // Base - - /** - * @brief name for the bidders component - */ - struct bidders; - /** - * @brief name for the items component - */ - struct items; - /** - * @brief name for the get_copies_num component - */ - struct get_copies_num; - - // Value Query Auction - - /** - * @brief name for the value query component - */ - struct value_query; - - // Demand Query Auction - - /** - * @brief name for the demand query component - */ - struct demand_query; - - // Gamma Oracle Auction - - /** - * @brief name for the gamma oracle component - */ - struct gamma_oracle; - /** - * @brief name for the gamma component - */ - struct gamma; - -/// Auctions Concepts namespace -namespace concepts { - template - class auction { - template - using component_to_range_t = typename std::remove_reference< - typename data_structures::component_traits< - typename std::decay::type>:: - template type::type - >::type; - - public: - - auction() = delete; - - using bidders_t = component_to_range_t; - BOOST_CONCEPT_ASSERT((boost::SinglePassRangeConcept)); - BOOST_CONCEPT_ASSERT((utils::concepts::readable_range)); - - using items_t = component_to_range_t; - BOOST_CONCEPT_ASSERT((boost::SinglePassRangeConcept)); - 
BOOST_CONCEPT_ASSERT((utils::concepts::readable_range)); - using item_val_t = range_to_elem_t; - - BOOST_CONCEPT_USAGE(auction) - { - auto copies = a.template call( - *std::begin(a.template get())); - using get_copies_num_result_t = puretype(copies); - static_assert(std::is_integral::value, - "return type of get_copies_num is not integral!"); - } - - protected: - Auction a; - - auto get_item() -> decltype(*std::begin(a.template get())) - { - return *std::begin(a.template get()); - } - - auto get_bidder() -> decltype(*std::begin(a.template get())) - { - return *std::begin(a.template get()); - } - }; - - template - class value_query_auction : public auction { - using base = auction; - - public: - - BOOST_CONCEPT_USAGE(value_query_auction) - { - auto value_query_ = this->a.template get(); - auto val = value_query_(this->get_bidder(), std::unordered_set< - typename base::item_val_t>{this->get_item()}); - using value_query_result_t = puretype(val); - static_assert(std::is_arithmetic::value, - "return type of value_query is not arithmetic!"); - } - }; - - template - struct demand_query_auction : auction { - - BOOST_CONCEPT_USAGE(demand_query_auction) - { - auto demand_query_ = this->a.template get(); - auto get_price = utils::return_one_functor(); - auto res = demand_query_(this->get_bidder(), get_price); - using demand_query_result_items_t = decltype(res.first); - BOOST_CONCEPT_ASSERT((boost::SinglePassRangeConcept< - demand_query_result_items_t>)); - BOOST_CONCEPT_ASSERT((utils::concepts::readable_range< - demand_query_result_items_t>)); - using demand_query_result_value_t = puretype(res.second); - static_assert(std::is_arithmetic::value, - "second member of the result from demand query oracle is not arithmetic!"); - } - }; - - template - struct gamma_oracle_auction : auction { - - using gamma_t = typename data_structures::component_traits< - typename std::decay::type>:: - template type::type; - static_assert(std::is_arithmetic::value, - "gamma type is not 
arithmetic!"); - - BOOST_CONCEPT_USAGE(gamma_oracle_auction) - { - auto gamma_oracle_ = this->a.template get(); - auto get_price = utils::return_one_functor(); - auto threshold = 0.; - auto res = gamma_oracle_(this->get_bidder(), get_price, threshold); - if (res) {} - if (!res) {} - using gamma_oracle_result_items_t = decltype(res->first); - BOOST_CONCEPT_ASSERT((boost::SinglePassRangeConcept< - gamma_oracle_result_items_t>)); - BOOST_CONCEPT_ASSERT((utils::concepts::readable_range< - gamma_oracle_result_items_t>)); - using gamma_oracle_result_price_t = puretype(res->second.num); - static_assert(std::is_arithmetic::value, - "numerator of frac returned from gamma oracle is not arithmetic!"); - using gamma_oracle_result_value_t = puretype(res->second.den); - static_assert(std::is_arithmetic::value, - "denominator of frac returned from gamma oracle is not arithmetic!"); - } - }; - -} //!concepts - - // Base - - /** - * @brief Definition for the components class representing an auction. - * This class is not meant to be directly used, it is just a base for the - * more specialized components interfaces. - */ - using base_auction_components = data_structures::components< - bidders, - items, - data_structures::NameWithDefault - >; - - namespace detail { - /// extend base auction components with other components. - template - using add_to_base_auction = - typename data_structures::join< - base_auction_components, - data_structures::components - >::type; - }; //!detail - - // Value Query Auction - - /** - * @brief definition for the components class for a value query auction. 
- */ - using value_query_components = detail::add_to_base_auction; - - /** - * @brief value query auction components template alias - * - * @tparam Args - */ - template - using value_query_auction_components = typename value_query_components::type; - - /** - * @brief make function for value query components - * - * @tparam Args - * @param args - * - * @return value query components - */ - template - auto make_value_query_auction_components(Args&&... args) -> - decltype(value_query_components::make_components(std::forward(args)...)) - { - auto res = value_query_components::make_components(std::forward(args)...); - BOOST_CONCEPT_ASSERT((concepts::value_query_auction)); - return res; - } - - // Demand Query Auction - - /** - * @brief definition for the components class for a demand query auction - */ - using demand_query_components = detail::add_to_base_auction; - - /** - * @brief demand query auction components template alias - * - * @tparam Args - */ - template - using demand_query_auction_components = typename demand_query_components::type; - - /** - * @brief make function for demand query components - * - * @tparam Args - * @param args - * - * @return demand query components - */ - template - auto make_demand_query_auction_components(Args&&... args) - { - auto res = demand_query_components::make_components(std::forward(args)...); - BOOST_CONCEPT_ASSERT((concepts::demand_query_auction)); - return res; - } - - // Gamma Oracle Auction - - /** - * @brief definition for the components class for a gamma oracle auction. 
- */ - using gamma_oracle_components = detail::add_to_base_auction; - - /** - * @brief gamma oracle auction components template alias - * - * @tparam Args - */ - template - using gamma_oracle_auction_components = typename gamma_oracle_components::type; - - /** - * @brief make function for gamma oracle components - * - * @tparam Args - * @param args - * - * @return gamma oracle components - */ - template - auto make_gamma_oracle_auction_components(Args&&... args) -> - decltype(gamma_oracle_components::make_components(std::forward(args)...)) - { - auto res = gamma_oracle_components::make_components(std::forward(args)...); - BOOST_CONCEPT_ASSERT((concepts::gamma_oracle_auction)); - return res; - } - -} //!auctions -} //!paal -#endif // PAAL_AUCTION_COMPONENTS_HPP diff --git a/patrec/inc/WireCellPatRec/paal/auctions/auction_traits.hpp b/patrec/inc/WireCellPatRec/paal/auctions/auction_traits.hpp deleted file mode 100644 index 653d40af5..000000000 --- a/patrec/inc/WireCellPatRec/paal/auctions/auction_traits.hpp +++ /dev/null @@ -1,111 +0,0 @@ -/** - * @file auction_traits.hpp - * @brief - * @author Robert Rosolek - * @version 1.0 - * @date 2014-03-24 - */ -#ifndef PAAL_AUCTION_TRAITS_HPP -#define PAAL_AUCTION_TRAITS_HPP - -#include "paal/auctions/auction_components.hpp" -#include "paal/data_structures/fraction.hpp" -#include "paal/utils/functors.hpp" -#include "paal/utils/type_functions.hpp" - -#include - -#include -#include -#include - -namespace paal { -namespace auctions { - -/** - * @brief Types associated with all auctions. 
- * - * @tparam Auction - */ -template -struct auction_traits { - using bidders_universe_t = - decltype(std::declval().template get()); - using bidder_iterator_t = - typename boost::range_iterator::type; - using bidder_t = range_to_ref_t; - using bidder_val_t = range_to_elem_t; - using items_universe_t = - decltype(std::declval().template get()); - using item_t = range_to_ref_t; - using item_val_t = range_to_elem_t; - using copies_num_t = puretype( - std::declval().template call( - std::declval() - ) - ); -}; - -/** - * @brief Types associated with value query auction. - * - * @tparam ValueQueryAuction - */ -template -class value_query_auction_traits: public auction_traits { - using base = auction_traits; - - public: - using value_t = puretype(std::declval().template call( - std::declval(), - std::unordered_set() // any container of items with count method - )); -}; - -/** - * @brief Types associated with demand query auction. - * - * @tparam DemandQueryAuction - */ -template -struct demand_query_auction_traits : public auction_traits { - - using result_t = puretype( - std::declval().template call( - std::declval::bidder_t>(), - // this is a little tricky, in order to obtain the value type, we pass prices and threshold - // as double types, because value type needs to be able to operate with doubles anyway - utils::make_dynamic_return_constant_functor(double(1.0)) // any functor with double operator() - ) - ); - using items_t = typename result_t::first_type; - using value_t = typename result_t::second_type; -}; - -/** - * @brief Types associated with gamma oracle auction. - * - * @tparam GammaOracleAuction - */ -template -class gamma_oracle_auction_traits: public auction_traits { - using temp_result_t = puretype( - *std::declval(). 
template call( - std::declval::bidder_t>(), - // this is a little tricky, in order to obtain the value type, we pass prices - // as double types, because value type needs to be able to operate with doubles anyway - utils::make_dynamic_return_constant_functor(double(1.0)), // any functor with double operator() - double(1.0) // any double - ) - ); - - public: - using items_t = typename temp_result_t::first_type; - using value_t = typename temp_result_t::second_type::den_type; - using frac_t = data_structures::fraction; - using result_t = boost::optional>; -}; - -} //!auctions -} //!paal -#endif // PAAL_AUCTION_TRAITS_HPP diff --git a/patrec/inc/WireCellPatRec/paal/auctions/auction_utils.hpp b/patrec/inc/WireCellPatRec/paal/auctions/auction_utils.hpp deleted file mode 100644 index 9f27c617e..000000000 --- a/patrec/inc/WireCellPatRec/paal/auctions/auction_utils.hpp +++ /dev/null @@ -1,75 +0,0 @@ -//======================================================================= -// Copyright (c) -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file auction_utils.hpp - * @brief - * @author Robert Rosolek - * @version 1.0 - * @date 2014-4-10 - */ -#ifndef PAAL_AUCTION_UTILS_HPP -#define PAAL_AUCTION_UTILS_HPP - -#include "paal/auctions/auction_components.hpp" -#include "paal/auctions/auction_traits.hpp" -#include "paal/utils/accumulate_functors.hpp" - -#include -#include -#include -#include - -#include - -namespace paal { -namespace auctions { - -/** - * @brief Returns the number of different kinds of items in an auction. - * - * @param auction - * @tparam Auction - */ -template -auto items_number(Auction&& auction) { - return boost::distance(auction.template get()); -} - -/** - * @brief Returns the number of bidders in an auction. 
- * - * @param auction - * @tparam Auction - */ -template -auto bidders_number(Auction&& auction) { - return boost::distance(auction.template get()); -} - -/** - * @brief Returns minimum number of copies of an item in an auction. - * - * @param auction - * @tparam Auction - */ -template -typename paal::auctions::auction_traits::copies_num_t -get_minimum_copies_num(Auction&& auction) -{ - assert(!boost::empty(auction.template get())); - using item = typename auction_traits::item_t; - auto get_copies_num_func = [&](item i) - { - return auction.template call(std::forward(i)); - }; - return *min_element_functor(auction.template get(), get_copies_num_func); -} - -} //!auctions -} //!paal -#endif // PAAL_AUCTION_UTILS_HPP diff --git a/patrec/inc/WireCellPatRec/paal/auctions/fractional_winner_determination_in_MUCA/fractional_winner_determination_in_MUCA.hpp b/patrec/inc/WireCellPatRec/paal/auctions/fractional_winner_determination_in_MUCA/fractional_winner_determination_in_MUCA.hpp deleted file mode 100644 index f49a38291..000000000 --- a/patrec/inc/WireCellPatRec/paal/auctions/fractional_winner_determination_in_MUCA/fractional_winner_determination_in_MUCA.hpp +++ /dev/null @@ -1,244 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Robert Rosolek -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file fractional_winner_determination_in_MUCA.hpp - * @brief - * @author Robert Rosolek - * @version 1.0 - * @date 2014-06-09 - */ -#ifndef PAAL_FRACTIONAL_WINNER_DETERMINATION_IN_MUCA_HPP -#define PAAL_FRACTIONAL_WINNER_DETERMINATION_IN_MUCA_HPP - -#include "paal/auctions/auction_components.hpp" -#include "paal/auctions/auction_traits.hpp" -#include "paal/auctions/auction_utils.hpp" -#include "paal/lp/glp.hpp" -#include "paal/lp/lp_row_generation.hpp" -#include "paal/utils/accumulate_functors.hpp" -#include "paal/utils/concepts.hpp" -#include "paal/utils/functors.hpp" -#include "paal/utils/property_map.hpp" - -#include -#include -#include -#include - -#include -#include -#include -#include - -namespace paal { -namespace auctions { - -namespace detail { - - template - struct bid { - Bidder m_bidder; - BidId m_bid_id; - Bundle m_bundle; - bid(Bidder bidder, BidId bid_id, Bundle bundle) : - m_bidder(bidder), m_bid_id(bid_id), m_bundle(bundle) {} - }; -}//! detail - - -/** - * @brief This is fractional determine winners in demand query auction and return - * assignment of fractional bundles to bidders. - * - * Example: - * \snippet fractional_winner_determination_in_MUCA_example.cpp - * - * Complete example is fractional_winner_determination_in_MUCA_example.cpp - * - * @tparam DemandQueryAuction - * @tparam OutputIterator - * @tparam ItemToLpIdMap - * @tparam SeparationOracle - * @param auction - * @param result - * @param item_to_id Stores the current mapping of items to LP column ids. - * @param epsilon Used for floating point comparison. - * @param separation_oracle Separation Oracle Strategy for searching the - * bidder with violated inequality. 
- */ -template < - class DemandQueryAuction, - class OutputIterator, - class ItemToLpIdMap, - class SeparationOracle = paal::lp::random_violated_separation_oracle -> -BOOST_CONCEPT_REQUIRES( - - ((concepts::demand_query_auction)) - - ((boost::ForwardRangeConcept< - typename demand_query_auction_traits::bidders_universe_t - >)) - - ((boost::ForwardRangeConcept< - typename demand_query_auction_traits::items_t - >)) - - ((utils::concepts::move_constructible< - typename demand_query_auction_traits::items_t - >)) - - ((utils::concepts::output_iterator< - OutputIterator, - std::tuple< - typename demand_query_auction_traits::bidder_t, - typename demand_query_auction_traits::items_t, - double - > - >)) - - ((boost::ReadWritePropertyMapConcept< - ItemToLpIdMap, - typename demand_query_auction_traits::item_t - >)), - - // TODO concept check for SeparationOracle - -(void)) -fractional_determine_winners_in_demand_query_auction( - DemandQueryAuction&& auction, - OutputIterator result, - ItemToLpIdMap item_to_id, - double epsilon, - SeparationOracle separation_oracle = SeparationOracle{} -) { - using traits_t = demand_query_auction_traits; - using bundle_t = typename traits_t::items_t; - using bidder_t = typename traits_t::bidder_t; - using bid_t = detail::bid; - using result_t = typename traits_t::result_t; - - lp::glp dual; - dual.set_optimization_type(lp::MINIMIZE); - - // add items variables to the dual - auto&& items_ = auction.template get(); - for (auto item = std::begin(items_); item != std::end(items_); ++item) { - auto const copies = auction.template call(*item); - auto const id = dual.add_column(copies, 0, lp::lp_traits::PLUS_INF, ""); - put(item_to_id, *item, id); - } - - // add bidders variables to the dual - // TODO allow to change the allocator - std::vector bidder_to_id(bidders_number(auction)); - for (auto& id: bidder_to_id) - id = dual.add_column(1, 0, lp::lp_traits::PLUS_INF, ""); - - // TODO allow to change the allocator - std::vector generated_bids; - - auto 
item_to_id_func = utils::make_property_map_get(item_to_id); - auto get_price = utils::compose( - [&](lp::col_id id) { return dual.get_col_value(id); }, - item_to_id_func - ); - - boost::optional res; - boost::optional last_bidder; - - auto how_much_violated = - utils::make_tuple_uncurry([&](bidder_t bidder, lp::col_id bidder_id) - { - //check if there is a violated constraint for bidder - last_bidder = bidder; - res = auction.template call(bidder, get_price); - auto const util = res->second; - auto const alpha = util - dual.get_col_value(bidder_id); - if (alpha > epsilon) return boost::optional(alpha); - return boost::optional{}; - }); - - auto add_violated = - utils::make_tuple_uncurry([&](bidder_t bidder, lp::col_id bidder_id) { - assert(last_bidder); - if (bidder != *last_bidder) { - res = auction.template call(bidder, get_price); - } - - auto& items = res->first; - auto const util = res->second; - - // add violated constraint - auto const price = sum_functor(items, get_price); - auto const value = util + price; - auto const expr = accumulate_functor(items, - lp::linear_expression(bidder_id), item_to_id_func); - auto const bid_id = dual.add_row(expr >= value); - generated_bids.emplace_back(bidder, bid_id, std::move(items)); - }); - - auto get_candidates = utils::make_dynamic_return_constant_functor( - boost::combine(auction.template get(), bidder_to_id)); - - // TODO check if max_violated strategy doesn't give better performance - auto find_violated = separation_oracle(get_candidates, how_much_violated, add_violated); - - auto solve_lp = [&]() - { - auto const res = dual.resolve_simplex(lp::DUAL); - assert(res == lp::OPTIMAL); - return res; - }; - - paal::lp::row_generation(find_violated, solve_lp); - - // emit results - for (auto& bid: generated_bids) { - auto const fraction = dual.get_row_dual_value(bid.m_bid_id); - if (fraction <= epsilon) continue; - *result = std::make_tuple(std::move(bid.m_bidder), - std::move(bid.m_bundle), fraction); - ++result; - } -} - 
-/** - * @brief This is fractional determine winners in demand query auction and return - * assignment of fractional bundles to bidders. - * This is version with default ItemToLpIdMap using std::unordered_map and - * default epsilon. - * - * @tparam DemandQueryAuction - * @tparam OutputIterator - * @param auction - * @param result - * @param epsilon Used for floating point comparison. - */ -template -void fractional_determine_winners_in_demand_query_auction( - DemandQueryAuction&& auction, - OutputIterator result, - double epsilon = 1e-7 -) { - using traits_t = demand_query_auction_traits; - using ItemVal = typename traits_t::item_val_t; - - std::unordered_map map; - return fractional_determine_winners_in_demand_query_auction( - std::forward(auction), - result, - boost::make_assoc_property_map(map), - epsilon - ); -} - -}//!auctions -}//!paal - -#endif /* PAAL_FRACTIONAL_WINNER_DETERMINATION_IN_MUCA_HPP */ diff --git a/patrec/inc/WireCellPatRec/paal/auctions/single_minded_auctions.hpp b/patrec/inc/WireCellPatRec/paal/auctions/single_minded_auctions.hpp deleted file mode 100644 index 4818928f8..000000000 --- a/patrec/inc/WireCellPatRec/paal/auctions/single_minded_auctions.hpp +++ /dev/null @@ -1,245 +0,0 @@ -//======================================================================= -// Copyright (c) -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file single_minded_auctions.hpp - * @brief Interfaces for creating auctions from single minded valuations. 
- * @author Robert Rosolek - * @version 1.0 - * @date 2014-01-08 - */ -#ifndef PAAL_SINGLE_MINDED_AUCTIONS_HPP -#define PAAL_SINGLE_MINDED_AUCTIONS_HPP - -#include "paal/auctions/xor_bids.hpp" -#include "paal/utils/functors.hpp" -#include "paal/utils/singleton_iterator.hpp" - -#include - -#include - -namespace paal { -namespace auctions { - - namespace concepts { - template < - class Bidders, - class Items, - class GetValue, - class GetItems, - class GetCopiesNum - > - class single_minded { - Bidders bidders; - Items items; - GetValue get_value; - GetItems get_items; - GetCopiesNum get_copies_num; - - single_minded() {} - - public: - BOOST_CONCEPT_USAGE(single_minded) - { - using value_t = puretype(get_value(*std::begin(bidders))); - static_assert(std::is_arithmetic::value, - "get_value return type is not arithmetic!"); - auto&& bid_items = get_items(*std::begin(bidders)); - using bundle_t = puretype(bid_items); - static_assert(std::is_move_constructible::value, - "bundle_t is not move constructible!"); - static_assert(std::is_default_constructible::value, - "bundle_t is not default constructible!"); - BOOST_CONCEPT_ASSERT((boost::ForwardRangeConcept< - decltype(bid_items)>)); - } - }; - } //!concepts - - namespace detail { - - struct get_bids { - template - auto operator()(Bidder&& b) - const -> decltype(utils::make_singleton_range(std::forward(b))) - { - return utils::make_singleton_range(std::forward(b)); - } - }; - - } //!detail - - /** - * @brief Create value query auction from single minded valuations. 
- * - * @param bidders - * @param items - * @param get_value - * @param get_items - * @param get_copies_num - * @tparam Bidders - * @tparam Items - * @tparam GetValue - * @tparam GetItems - * @tparam GetCopiesNum - */ - template< - class Bidders, - class Items, - class GetValue, - class GetItems, - class GetCopiesNum = utils::return_one_functor - > - auto make_single_minded_to_value_query_auction( - Bidders&& bidders, - Items&& items, - GetValue get_value, - GetItems get_items, - GetCopiesNum get_copies_num = GetCopiesNum{} - ) - -> decltype(make_xor_bids_to_value_query_auction( - std::forward(bidders), - std::forward(items), - detail::get_bids(), - get_value, - get_items, - get_copies_num - )) { - BOOST_CONCEPT_ASSERT((concepts::single_minded)); - return make_xor_bids_to_value_query_auction( - std::forward(bidders), - std::forward(items), - detail::get_bids(), - get_value, - get_items, - get_copies_num - ); - } - - // TODO all constructions in this file are essentially the same, maybe it's possible - // to refactor it using some C++ magic? - - /** - * @brief Create demand query auction from single minded valuations. 
- * - * @param bidders - * @param items - * @param get_value - * @param get_items - * @param get_copies_num - * @tparam Bidders - * @tparam Items - * @tparam GetValue - * @tparam GetItems - * @tparam GetCopiesNum - */ - template< - class Bidders, - class Items, - class GetValue, - class GetItems, - class GetCopiesNum = utils::return_one_functor - > - auto make_single_minded_to_demand_query_auction( - Bidders&& bidders, - Items&& items, - GetValue get_value, - GetItems get_items, - GetCopiesNum get_copies_num = GetCopiesNum{} - ) - -> decltype(make_xor_bids_to_demand_query_auction( - std::forward(bidders), - std::forward(items), - detail::get_bids(), - get_value, - get_items, - get_copies_num - )) { - BOOST_CONCEPT_ASSERT((concepts::single_minded)); - return make_xor_bids_to_demand_query_auction( - std::forward(bidders), - std::forward(items), - detail::get_bids(), - get_value, - get_items, - get_copies_num - ); - } - - /** - * @brief Create gamma oracle auction from single minded valuations. 
- * - * @param bidders - * @param items - * @param get_value - * @param get_items, - * @param get_copies_num - * @tparam Bidders - * @tparam Items - * @tparam GetValue - * @tparam GetItems - * @tparam GetCopiesNum - */ - template< - class Bidders, - class Items, - class GetValue, - class GetItems, - class GetCopiesNum = utils::return_one_functor - > - auto make_single_minded_to_gamma_oracle_auction( - Bidders&& bidders, - Items&& items, - GetValue get_value, - GetItems get_items, - GetCopiesNum get_copies_num = GetCopiesNum{} - ) - -> decltype(make_xor_bids_to_gamma_oracle_auction( - std::forward(bidders), - std::forward(items), - detail::get_bids(), - get_value, - get_items, - get_copies_num - )) { - BOOST_CONCEPT_ASSERT((concepts::single_minded)); - return make_xor_bids_to_gamma_oracle_auction( - std::forward(bidders), - std::forward(items), - detail::get_bids(), - get_value, - get_items, - get_copies_num - ); - } - - /** - * @brief Extract all items appearing in all bidders' bids. This function - * doesn't eliminate duplicates, this is left out to the caller. 
- * - * @param bidders - * @param get_items - * @param output - * @tparam Bidders - * @tparam GetItems - * @tparam OutputIterator - */ - template - void extract_items_from_single_minded(Bidders&& bidders, GetItems get_items, OutputIterator output) - { - extract_items_from_xor_bids( - std::forward(bidders), - detail::get_bids(), - get_items, - output - ); - } - -} //!auctions -} //!paal -#endif // PAAL_SINGLE_MINDED_AUCTIONS_HPP diff --git a/patrec/inc/WireCellPatRec/paal/auctions/winner_determination_in_MUCA/winner_determination_in_MUCA.hpp b/patrec/inc/WireCellPatRec/paal/auctions/winner_determination_in_MUCA/winner_determination_in_MUCA.hpp deleted file mode 100644 index 401f09af4..000000000 --- a/patrec/inc/WireCellPatRec/paal/auctions/winner_determination_in_MUCA/winner_determination_in_MUCA.hpp +++ /dev/null @@ -1,240 +0,0 @@ -/* - * @file winner_determination_in_MUCA.hpp - * @brief - * @author Robert Rosolek - * @version 1.0 - * @date 2014-1-7 - */ -#ifndef PAAL_WINNER_DETERMINATION_IN_MUCA_HPP -#define PAAL_WINNER_DETERMINATION_IN_MUCA_HPP - -#include "paal/auctions/auction_components.hpp" -#include "paal/auctions/auction_traits.hpp" -#include "paal/auctions/auction_utils.hpp" -#include "paal/utils/concepts.hpp" -#include "paal/utils/property_map.hpp" -#include "paal/utils/type_functions.hpp" - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -namespace paal { -namespace auctions { - -namespace detail { - template - struct bidder_info { - Value m_best_items_val; - ItemSet m_best_items; - }; - - template < - class GammaOracleAuction, - class Base = gamma_oracle_auction_traits - > - struct determine_winners_in_gamma_oracle_auction_traits : Base { - using value = promote_with_double_t; - using item_set = typename Base::items_t; - using bidder_info = detail::bidder_info; - using result = std::pair; - }; -}//!detail - -/** - * @brief This is determine winners in gamma oracle auction and 
return assignment of items to bidders. - * - * Example: - * \snippet winner_determination_in_MUCA_example.cpp Winner Determination In MUCA Example - * - * Complete example is winner_determination_in_MUCA_example.cpp. - * - * @tparam GammaOracleAuction - * @tparam OutputIterator - * @tparam PriceMap - * @tparam Epsilon - * @param auction - * @param result - * @param price Stores the current mapping of items to prices. - * These are prices just for the working purposes of the algorithm, - * not the prices to be paid by the bidders. - * @param epsilon Used for floating point comparison to ensure feasibility. - */ -template< - class GammaOracleAuction, - class OutputIterator, - class PriceMap, - class Epsilon -> -BOOST_CONCEPT_REQUIRES( - ((concepts::gamma_oracle_auction)) - - ((boost::ForwardRangeConcept< - typename gamma_oracle_auction_traits::bidders_universe_t - >)) - - ((utils::concepts::move_constructible< - typename gamma_oracle_auction_traits::items_t - >)) - - ((utils::concepts::output_iterator< - OutputIterator, - std::pair< - typename gamma_oracle_auction_traits::bidder_t, - typename gamma_oracle_auction_traits::items_t - > - >)) - - ((boost::ReadWritePropertyMapConcept< - PriceMap, - typename gamma_oracle_auction_traits::item_t - >)) - - ((utils::concepts::floating_point)), - -(void)) -determine_winners_in_gamma_oracle_auction( - GammaOracleAuction&& auction, - OutputIterator result, - PriceMap price, - Epsilon epsilon -) { - using Price = typename boost::property_traits::value_type; - - using Traits = - detail::determine_winners_in_gamma_oracle_auction_traits; - using Value = typename Traits::value; - using BidderInfo = typename Traits::bidder_info; - using BidderIter = typename Traits::bidder_iterator_t; - using Frac = typename Traits::frac_t; - using Res = typename Traits::result_t; - - Price items_num = 0; - for (auto item = std::begin(auction.template get()); - item != std::end(auction.template get()); - ++item - ) { - ++items_num; - auto const copies 
= auction.template call(*item); - put(price, *item, 1.0 / copies); - } - Price price_sum = items_num; - - // TODO allow to change the allocator - std::vector bidders_info_vec(bidders_number(auction)); - - auto last_assigned_bidder_info = bidders_info_vec.end(); - BidderIter last_assigned_bidder; - Value total_value = 0, last_value{}; - auto const b = get_minimum_copies_num(auction); - auto const multiplier = std::exp(Value(b) + 1) * items_num; - auto const gamma_ = auction.template get(); - auto get_threshold = [=](const BidderInfo& b) - { - return (1 + 2 * gamma_) * b.m_best_items_val; - }; - do { - Res best = boost::none; - auto get_frac = [](Res r) { return r->second; }; - auto bidder_info = bidders_info_vec.begin(); - auto bidder = std::begin(auction.template get()); - for (; bidder_info != bidders_info_vec.end(); ++bidder_info, ++bidder) { - auto const threshold = get_threshold(*bidder_info); - auto result = auction.template call( - *bidder, utils::make_property_map_get(price), threshold - ); - if (!result) continue; - if (!best || get_frac(result) < get_frac(best)) { - best = std::move(result); - last_assigned_bidder_info = bidder_info; - last_assigned_bidder = bidder; - last_value = get_frac(result).den + threshold; - } - } - if (!best) break; - auto& best_items = best->first; - for (auto item = std::begin(best_items); item != std::end(best_items); ++item) { - auto const copies = auction.template call(*item); - auto const old_price = get(price, *item); - auto const new_price = - old_price * std::pow(multiplier, 1.0 / (copies+ 1)); - put(price, *item, new_price); - price_sum += copies* (new_price - old_price); - } - total_value += last_value - last_assigned_bidder_info->m_best_items_val; - last_assigned_bidder_info->m_best_items = std::move(best_items); - last_assigned_bidder_info->m_best_items_val = last_value; - } while (price_sum + epsilon < multiplier); - - const bool nothing_assigned = - last_assigned_bidder_info == bidders_info_vec.end(); - if 
(nothing_assigned) return; - - auto output = [&]( - puretype(last_assigned_bidder_info) bidder_info, - BidderIter bidder - ) { - *result = std::make_pair(*bidder, std::move(bidder_info->m_best_items)); - ++result; - }; - if (last_value > total_value - last_value) { - output(last_assigned_bidder_info, last_assigned_bidder); - return; - } - auto bidder_info = bidders_info_vec.begin(); - auto bidder = std::begin(auction.template get()); - for (; bidder_info != bidders_info_vec.end(); ++bidder_info, ++bidder) - if (bidder != last_assigned_bidder) - output(bidder_info, bidder); -} - -/** - * @brief This is determine winners in gamma oracle auction and return assignment of bidders to items. - * This is version with default PriceMap using std::unordered_map and - * default epsilon. - * - * @tparam GammaOracleAuction - * @tparam OutputIterator - * @tparam Epsilon - * @param auction - * @param result - * @param epsilon Used for floating point comparison to ensure feasibility. - */ -template < - class GammaOracleAuction, - class OutputIterator, - class Epsilon = double -> -void determine_winners_in_gamma_oracle_auction( - GammaOracleAuction&& auction, - OutputIterator result, - Epsilon epsilon = 1e-8 -) { - using Traits = gamma_oracle_auction_traits; - using Value = promote_with_double_t; - using ItemVal = typename Traits::item_val_t; - - std::unordered_map umap; - return determine_winners_in_gamma_oracle_auction( - std::forward(auction), - result, - boost::make_assoc_property_map(umap), - epsilon - ); -} - - -}//!auctions -}//!paal - -#endif /* PAAL_WINNER_DETERMINATION_IN_MUCA_HPP */ diff --git a/patrec/inc/WireCellPatRec/paal/auctions/xor_bids.hpp b/patrec/inc/WireCellPatRec/paal/auctions/xor_bids.hpp deleted file mode 100644 index cdb4c9ec7..000000000 --- a/patrec/inc/WireCellPatRec/paal/auctions/xor_bids.hpp +++ /dev/null @@ -1,473 +0,0 @@ -//======================================================================= -// Copyright (c) -// -// Distributed under the Boost 
Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file xor_bids.hpp - * @brief Interfaces for creating auctions from xor bids valuations. - * @author Robert Rosolek - * @version 1.0 - * @date 2014-01-21 - */ -#ifndef PAAL_XOR_BIDS_HPP -#define PAAL_XOR_BIDS_HPP - -#include "paal/auctions/auction_components.hpp" -#include "paal/data_structures/fraction.hpp" -#include "paal/utils/accumulate_functors.hpp" -#include "paal/utils/functors.hpp" -#include "paal/utils/type_functions.hpp" - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -namespace detail { - // forward declaration for making this class a friend of xor_bids_gamma_oracle - template - class test_xor_bids_gamma_oracle; -} - -namespace paal { -namespace auctions { - - namespace concepts { - - template < - class Bidders, - class Items, - class GetBids, - class GetValue, - class GetItems, - class GetCopiesNum - > - class xor_bids { - Bidders bidders; - Items items; - GetBids get_bids; - GetValue get_value; - GetItems get_items; - GetCopiesNum get_copies_num; - - xor_bids() {} - - public: - BOOST_CONCEPT_USAGE(xor_bids) - { - auto&& bids = get_bids(*std::begin(bidders)); - BOOST_CONCEPT_ASSERT((boost::ForwardRangeConcept< - decltype(bids)>)); - auto bid = std::begin(bids); - using value_t = puretype(get_value(*bid)); - static_assert(std::is_arithmetic::value, - "get_value return type is not arithmetic!"); - auto&& bid_items = get_items(*bid); - using bundle_t = puretype(bid_items); - static_assert(std::is_move_constructible::value, - "bundle_t is not move constructible!"); - static_assert(std::is_default_constructible::value, - "bundle_t is not default constructible!"); - BOOST_CONCEPT_ASSERT((boost::ForwardRangeConcept< - decltype(bid_items)>)); - } - }; - } //!concepts - - namespace detail { - - 
template - struct xor_bids_traits { - using bid_iterator = - typename boost::range_iterator::type>::type; - using bid = typename std::iterator_traits::reference; - using value = pure_result_of_t; - using items = typename std::result_of::type; - using items_val = typename std::decay::type; - using item = range_to_ref_t; - template - using price = pure_result_of_t; - }; - - template - class xor_bids_value_query { - - GetBids m_get_bids; - GetValue m_get_value; - GetItems m_get_items; - - template - using traits = xor_bids_traits; - - public: - xor_bids_value_query(GetBids get_bids, GetValue get_value, GetItems get_items) - : m_get_bids(get_bids), m_get_value(get_value), m_get_items(get_items) {} - - template < - class Bidder, - class ItemSet, - class Traits = traits, - class Value = typename Traits::value - > - Value operator()(Bidder&& bidder, const ItemSet& item_set) const - { - using Bid = typename Traits::bid; - using Item = typename Traits::item; - - auto is_contained = [&](Bid b) - { - return boost::algorithm::all_of( - m_get_items(std::forward(b)), - [&](Item i) { return item_set.count(std::forward(i)) > 0; } - ); - }; - return accumulate_functor( - m_get_bids(std::forward(bidder)) | - boost::adaptors::filtered(is_contained), - Value(0), - m_get_value, - paal::utils::max() - ); - } - }; - }; //!detail - - /** - * @brief Create value query auction from xor bids valuations. 
- * - * @param bidders - * @param items - * @param get_bids - * @param get_value - * @param get_items - * @param get_copies_num - * @tparam Bidders - * @tparam Items - * @tparam GetBids - * @tparam GetValue - * @tparam GetItems - * @tparam GetCopiesNum - */ - template< - class Bidders, - class Items, - class GetBids, - class GetValue, - class GetItems, - class GetCopiesNum = utils::return_one_functor - > - auto make_xor_bids_to_value_query_auction( - Bidders&& bidders, - Items&& items, - GetBids get_bids, - GetValue get_value, - GetItems get_items, - GetCopiesNum get_copies_num = GetCopiesNum{} - ) -> - decltype(make_value_query_auction_components( - std::forward(bidders), - std::forward(items), - detail::xor_bids_value_query(get_bids, get_value, get_items), - get_copies_num - )) - { - BOOST_CONCEPT_ASSERT((concepts::xor_bids)); - return make_value_query_auction_components( - std::forward(bidders), - std::forward(items), - detail::xor_bids_value_query(get_bids, get_value, get_items), - get_copies_num - ); - } - - namespace detail { - - template - class xor_bids_demand_query { - - GetBids m_get_bids; - GetValue m_get_value; - GetItems m_get_items; - - template - using traits = xor_bids_traits; - - template > - struct price_traits : Base { - using price = typename Base::template price; - using utility = promote_with_t; - }; - - public: - xor_bids_demand_query(GetBids get_bids, GetValue get_value, - GetItems get_items) : m_get_bids(get_bids), - m_get_value(get_value), m_get_items(get_items) {} - - template - auto operator()(Bidder&& bidder, GetPrice get_price) const - { - using Traits = price_traits; - using Items = typename Traits::items_val; - using Res = std::pair; - - Res best = {Items{}, 0}; - auto&& bids = m_get_bids(std::forward(bidder)); - for (auto bid = std::begin(bids); bid != std::end(bids); ++bid) { - auto const value = m_get_value(*bid); - auto const price = - sum_functor(m_get_items(*bid), get_price); - auto const util = value - price; - if (util > 
best.second) - best = {m_get_items(*bid), util}; - } - return best; - } - }; - - } //!detail - - /** - * @brief Create demand query auction from xor bids valuations. - * - * @param bidders - * @param items - * @param get_bids - * @param get_value - * @param get_items - * @param get_copies_num - * @tparam Bidders - * @tparam Items - * @tparam GetBids - * @tparam GetValue - * @tparam GetItems - * @tparam GetCopiesNum - */ - template< - class Bidders, - class Items, - class GetBids, - class GetValue, - class GetItems, - class GetCopiesNum = utils::return_one_functor - > - auto make_xor_bids_to_demand_query_auction( - Bidders&& bidders, - Items&& items, - GetBids get_bids, - GetValue get_value, - GetItems get_items, - GetCopiesNum get_copies_num = GetCopiesNum{}) - { - BOOST_CONCEPT_ASSERT((concepts::xor_bids)); - return make_demand_query_auction_components( - std::forward(bidders), - std::forward(items), - detail::xor_bids_demand_query(get_bids, - get_value, get_items), - get_copies_num - ); - } - - namespace detail { - - template - class xor_bids_gamma_oracle { - - GetBids m_get_bids; - GetValue m_get_value; - GetItems m_get_items; - - template < - class GetBids_, - class GetValue_, - class GetItems_, - class Gamma_ - > - friend class ::detail::test_xor_bids_gamma_oracle; - - template - using traits = xor_bids_traits; - - template > - struct price_traits : public Base { - using price = typename Base::template price; - using frac = - paal::data_structures::fraction; - using best_bid = - boost::optional>; - }; - - template < - class Bidder, - class GetPrice, - class Threshold, - class IsBetter, - class BestBid = typename price_traits::best_bid - > - BestBid - calculate_best( - Bidder&& bidder, - GetPrice get_price, - Threshold threshold, - IsBetter is_better - ) const - { - BestBid result{}; - auto&& bids = m_get_bids(std::forward(bidder)); - for (auto bid = std::begin(bids); bid != std::end(bids); ++bid) { - auto const value = m_get_value(*bid); - if (value <= 
threshold) continue; - auto const price = sum_functor(m_get_items(*bid), - get_price); - auto const frac = - data_structures::make_fraction(price, value - threshold); - if (is_better(frac, result)) - result = std::make_pair(bid, frac); - } - return result; - } - - template < - class Bidder, - class GetPrice, - class Threshold, - class Traits = price_traits, - class BestBid = typename Traits::best_bid - > - BestBid - minimum_frac(Bidder&& bidder, GetPrice get_price, Threshold threshold) - const - { - return calculate_best( - std::forward(bidder), - get_price, - threshold, - [&](typename Traits::frac frac, const BestBid& result) - { - return !result || frac < result->second; - } - ); - } - - template - auto output(const Result& result, OutputIterator out) const - { - auto bid = result.first; - auto frac = result.second; - boost::copy(m_get_items(*bid), out); - return frac; - } - - public: - xor_bids_gamma_oracle(GetBids get_bids, GetValue get_value, GetItems get_items) - : m_get_bids(get_bids), m_get_value(get_value), m_get_items(get_items) {} - - template < - class Bidder, - class GetPrice, - class Threshold, - class Traits = price_traits - > - boost::optional> - operator()(Bidder&& bidder, GetPrice get_price, Threshold threshold) const - { - auto const best = minimum_frac(std::forward(bidder), - get_price, threshold); - if (!best) return boost::none; - return std::make_pair(m_get_items(*best->first), best->second); - } - }; - }; //!detail - - /** - * @brief Create gamma oracle auction from xor bids valuations. 
- * - * @param bidders - * @param items - * @param get_bids - * @param get_value - * @param get_items - * @param get_copies_num - * @tparam Bidders - * @tparam Items - * @tparam GetBids - * @tparam GetValue - * @tparam GetItems - * @tparam GetCopiesNum - */ - template< - class Bidders, - class Items, - class GetBids, - class GetValue, - class GetItems, - class GetCopiesNum = utils::return_one_functor - > - auto make_xor_bids_to_gamma_oracle_auction( - Bidders&& bidders, - Items&& items, - GetBids get_bids, - GetValue get_value, - GetItems get_items, - GetCopiesNum get_copies_num = GetCopiesNum{} - ) - -> decltype(make_gamma_oracle_auction_components( - std::forward(bidders), - std::forward(items), - detail::xor_bids_gamma_oracle(get_bids, get_value, get_items), - 1, - get_copies_num - )) - { - BOOST_CONCEPT_ASSERT((concepts::xor_bids)); - return make_gamma_oracle_auction_components( - std::forward(bidders), - std::forward(items), - detail::xor_bids_gamma_oracle(get_bids, get_value, get_items), - 1, - get_copies_num - ); - } - - /** - * @brief extract all items appearing in all bids. This function - * doesn't eliminate duplicates, this is left out to the caller. 
- * - * @tparam Bidders - * @tparam GetBids - * @tparam GetItems - * @tparam OutputIterator - * @param bidders - * @param get_bids - * @param get_items - * @param output - */ - template - void extract_items_from_xor_bids( - Bidders&& bidders, - GetBids get_bids, - GetItems get_items, - OutputIterator output - ) { - for (auto&& bidder: bidders) { - for (auto&& bid: get_bids(std::forward(bidder))) { - boost::copy(get_items(std::forward(bid)), output); - } - } - } - -} //!auctions -} //!paal -#endif // PAAL_XOR_BIDS_HPP diff --git a/patrec/inc/WireCellPatRec/paal/clustering/k_means_clustering.hpp b/patrec/inc/WireCellPatRec/paal/clustering/k_means_clustering.hpp deleted file mode 100644 index c471a468c..000000000 --- a/patrec/inc/WireCellPatRec/paal/clustering/k_means_clustering.hpp +++ /dev/null @@ -1,104 +0,0 @@ -/** - * @file k_means_clustering.hpp - * @brief - * @author Piotr Smulewicz, Piotr Wygocki - * @version 1.0 - * @date 2014-06-25 - */ -#ifndef PAAL_K_MEANS_CLUSTERING_HPP -#define PAAL_K_MEANS_CLUSTERING_HPP - -#include "paal/clustering/k_means_clustering_engine.hpp" -#include "paal/utils/irange.hpp" - -#include -#include - -#include - -namespace paal { - -/** - * @brief return centroid that minimize within-cluster sum of squares - */ -template -void centroid_minimalize_w_c_s_s(Cluster && cluster, OutputIterator out) { - assert(!boost::empty(cluster)); - using point_t = range_to_elem_t; - using coordinate_t = range_to_elem_t; - - auto dim = boost::size(*std::begin(cluster)); - for(auto idx : irange(dim)) { - coordinate_t res{}; - for (auto && point : cluster) { - res += std::begin(point)[idx]; - } - *out = res / boost::size(cluster); - ++out; - } -} - -/** - * @brief centroid minimize within cluster sum of squares - * @param clusters - * @param out - * @tparam Clusters - * @tparam OutputIterator - */ -template -void centroids_minimalize_w_c_s_s(Clusters && clusters, OutputIterator out) { - assert(!boost::empty(clusters)); - 
assert(!boost::empty(*std::begin(clusters))); - - using cluster_t = range_to_elem_t; - using point_t = range_to_elem_t; - using coordinate_t = range_to_elem_t; - - auto size = boost::size(*std::begin(*begin(clusters))); - for (auto && cluster : clusters) { - std::vector point(size); - centroid_minimalize_w_c_s_s(cluster, point.begin()); - *out = point; - ++out; - } -} - -/** - * @brief this is solve k_means_clustering problem - * and return vector of cluster - * example: - * \snippet k_means_clustering_example.cpp K Means Clustering Example - * - * complete example is k_means_clustering_example.cpp - * @param points - * @param centers - * @param result pairs of point and id of cluster - * (number form 0,1,2 ...,number_of_cluster-1) - * @param visitor - * @tparam Points - * @tparam OutputIterator - * @tparam CoordinateType - * @tparam Visitor - */ -template -auto k_means(Points &&points, Centers &¢ers, OutputIterator result, - Visitor visitor = Visitor{}) { - using point_t = range_to_elem_t; - using center_t = range_to_elem_t; - - center_t center{ *std::begin(centers) }; - - return k_means( - points, centers, - [&](std::vector const & points)->center_t const & { - centroid_minimalize_w_c_s_s(points, std::begin(center)); - return center; - }, - [&](point_t const &point) { return closest_to(point, centers); }, - result, utils::equal_to{}, visitor); -} - -} //!paal - -#endif /* PAAL_K_MEANS_CLUSTERING_HPP */ diff --git a/patrec/inc/WireCellPatRec/paal/clustering/k_means_clustering_engine.hpp b/patrec/inc/WireCellPatRec/paal/clustering/k_means_clustering_engine.hpp deleted file mode 100644 index c6b47b419..000000000 --- a/patrec/inc/WireCellPatRec/paal/clustering/k_means_clustering_engine.hpp +++ /dev/null @@ -1,183 +0,0 @@ -/** - * @file k_means_clustering_engine.hpp - * @brief - * @author Piotr Smulewicz - * @version 1.0 - * @date 2014-06-26 - */ -#ifndef PAAL_K_MEANS_CLUSTERING_ENGINE_HPP -#define PAAL_K_MEANS_CLUSTERING_ENGINE_HPP - -#include 
"paal/utils/type_functions.hpp" -#include "paal/utils/functors.hpp" -#include "paal/utils/irange.hpp" - -#include -#include -#include -#include - -#include -#include -#include - -namespace paal { - -/** - * @param lrange - * @param rrange - * @tparam RangeLeft - * @tparam RangeRight - */ -template -auto distance_square(RangeLeft && lrange, RangeRight && rrange) { - assert(!boost::empty(lrange)); - assert(boost::distance(lrange) == boost::distance(rrange)); - - //TODO change to sum_functors when generic lambdas appears - decltype(*std::begin(lrange) * *std::begin(rrange)) dist{}; - for (auto point_pair : boost::combine(lrange, rrange)) { - auto diff = boost::get<0>(point_pair) - boost::get<1>(point_pair); - dist += diff * diff; - } - return dist; -} - -/** - * @param point - * @param centers - * @tparam Point - * @tparam Centers - */ -template -auto closest_to(Point && point, Centers && centers){ - using coor_t = range_to_elem_t; - auto dist = std::numeric_limits::max(); - int new_center = 0; - for (auto center : centers | boost::adaptors::indexed()){ - auto new_dist = distance_square(center.value(), point); - - if (new_dist < dist) { - dist = new_dist; - new_center = center.index(); - } - } - return new_center; -} - -///k means visitor -struct k_means_visitor { - /** - * @param last_center - * @param new_center - * @tparam Center - * @tparam New_center - */ - template - void move_center(Center &last_center, New_center &new_center) {}; - ///new iteration - void new_iteration() {}; -}; - -/** - * @param points - * @param centers - * @param centroid functor return centroid of set of samples - * @param closest_to - * @param result pairs of point and id of cluster - * (number from 0,1,2 ...,k-1) - * @param c_equal - * @param visitor - * @tparam Points - * @tparam Centers - * @tparam Centroid - * @tparam ClosestTo - * @tparam OutputIterator - * @tparam CentroidEqual - * @tparam Visitor - */ -template -auto k_means(Points &&points, Centers & centers, - Centroid centroid, 
ClosestTo closest_to, - OutputIterator result, - CentroidEqual c_equal = CentroidEqual{}, - Visitor visitor=Visitor{}) { - using point_t = range_to_elem_t; - using points_bag = std::vector; - - std::vector cluster_points; - cluster_points.resize(centers.size()); - bool zm; - do { - visitor.new_iteration(); - zm = false; - boost::for_each(cluster_points, std::mem_fn(&points_bag::clear)); - - for (auto && point : points) { - cluster_points[closest_to(point)].push_back(point); - } - - for (auto point : cluster_points | boost::adaptors::indexed()) { - if(point.value().empty()) continue; - auto && old_center = centers[point.index()]; - auto && new_center = centroid(point.value()); - if (!c_equal(new_center, old_center)) { - visitor.move_center(old_center, new_center); - old_center = new_center; - zm = true; - } - } - } while (zm == true); - for (int cur_cluster : irange(cluster_points.size())) { - for (auto const & point : cluster_points[cur_cluster]) { - *result = std::make_pair(point, cur_cluster); - ++result; - } - } - return centers; -} - -/** - * @param points - * @param number_of_centers - * @tparam Points - */ -template -auto get_random_centers(Points &&points, int number_of_centers, OutputIterator out, - RNG && rng = std::default_random_engine{}) { - - std::vector centers(points.size()); - boost::iota(centers, 0); - std::shuffle(centers.begin(),centers.end(), rng); - centers.resize(number_of_centers); - for (auto && center : centers) { - *out=points[center]; - ++out; - } -} - -/** - * @param points - * @param number_of_clusters - * @tparam Points - */ -template -auto get_random_clusters(Points &&points, int number_of_clusters, - RNG && rng = std::default_random_engine{}) { - std::vector::type> clusters(number_of_clusters); - std::uniform_int_distribution<> dis(0, number_of_clusters - 1); - - for (auto o : points) { - clusters[distribution(rng)].push_back(o); - } - return clusters; -} - -} //!paal - -#endif /* PAAL_K_MEANS_CLUSTERING_ENGINE_HPP */ diff --git 
a/patrec/inc/WireCellPatRec/paal/data_structures/bimap.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/bimap.hpp deleted file mode 100644 index d7b761bf9..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/bimap.hpp +++ /dev/null @@ -1,377 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// 2013 Piotr Smulewicz -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file bimap.hpp - * @brief - * @author Piotr Wygocki, Piotr Smulewicz - * @version 1.1 - * @date 2013-09-12 - */ -#ifndef PAAL_BIMAP_HPP -#define PAAL_BIMAP_HPP - -#include "paal/data_structures/bimap_traits.hpp" -#include "paal/utils/irange.hpp" - -#include -#include -#include -#include - -#include - -namespace paal { -namespace data_structures { - -/** - * @class bimap_mic - * @brief the same as Bimap, but implemented using boost::multi_index_container. 
- * Unfortunately slower - * - * @tparam T - * @tparam Idx - */ -template class bimap_mic { - public: - - bimap_mic() = default; - - /** - * @brief constructor - * - * @tparam Range - * @param range - */ - template bimap_mic(Range && range) { - std::size_t s = boost::distance(range); - m_index.reserve(s); - for (const T &t : range) { - add(t); - } - } - - /** - * @brief get_idx on element t - * - * @param t - * - * @return - */ - Idx get_idx(const T &t) const { - auto const &idx = m_index.template get<1>(); - return m_index.template project<0>(idx.find(t)) - m_index.begin(); - } - - /** - * @brief get element on index i - * - * @param i - * - * @return - */ - const T &get_val(Idx i) const { -#ifdef NDEBUG - return m_index[i]; -#else - return m_index.at(i); -#endif - } - - /** - * @brief number of elements - * - * @return - */ - std::size_t size() const { return m_index.size(); } - - /** - * @brief adds alement to bimap - * - * @param t - * - * @return - */ - Idx add(const T &t) { - m_index.push_back(t); - return m_index.size() - 1; - } - - private: - typedef boost::multi_index_container< - T, boost::multi_index::indexed_by, - boost::multi_index::hashed_unique< - boost::multi_index::identity>>> - bm_type; - bm_type m_index; -}; - -// minor TODO write specification when T is integral (copy instead of reference) -/** - * @class bimap - * @brief implements both sides mapping from the collection to - * (0,size(collection)) interval. 
- * - * @tparam T - * @tparam Idx - */ -template class bimap { - typedef std::unordered_map> TToID; - - public: - typedef typename TToID::const_iterator Iterator; - - bimap() = default; - - /** - * @brief constructor - * - * @tparam Range - * @param range - */ - template bimap(Range && range) { - std::size_t s = boost::distance(range); - m_id_to_t.reserve(s); - m_t_to_id.reserve(s); - for (const T &t : range) { - add(t); - } - } - - /** - * @brief gets index of element t - * - * @param t - * - * @return - */ - Idx get_idx(const T &t) const { - auto iter = m_t_to_id.find(t); - assert(iter != m_t_to_id.end()); - return iter->second; - } - - /** - * @brief get value for index i - * - * @param i - * - * @return - */ - const T &get_val(Idx i) const { -#ifdef NDEBUG - return m_id_to_t[i]; -#else - return m_id_to_t.at(i); -#endif - } - - /** - * @brief number of elements - * - * @return - */ - std::size_t size() const { return m_id_to_t.size(); } - - /** - * @brief adds element to collection - * - * @param t - * - * @return - */ - Idx add(const T &t) { - assert(m_t_to_id.find(t) == m_t_to_id.end()); - Idx idx = size(); - m_t_to_id[t] = idx; - m_id_to_t.push_back(t); - return idx; - } - - /** - * @brief get range of all element, index pairs - * - * @return - */ - std::pair get_range() const { - return std::make_pair(m_t_to_id.begin(), m_t_to_id.end()); - } - - protected: - /// mapping from id to element - std::vector m_id_to_t; - /// mapping from elements to ids - TToID m_t_to_id; -}; - -/** - * @brief this maps support erasing elements, Alert inefficient!! 
- * - * @tparam T - * @tparam Idx - */ -template -class eraseable_bimap : public bimap { - typedef bimap base; - using base::m_t_to_id; - using base::m_id_to_t; - - public: - /** - * @brief erases element (takes linear time) - * - * @param t - */ - void erase(const T &t) { - auto iter = m_t_to_id.find(t); - assert(iter != m_t_to_id.end()); - Idx idx = iter->second; - m_t_to_id.erase(iter); - m_id_to_t.erase(m_id_to_t.begin() + idx); - - for (int i : irange(idx, Idx(m_id_to_t.size()))) { - assert(m_t_to_id.at(m_id_to_t[i]) == i + 1); - m_t_to_id[m_id_to_t[i]] = i; - } - } -}; - -/** - * @brief in this bimap we know that elements forms permutation - * this allows optimization - * - * @tparam T - * @tparam Idx - */ -template class bimap_of_consecutive { - // TODO maybe it should be passed but only on debug - static const Idx INVALID_IDX = -1; - - public: - static_assert(std::is_integral::value, "Type T has to be integral"); - bimap_of_consecutive() = default; - - /** - * @brief constructor - * - * @tparam Iter - * @param b - * @param e - */ - template bimap_of_consecutive(Iter b, Iter e) { - if (b == e) return; - - std::size_t size = std::distance(b, e); - m_id_to_t.resize(size); - std::copy(b, e, m_id_to_t.begin()); - - m_t_to_id.resize(size, INVALID_IDX); - rank(m_id_to_t, m_t_to_id, INVALID_IDX); - } - - /** - * @brief gets index of element t - * - * @param t - * - * @return - */ - Idx get_idx(const T &t) const { return m_t_to_id[t]; } - - /** - * @brief gets value for index i - * - * @param i - * - * @return - */ - const T &get_val(Idx i) const { return m_id_to_t[i]; } - - /** - * @brief number of elements - * - * @return - */ - std::size_t size() const { return m_id_to_t.size(); } - - private: - std::vector m_id_to_t; - std::vector m_t_to_id; -}; - -/** - * @brief traits specialization for Bimap - * - * @tparam ValT - * @tparam IdxT - */ -template struct bimap_traits> { - typedef ValT Val; - typedef IdxT Idx; -}; - -/** - * @brief traits specialization for 
eraseable_bimap - * - * @tparam ValT - * @tparam IdxT - */ -template -struct bimap_traits> { - typedef ValT Val; - typedef IdxT Idx; -}; - -/** - * @brief traits specialization for bimap_of_consecutive - * - * @tparam ValT - * @tparam IdxT - */ -template -struct bimap_traits> { - typedef ValT Val; - typedef IdxT Idx; -}; - -/** - * @brief traits specialization for bimap_mic - * - * @tparam ValT - * @tparam IdxT - */ -template -struct bimap_traits> { - typedef ValT Val; - typedef IdxT Idx; -}; - -/** - * @brief computes rank i.e. index of element in range - * - * @tparam T - * @tparam Idx - * @param m_id_to_t - * @param m_t_to_id - * @param INVALID_IDX - */ -template -void rank(std::vector const &m_id_to_t, std::vector &m_t_to_id, - int INVALID_IDX = 0) { - static_assert(std::is_integral::value, "Type T has to be integral"); - unsigned long size = m_t_to_id.size(); - for (auto i : irange(size)) { - Idx &idx = m_t_to_id[m_id_to_t[i]]; - assert(m_id_to_t[i] < int(size) && idx == INVALID_IDX); - idx = i; - } -} - -} //! data_structures -} //! paal -#endif // PAAL_BIMAP_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/bimap_traits.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/bimap_traits.hpp deleted file mode 100644 index f5fdbea29..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/bimap_traits.hpp +++ /dev/null @@ -1,26 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file bimap_traits.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-11-04 - */ -#ifndef PAAL_BIMAP_TRAITS_HPP -#define PAAL_BIMAP_TRAITS_HPP - -namespace paal { -namespace data_structures { - -template struct bimap_traits; - -} // data_structures -} // paal - -#endif // PAAL_BIMAP_TRAITS_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/collection_starts_from_last_change.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/collection_starts_from_last_change.hpp deleted file mode 100644 index 436660269..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/collection_starts_from_last_change.hpp +++ /dev/null @@ -1,109 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file collection_starts_from_last_change.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-07-11 - */ -#ifndef PAAL_COLLECTION_STARTS_FROM_LAST_CHANGE_HPP -#define PAAL_COLLECTION_STARTS_FROM_LAST_CHANGE_HPP - -#include - -#include - -namespace paal { -namespace data_structures { - -/** - * @brief this collection stores some range and expose set_last_change function - * each time begin and end is called this class returns range which - * starts from last change place - * - * @tparam Iterator - * @tparam hash - */ -template ::value_type>> -class collection_starts_from_last_change { - typedef typename std::iterator_traits::value_type Element; - typedef std::unordered_map ElemToIter; - typedef std::pair Range; - typedef boost::joined_range JoinedRange; - typedef typename boost::range_iterator::type JoinedIterator; - - public: - typedef JoinedIterator ResultIterator; - - collection_starts_from_last_change() = default; - - /** - * @brief constructor - * - * @param begin - * @param end - */ - collection_starts_from_last_change(Iterator begin, Iterator end) - : m_begin(begin), m_end(end), m_new_begin(m_begin) { - assert(m_begin != m_end); - for (auto i = m_begin; i != m_end; ++i) { - bool b = m_elem_to_iter.emplace(*i, i).second; - assert(b); - } - } - - /** - * @brief one can set the place of the last change (future start position of - * the range) - * - * @param el - */ - void set_last_change(const Element &el) { - auto i = m_elem_to_iter.find(el); - assert(i != m_elem_to_iter.end()); - m_new_begin = i->second; - } - - /** - * @brief begin - * - * @return - */ - JoinedIterator begin() { return std::begin(get_range()); } - - /** - * @brief end - * - * @return - */ - JoinedIterator end() { return std::end(get_range()); } - - private: - /** - * @brief gets range - * - * 
@return - */ - JoinedRange get_range() { - Range r1 = std::make_pair(m_new_begin, m_end); - Range r2 = std::make_pair(m_begin, m_new_begin); - return boost::join(r1, r2); - } - - Iterator m_begin; - Iterator m_end; - Iterator m_new_begin; - ElemToIter m_elem_to_iter; -}; -} -} - -#endif // PAAL_COLLECTION_STARTS_FROM_LAST_CHANGE_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/combine_iterator.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/combine_iterator.hpp deleted file mode 100644 index b6873fa63..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/combine_iterator.hpp +++ /dev/null @@ -1,298 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file combine_iterator.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-02-01 - */ - -#include "paal/utils/type_functions.hpp" - -#include -#include -#include -#include - -#ifndef PAAL_COMBINE_ITERATOR_HPP -#define PAAL_COMBINE_ITERATOR_HPP - -namespace paal { -namespace data_structures { -//TODO change name to product -/** - * @brief class representing set of ranges with two operation next and call - * - * @tparam Ranges - */ -template class combine_iterator_engine; - -/** - * @class combine_iterator_engine - * @brief actual implementation - * - * @tparam Range - * @tparam RangesRest - */ -template -class combine_iterator_engine< - Range, RangesRest...> : private combine_iterator_engine { - - public: - using base = combine_iterator_engine; - using Iterator = typename boost::range_iterator::type; - - /** - * @brief constructor - * - * @param range - * @param rest - */ - combine_iterator_engine(Range &range, RangesRest &... 
rest) - : base(rest...), m_begin(std::begin(range)), m_curr(std::begin(range)), - m_end(std::end(range)) {} - - combine_iterator_engine() = default; - - /** - * @brief move iterators to the next position - * - * @return - */ - bool next() { - if (!base::next()) { - ++m_curr; - if (m_curr == m_end) { - m_curr = m_begin; - return false; - } - } - return true; - } - - /** - * @brief calls arbitrary function f on (*m_curr)... - * - * @tparam F - * @tparam Args - * @param f - * @param args - * - * @return - */ - template - auto call(F f, Args &&... args)->decltype(std::declval().call( - std::move(f), std::forward(args)..., *std::declval())) { - return base::call(std::move(f), std::forward(args)..., *m_curr); - } - - /** - * @brief operator== - * - * @param left - * @param right - * - * @return - */ - friend bool operator==(const combine_iterator_engine &left, - const combine_iterator_engine &right) { - return left.m_begin == right.m_begin && left.m_end == right.m_end && - left.m_curr == right.m_curr && - static_cast(left) == static_cast(right); - } - - private: - Iterator m_begin; - Iterator m_curr; - Iterator m_end; -}; - -/** - * @brief specialization for empty ranges lists - */ -template <> class combine_iterator_engine<> { - public: - /** - * @brief no next configuration - * - * @return - */ - bool next() { return false; } - - /** - * @brief actually calls function f - * - * @tparam F - * @tparam Args - * @param f - * @param args - * - * @return - */ - template - auto call(F f, Args &&... args)->decltype(f(std::forward(args)...)) { - return f(std::forward(args)...); - } - - /** - * @brief operator==, always true - * - * @param left - * @param right - * - * @return - */ - friend bool operator==(const combine_iterator_engine &left, - const combine_iterator_engine &right) { - return true; - } -}; - -namespace detail { -// TODO can you do this without alias??? 
-template using rem_ref = typename std::remove_reference::type; -} - -/** - * @brief make for combine_iterator_engine - * - * @tparam Ranges - * @param ranges - * - * @return - */ -template -combine_iterator_engine...> -make_combine_iterator_engine(Ranges &&... ranges) { - // see comments in make_combine_iterator - return combine_iterator_engine...>{ ranges... }; -} - -/** - * @brief combine_iterator iterates through all combinations of values from - * given ranges - * and returns them joined together using given Joiner - * - * @tparam Joiner - * @tparam Ranges - */ -template -class combine_iterator : public boost::iterator_facade< - combine_iterator, - puretype(combine_iterator_engine().call(std::declval())), - boost::forward_traversal_tag // TODO this should be minimal tag of the - // ranges - , - decltype( - combine_iterator_engine().call(std::declval()))> { - public: - /** - * @brief constructor - * - * @param joiner - * @param ranges - */ - combine_iterator(Joiner joiner, Ranges &... ranges) - : m_joiner(joiner), m_iterator_engine(ranges...), - m_end(sizeof...(Ranges) ? is_empty(ranges...) : true) {} - - /** - * @brief default constructor represents end of the range - */ - combine_iterator() : m_end(true) {}; - - private: - /** - * @brief returns true if at least one of given ranges is empty - * - * @tparam Range - * @tparam RangesRest - * @param range - * @param rest - * - * @return - */ - template - bool is_empty(const Range &range, const RangesRest &... 
rest) { - if (boost::empty(range)) { - return true; - } else { - return is_empty(rest...); - } - } - - /** - * @brief boundary case for is_empty - * - * @return - */ - bool is_empty() { return false; } - - using ref = decltype( - combine_iterator_engine().call(std::declval())); - - friend class boost::iterator_core_access; - - /** - * @brief increments iterator - */ - void increment() { - if (!m_iterator_engine.next()) { - m_end = true; - } - } - - /** - * @brief equal function - * - * @param other - * - * @return - */ - bool equal(combine_iterator const &other) const { - return this->m_end == other.m_end && - (this->m_end || - this->m_iterator_engine == other.m_iterator_engine); - } - - /** - * @brief dereference - * - * @return - */ - ref dereference() const { return m_iterator_engine.call(m_joiner); } - - Joiner m_joiner; - mutable combine_iterator_engine m_iterator_engine; - bool m_end; -}; - -/** - * @brief make for combine_iterator - * - * @tparam Joiner - * @tparam Ranges - * @param joiner - * @param ranges - * - * @return - */ -template -combine_iterator...> -make_combine_iterator(Joiner joiner, Ranges &&... ranges) { - // we do not forward the ranges, because combine_iterator expects lvalues - // we Use Ranges && because, we'd like to cover const/nonconst cases - return combine_iterator...>{ joiner, - ranges... }; -} - -} // data_structures -} // paal - -#endif // PAAL_COMBINE_ITERATOR_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/components/component_traits.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/components/component_traits.hpp deleted file mode 100644 index cf59061ae..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/components/component_traits.hpp +++ /dev/null @@ -1,31 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file component_traits.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-07-22 - */ -#ifndef PAAL_COMPONENT_TRAITS_HPP -#define PAAL_COMPONENT_TRAITS_HPP -#include "components.hpp" - -namespace paal { -namespace data_structures { - -template struct component_traits; - -template -struct component_traits> { - template - using type = detail::type_for_name; -}; -} -} -#endif // PAAL_COMPONENT_TRAITS_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/components/components.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/components/components.hpp deleted file mode 100644 index f49878643..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/components/components.hpp +++ /dev/null @@ -1,518 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file components.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-07-16 - */ -#ifndef PAAL_COMPONENTS_HPP -#define PAAL_COMPONENTS_HPP - -#include "paal/data_structures/components/types_vector.hpp" - -#include -#include - -namespace paal { -namespace data_structures { - -/** - * @brief This structure can be passed on Names list and represents Name and the - * default type value - * - * @tparam Name - * @tparam Default - */ -template struct NameWithDefault; - -/** - * @brief Indicates that components constructor is in fact a Copy/Move - * Constructor - */ -struct copy_tag {}; - -// This namespace block contains implementation of the main class -// components and needed meta functions -namespace detail { - -///wraps type to constructible type -template struct wrap_to_constructable { - typedef T type; -}; - - -///If Name is kth on Names list, returns kth Type. -template struct type_for_name { - typedef typename remove_n_first<1, Names>::type NewNames; - typedef typename remove_n_first<1, Types>::type NewTypes; - typedef typename type_for_name::type type; -}; - -///Specialization when found -template -struct type_for_name, - TypesVector> { - typedef Type type; -}; - - -///SFINAE check if the given type has get() member function. -template class has_template_get { - private: - /** - * @brief positive case - * - * @tparam C given type - * - * @return return type is char - */ - template - static char f(wrap_to_constructable().template get())(C::*)() const> *); - - /** - * @brief negative case - * - * @tparam C given type - * - * @return return type is long - */ - template static long f(...); - - public: - /** - * @brief tels if given type has get() memer function. 
- * - */ - static const bool value = - (sizeof(f::type>(nullptr)) == sizeof(char)); -}; - -/** - * @brief Tag indicating that given object is movable - */ -struct movable_tag {}; -/** - * @brief Tag indicating that given object is not movable - */ -struct Notmovable_tag {}; - -// declaration of main class components -template class components; - -// specialization for empty Names list -template <> class components, TypesVector<>> { - public: - void get() const; - void call() const; - void call2() const; - - template components(const Unused &...) {} -}; - -// specialization for nonempty types list -// class keeps first component as data memer -// rest of the components are kept in superclass. -template -class components, - TypesVector> : public components< - TypesVector, TypesVector> { - typedef components, TypesVector> - base; - typedef TypesVector Names; - typedef TypesVector Types; - - ///Evaluates to valid type iff componentsName == Name - template - using is_my_name = - typename std::enable_if::value>::type; - - public: - using base::get; - - /// constructor - // we do not use = default, cause we'd like to value initialize POD's. 
- components() : base{}, m_component{} {}; - - // copy constructor - components(components const & other) - : base(static_cast(other)), m_component(other.get()) {} - - // doesn't work on clang 3.2 // change in the standard and visual studio 2015 preview - // components(components &) = default; - // constructor taking nonconst lvalue reference - components(components &other) - : base(static_cast(other)), m_component(other.get()) {} - - // move constructor - components(components &&) = default; - - // assignment operator - components &operator=(components const & other) { - - static_cast(*this) = static_cast(other); - m_component = other.get(); - return *this; - } - - // doesn't work on clang 3.2 // change in the standard - // components& operator=(components &) = default; - // assignment operator taking nonconst lvalue reference - components &operator=(components &other) { - static_cast(*this) = static_cast(other); - m_component = other.get(); - return *this; - } - - // default move operator - components &operator=(components &&) = default; - - /** - * @brief constructor takes some number of arguments, - * This arguments has to be convertible to the same number of the first - * components in components class. - * Arguments can be both rvalue and lvalue references - * - * @tparam T, first component, it must be convertible to Type. - * @tparam TypesPrefix, rest of the components - * @param t - * @param types - */ - template - components(T &&t, TypesPrefix &&... types) - : base(std::forward(types)...), - m_component(std::forward(t)) {} - - // copy constructor takes class wich has get member function - // the get<> function dosn't have to be available for all names. - // @param copy_tag is helps identify this constructor - template - components(const Comps &comps, copy_tag) - : components(comps, Notmovable_tag()) {} - - // move constructor takes class wich has get member function - // the get<> function dosn't have to be available for all names. 
- // In this version each of the components taken from comps - // is going to be moved. - // @param copy_tag is helps identify this constructor - template - components(Comps &&comps, copy_tag) - : components(comps, movable_tag()) {} - - /** - * @brief This fucntion returns Component for name Name, nonconst version - * - * @tparam ComponentName - * @tparam typename - * @param dummy - * - * @return - */ - template > - Type &get(wrap_to_constructable dummy = - wrap_to_constructable()) { - return m_component; - } - - /** - * @brief This fucntion returns Component for name Name, const version - * - * @tparam ComponentName - * @tparam typename - * @param dummy - * - * @return - */ - template > - const Type &get(wrap_to_constructable dummy = - wrap_to_constructable()) const { - return m_component; - } - - /** - * @brief This function directly calls component. - * m_component(args) has to be valid expresion - * nonconst version - * - * @tparam ComponentName - * @tparam Args - * @param args call arguments - * - * @return - */ - template - auto call(Args &&... args)->decltype(std::declval< - typename type_for_name::type>()( - std::forward(args)...)) { - return this->template get()(std::forward(args)...); - } - - /** - * @brief This function directly calls component. - * m_component(args) has to be valid expresion - * const version - * - * @tparam ComponentName - * @tparam ComponentName - * @tparam Args - * @param args call arguments - * - * @return the same as m_component return type - */ - template - auto call(Args &&... args) const->decltype(std::declval< - const typename type_for_name::type>()( - std::forward(args)...)) { - return this->template get()(std::forward(args)...); - } - - /** - * @brief setter for component assigned to Name. 
- * - * @tparam ComponentName - * @param comp - */ - template - void - set(const typename type_for_name::type comp) { - this->get() = std::move(comp); - } - - /** - * @brief function creating components class, - * takes arguments only for assigned Names - * - * @tparam NamesSubset - * @tparam SomeTypes - * @param types - * - * @return - */ - template - static components - // make(SomeTypes... types) { - // static_assert(sizeof...(NamesSubset) == sizeof...(SomeTypes), - // "Incorrect number of arguments."); - // return components(components, - // TypesVector>(std::move(types)...), copy_tag()); - make(SomeTypes &&... types) { - static_assert(sizeof...(NamesSubset) == sizeof...(SomeTypes), - "Incorrect number of arguments."); - components, TypesVector> - comps(std::forward(types)...); - return components(std::move(comps), copy_tag()); - } - - protected: - - // object is moved if move = true, otherwise passed by reference - template A move_or_pass_reference(const A &a) { - return std::move(a); - } - - // const reference case - template ::type> - const A &move_or_pass_reference(const A &a) { - return a; - } - - // nonconst reference case - template ::type> - A &move_or_pass_reference(A &a) { - return a; - } - - // All of this constructor takes Comps as r-value reference, - // because they have to win specialization race with normal constructor. 
- - // case: movable object, has the appropriate get member function - template ::value, int>::type> - components(Comps &&comps, movable_tag m, dummy d = dummy()) - : base(std::forward(comps), std::move(m)), - // if Type is not reference type, comps.get() is moved otherwise - // reference is passed - m_component( - move_or_pass_reference::value>( - comps.template get())) {} - - // case: movable object, does not have the appropriate get member function - template ::value>::type> - components(Comps &&comps, movable_tag m) - : base(std::forward(comps), std::move(m)) {} - - // case: not movable object, has the appropriate get member function - template ::value, int>::type> - components(Comps &&comps, Notmovable_tag m, dummy d = dummy()) - : base(std::forward(comps), std::move(m)), - m_component(comps.template get()) {} - - // case: not movable object, does not have the appropriate get member - // function - template ::value>::type> - components(Comps &&comps, Notmovable_tag m) - : base(std::forward(comps), std::move(m)) {} - - private: - Type m_component; -}; -} // detail - -//This namespace contains class which sets all defaults and all needed meta functions. 
- -namespace detail { - -template -class set_defaults { - static const int N = size::value; - static const int TYPES_NR = size::value; - static_assert(TYPES_NR <= N, "Incrrect number of parameters"); - - static const int DEFAULTS_NR = size::value; - static_assert(DEFAULTS_NR + TYPES_NR >= N, "Incrrect number of parameters"); - - typedef typename remove_n_first::type - NeededDefaults; - - typedef typename join::type Types; - - public: - typedef detail::components type; -}; -} // detail - -//Here are some meta functions, to parse the arguments -namespace detail { -/** - * @brief get_name, gets name for either Name, or NamesWithDefaults struct - * this is the Name case - * - * @tparam T - */ -template struct get_name { - typedef T type; -}; - -/** - * @brief get_name, gets name for either Name, or NamesWithDefaults struct - * this is the NamesWithDefaults case - * - * @tparam Name - * @tparam Default - */ -template -struct get_name> { - typedef Name type; -}; - -/** - * @brief Meta function takes NameWithDefault and Vector - * the result is new vector with new Name appended Name - */ -struct push_back_name { - template struct apply { - typedef typename push_back< - Vector, typename get_name::type>::type type; - }; -}; - -/* - * @brief Meta function takes NameWithDefault and Vector - * the result is new vector with new Name appended Default - */ -struct push_back_default { - // This case applies to when NameWithDefault is only name - template struct apply { - typedef Vector type; - }; - - // This case applies when NameWithDefault contains Default - template - struct apply> { - typedef typename push_back::type type; - }; -}; -} // detail - -/// this is class sets all defaults and return as type detail::components -/// direct implementation on variadic templates is imposible because of -/// weak support for type detection for inner template classes -template class components { - typedef TypesVector NamesWithDefaults; - - /// get Names list from NamesWithDefaults - 
typedef typename fold, - detail::push_back_name>::type Names; - - /// get Defaults from NamesWithDefaults - typedef typename fold, - detail::push_back_default>::type Defaults; - - /** - * @brief for detecting references adapters - * - * @tparam T - */ - template struct special_decay { - using type = typename std::decay::type; - }; - - /** - * @brief specialization, when type is surrounded by std::ref - * - * @tparam T - */ - template struct special_decay> { - using type = T &; - }; - - template using special_decay_t = typename special_decay::type; - - public: - template - using type = typename detail::set_defaults< - Names, Defaults, TypesVector>::type; - - /// make function for components - template - static type...> - make_components(components &&... comps) { - return type...>( - std::forward(comps)...); - } - - private: - // in this block we check if the defaults are on the last positions in the - // NamesWithDefaults - static const int N = size::value; - static const int DEFAULTS_NR = size::value; - typedef typename remove_n_first::type - DefaultPart; - typedef typename fold, - detail::push_back_default>::type DefaultsTest; - static_assert(std::is_same::value, - "Defaults values could be only on subsequent number of last " - "parameters"); -}; - -} //! data_structures -} //! paal -#endif // PAAL_COMPONENTS_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/components/components_join.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/components/components_join.hpp deleted file mode 100644 index 3a66fc9d8..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/components/components_join.hpp +++ /dev/null @@ -1,144 +0,0 @@ -//======================================================================= -// Copyright (c) -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file components_join.hpp - * @brief - * @author Robert Rosolek - * @version 1.0 - * @date 2014-06-15 - */ -#ifndef PAAL_COMPONENTS_JOIN_HPP -#define PAAL_COMPONENTS_JOIN_HPP - -#include "paal/data_structures/components/components.hpp" - -namespace paal { -namespace data_structures { - -namespace detail { - - // assumes that names with defaults are already at the end of - // concatenation of Components1 and Components2 - template - struct concat; - - template - struct concat< - paal::data_structures::components, - paal::data_structures::components - > { - using type = paal::data_structures::components; - }; - -}//!detail - -/** - * @brief Creates new components class with set of names that is the union of - * names from input components classes. Names are arranged so that all names with - * defaults are at the end. - * - * @tparam Components1 - * @tparam Components2 - */ -template -struct join; - -/** - * @brief First components class has only names with defaults, second components class is empty. - * This case cannot be simplified to just "Second components class is empty" to disambiguate - * pattern matching. - * - * @tparam Name1 - * @tparam Default1 - * @tparam ComponentNamesWithDefaults1 - */ -template -struct join, ComponentNamesWithDefaults1...>, components<>> { - using type = components, ComponentNamesWithDefaults1...>; -}; - -/** - * @brief Both components classes have only names with defaults. 
- * - * @tparam Name1 - * @tparam Default1 - * @tparam ComponentNamesWithDefaults1 - * @tparam Name2 - * @tparam Default2 - * @tparam ComponentNamesWithDefaults2 - */ -template -struct join, ComponentNamesWithDefaults1...>, -components, ComponentNamesWithDefaults2...>> { - using type = components< - NameWithDefault, - ComponentNamesWithDefaults1..., - NameWithDefault, - ComponentNamesWithDefaults2... - >; -}; - -/** - * @brief First components class has only names with defaults. - * - * @tparam Name1 - * @tparam Default1 - * @tparam ComponentNamesWithDefaults1 - * @tparam ComponentName2 - * @tparam ComponentNamesWithDefaults2 - */ -template -struct join, ComponentNamesWithDefaults1...>, -components> { - using type = typename detail::concat< - components, - typename join< - components, ComponentNamesWithDefaults1...>, - components - >::type - >::type; -}; - -/** - * @brief First components class is empty. - * - * @tparam ComponentNamesWithDefaults2 - */ -template -struct join, components> { - using type = components; -}; - -/** - * @brief Normal case. - * - * @tparam ComponentName1 - * @tparam ComponentNamesWithDefaults1 - * @tparam ComponentNamesWithDefaults2 - */ -template < - typename ComponentName1, - typename... ComponentNamesWithDefaults1, - typename... 
ComponentNamesWithDefaults2 -> -struct join< - components, - components -> { - using type = typename detail::concat< - components, - typename join, components>::type - >::type; -}; - -} //!data_structures -} //!paal - -#endif /* PAAL_COMPONENTS_JOIN_HPP */ diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/components/components_replace.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/components/components_replace.hpp deleted file mode 100644 index 2e0163ce9..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/components/components_replace.hpp +++ /dev/null @@ -1,152 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file components_replace.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-07-24 - */ -#ifndef PAAL_COMPONENTS_REPLACE_HPP -#define PAAL_COMPONENTS_REPLACE_HPP - -#include "paal/data_structures/components/components.hpp" - -namespace paal { -namespace data_structures { - -/** - * @brief Generic version of replaced_type - * - * @tparam Name - * @tparam NewType - * @tparam components - */ -template -class replaced_type; - -/** - * @class replaced_type - * @brief Returns type of components, with Type for Name change - * to NewType - * - * @tparam Name name of the changed type - * @tparam NewType new type for Name - * @tparam Names names list - * @tparam Types old types list - */ -template -class replaced_type> { - static const int p = pos::value; // position to insert - typedef typename replace_at_pos::type TypesReplace; - - public: - typedef detail::components type; -}; - -namespace detail { - -/** - * @brief generic get_types - * - * @tparam Comp - */ -template struct get_types; - -/** - * @class 
get_types - * @brief gets types list for components class - * - * @tparam Names - * @tparam Types - */ -template -struct get_types> { - typedef Types type; -}; - -/** - * @class TempReplacecomponents - * @brief This class behavies like partial components, - * with type for Name chanche to Type - * - * @tparam Name changed name - * @tparam NewType new type - * @tparam Names all names - * @tparam Types aol types - */ -template -class temp_replaced_components { - typedef detail::components Comps; - typedef typename replaced_type::type Replaced; - typedef typename detail::get_types::type NewTypes; - - public: - temp_replaced_components(const Comps &comps, const NewType &comp) - : m_comps(comps), m_comp(comp) {} - - template - const typename detail::type_for_name::type & - get() const { - return get(detail::wrap_to_constructable()); - } - - private: - - template - auto get(detail::wrap_to_constructable) const->decltype( - std::declval().template get()) { - return m_comps.template get(); - } - - const NewType &get(detail::wrap_to_constructable) const { - return m_comp; - } - - const Comps &m_comps; - const NewType &m_comp; -}; -} - -/** - * @brief This function, for a specific Name, replaces compoonent in the - * components class. - * The comonent should have deifferent type than prevoius component for - * this Name - * (If the type is the same, set member function from components class - * chould be used). - * The function returns components class fo type replaced_type::type. - * The function creates temporary object wich behaves like result - * components - * and creates final object calling special Copy constructor. 
- * - * @tparam Name - * @tparam NewType - * @tparam Names - * @tparam Types - * @param comp - * @param components - * - * @return - */ -template -typename replaced_type>::type -replace(NewType comp, detail::components components) { - typedef detail::components Comps; - typedef typename replaced_type::type Replaced; - - return Replaced( - detail::temp_replaced_components( - components, comp), - copy_tag()); -} - -} // data_structures -} // paal - -#endif // PAAL_COMPONENTS_REPLACE_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/components/types_vector.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/components/types_vector.hpp deleted file mode 100644 index 5657a5715..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/components/types_vector.hpp +++ /dev/null @@ -1,159 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file types_vector.hpp - * @brief This is implementation of type vector taking advantage of variadic - * template. - * This implementation is NOT c++11 adaptation of mpl. - * It is small set of functon needed for components class purpose. - * It is also less general than mpl. The implementation is create to - * avoid - * some problems with mpl. The c++11 techniques makes it much simpler and - * clearer. 
- * When boost::mpl11 apears this code should be removed - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-07-18 - */ -#ifndef PAAL_TYPES_VECTOR_HPP -#define PAAL_TYPES_VECTOR_HPP - -#include - -namespace paal { -namespace data_structures { - -/// TypesVector -template struct TypesVector; - -/// Computes size of TypesVector -template struct size; - -/// Computes size of TypesVector -template struct size> { - enum { - value = sizeof...(Args) - }; -}; - -/// Standard fold function implementation -template struct fold; - -/// Standard fold function implementation -template -struct fold, StartValue, Functor> { - typedef typename fold< - TypesVector, - typename Functor::template apply::type, Functor>::type - type; -}; - -/// Standard fold function implementation, empty list case -template -struct fold, StartValue, Functor> { - typedef StartValue type; -}; - -/// push back given val to TypesVector -template struct push_back; - -/// push back given val to TypesVector -template -struct push_back, Val> { - typedef TypesVector type; -}; - -/// gives element on id in TypesVector -template struct at; - -/// gives element on id in TypesVector -template -struct at, std::integral_constant> { - typedef typename at>::type type; -}; - -/// gives element on id in TypesVector, at 0 case -template -struct at, std::integral_constant> { - typedef Arg type; -}; - -/// joins to TypesVectors -template struct join; - -/// joins to TypesVectors, implementation -template -struct join, TypesVector> { - typedef TypesVector type; -}; - -/// removes first n elements from given TypesVector -template struct remove_n_first; - -/// removes first n elements from given TypesVector -template -struct remove_n_first> { - typedef typename remove_n_first>::type type; -}; - -/// two cases below cannot be one becasuse of ambiguity in instancaition -template -struct remove_n_first<0, TypesVector> { - typedef TypesVector type; -}; - -/// removes first n elements from given TypesVector, n=0 case -template 
<> struct remove_n_first<0, TypesVector<>> { - typedef TypesVector<> type; -}; - -/// returns pos of the element in the TypesVector -template struct pos; - -/// returns pos of Type in given TypeList -template -struct pos> { - enum { - value = pos < Type, - TypesVector> ::value + 1 - }; -}; - -/// returns pos of Type in given TypeList, specialization for case when type is -/// found -template -struct pos> { - enum { - value = 0 - }; -}; - -/// replace element at pos to NewType -template -struct replace_at_pos; - -/// replace type at pos to new type -template -struct replace_at_pos> { - typedef typename join< - TypesVector, - typename replace_at_pos>::type>::type type; -}; - -/// replace type at pos to new type, specialization for pos = 0 -template -struct replace_at_pos<0, NewType, TypesVector> { - typedef TypesVector type; -}; - -} // data_structures -} // paal - -#endif // PAAL_TYPES_VECTOR_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/cycle/cycle_algo.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/cycle/cycle_algo.hpp deleted file mode 100644 index 9b2a577ca..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/cycle/cycle_algo.hpp +++ /dev/null @@ -1,72 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file cycle_algo.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-02-01 - */ - -#ifndef PAAL_CYCLE_ALGO_HPP -#define PAAL_CYCLE_ALGO_HPP - -#include "paal/data_structures/cycle/cycle_traits.hpp" -#include "paal/data_structures/vertex_to_edge_iterator.hpp" - -#include -#include -#include - -namespace paal { - -/** - * @brief computes length of the cycle - * - * @tparam Metric - * @tparam Cycle - * @param m - * @param cm - * - * @return - */ -template -typename Metric::DistanceType get_cycle_length(const Metric &m, const Cycle &cm) { - typedef typename data_structures::cycle_traits::CycleElem El; - typedef typename Metric::DistanceType Dist; - - auto ebegin = - data_structures::make_vertex_to_edge_iterator(cm.vbegin(), cm.vend()); - auto eend = - data_structures::make_vertex_to_edge_iterator(cm.vend(), cm.vend()); - return std::accumulate(ebegin, eend, Dist(), - [&m](Dist a, const std::pair & p)->Dist{ - return a + m(p.first, p.second); - }); -} - -/// pints cycle to std out -template -void print_cycle(const Cycle &cm, Stream &o, const std::string &endl = "\n") { - auto ebegin = - data_structures::make_vertex_to_edge_iterator(cm.vbegin(), cm.vend()); - auto eend = - data_structures::make_vertex_to_edge_iterator(cm.vend(), cm.vend()); - typedef typename data_structures::cycle_traits::CycleElem El; - - for (auto const &p : - boost::make_iterator_range(ebegin, eend)) { - o << "(" << p.first << "," << p.second << ")->"; - } - - o << endl; -} - -} //! 
paal - -#endif // PAAL_CYCLE_ALGO_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/cycle/cycle_concept.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/cycle/cycle_concept.hpp deleted file mode 100644 index db61c927f..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/cycle/cycle_concept.hpp +++ /dev/null @@ -1,44 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file cycle_concept.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-02-28 - */ -#ifndef PAAL_CYCLE_CONCEPT_HPP -#define PAAL_CYCLE_CONCEPT_HPP - -#include "paal/data_structures/cycle/cycle_traits.hpp" - -#include - -namespace paal { -namespace data_structures { -namespace concepts { - -template class Cycle { - public: - BOOST_CONCEPT_USAGE(Cycle) { - ve = x.vbegin(); - ve = x.vbegin(ce); - ve = x.vend(); - x.flip(ce, ce); - } - - private: - X x; - typename cycle_traits::CycleElem ce; - typename cycle_traits::vertex_iterator ve; -}; -} -} -} - -#endif // PAAL_CYCLE_CONCEPT_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/cycle/cycle_start_from_last_change.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/cycle/cycle_start_from_last_change.hpp deleted file mode 100644 index 4ff0565a3..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/cycle/cycle_start_from_last_change.hpp +++ /dev/null @@ -1,98 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file cycle_start_from_last_change.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-02-26 - */ -#ifndef PAAL_CYCLE_START_FROM_LAST_CHANGE_HPP -#define PAAL_CYCLE_START_FROM_LAST_CHANGE_HPP - -#include "cycle_traits.hpp" - -namespace paal { -namespace data_structures { - -/** - * @brief adopts any cycle to start (vbegin) i place of the last change(flip) - * - * @tparam Cycle - */ -template class cycle_start_from_last_change { - public: - typedef typename cycle_traits::CycleElem CycleElem; - typedef typename cycle_traits::vertex_iterator vertex_iterator; - - /** - * @brief constructor - * - * @param c - */ - cycle_start_from_last_change(Cycle &c) - : m_cycle(c), m_element(*c.vbegin()) {} - - /** - * @brief flip stores place of this flip - * - * @param begin - * @param end - */ - void flip(const CycleElem &begin, const CycleElem &end) { - m_element = end; - m_cycle.flip(begin, end); - } - - /** - * @brief vbegin starts from last flip - * - * @return - */ - vertex_iterator vbegin() const { return m_cycle.vbegin(m_element); } - - /** - * @brief vbegin starts from ce - * - * @param ce - * - * @return - */ - vertex_iterator vbegin(const CycleElem &ce) const { - return m_cycle.vbegin(ce); - } - - /** - * @brief vertices end - * - * @return - */ - vertex_iterator vend() const { return m_cycle.vend(); } - - /** - * @brief cycle getter - * - * @return - */ - Cycle &get_cycle() { return m_cycle; } - - /** - * @brief cycle getter const version - * - * @return - */ - const Cycle &get_cycle() const { return m_cycle; } - - private: - Cycle &m_cycle; - CycleElem m_element; -}; -} -} - -#endif // PAAL_CYCLE_START_FROM_LAST_CHANGE_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/cycle/cycle_traits.hpp 
b/patrec/inc/WireCellPatRec/paal/data_structures/cycle/cycle_traits.hpp deleted file mode 100644 index d864e9720..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/cycle/cycle_traits.hpp +++ /dev/null @@ -1,36 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file cycle_traits.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-02-26 - */ -#ifndef PAAL_CYCLE_TRAITS_HPP -#define PAAL_CYCLE_TRAITS_HPP - -#include "paal/utils/type_functions.hpp" - -namespace paal { -namespace data_structures { - -/** - * @brief traits for \ref cycle concept - * - * @tparam Cycle - */ -template struct cycle_traits { - /// Vertex iterator type - typedef decltype(std::declval().vbegin()) vertex_iterator; - typedef typename std::iterator_traits::value_type - CycleElem; -}; -} -} -#endif // PAAL_CYCLE_TRAITS_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/cycle/simple_cycle.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/cycle/simple_cycle.hpp deleted file mode 100644 index b8bc3a112..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/cycle/simple_cycle.hpp +++ /dev/null @@ -1,514 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file simple_cycle.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-02-01 - */ - -#ifndef PAAL_SIMPLE_CYCLE_HPP -#define PAAL_SIMPLE_CYCLE_HPP - -#include "paal/data_structures/bimap.hpp" - -#include -#include -#include - -namespace paal { -namespace data_structures { - -/** - * @class simple_cycle - * @brief This is the simplest implementation of the \ref cycle concept based on - * the list. - * - * @tparam CycleEl - * @tparam IdxT - */ -template class simple_cycle { - public: - using cycle_el_pair = std::pair; - using cycle_element = CycleEl; - - /** - * @brief constructor - * - * @tparam Iter - * @param begin - * @param end - */ - template simple_cycle(Iter begin, Iter end) { - if (begin == end) { - return; - } - - std::size_t size = std::distance(begin, end); - - m_predecessor_map.reserve(size); - m_successor_map.reserve(size); - - IdxT prev_idx = add(*(begin++)); - IdxT firstIdx = prev_idx; - for (; begin != end; ++begin) { - IdxT lastIdx = add(*begin); - link(prev_idx, lastIdx); - prev_idx = lastIdx; - } - link(prev_idx, firstIdx); - } - - /// after flip the order will be reversed, ie it will be from 'end' to - /// 'begin' - void flip(const CycleEl &begin, const CycleEl &end) { - IdxT e1 = to_idx(begin); - IdxT b1 = prev_idx(e1); - IdxT b2 = to_idx(end); - IdxT e2 = next_idx(b2); - - partial_reverse(b2, e1); - link(b1, b2); - link(e1, e2); - } - - /** - * @brief number of elements in the cycle - * - * @return - */ - std::size_t size() const { return m_predecessor_map.size(); } - - /** - * @brief next element in the cycle - * - * @param ce - * - * @return - */ - CycleEl next(const CycleEl &ce) const { - return from_idx(next_idx(to_idx(ce))); - } - - // TODO use iterator_fascade - /** - * @brief iterator over vertices of the cycle - */ - class vertex_iterator - 
: public std::iterator { - public: - - /** - * @brief constructor - * - * @param cm - * @param ce - */ - vertex_iterator(const simple_cycle &cm, CycleEl ce) - : m_cycle(&cm), m_idx(m_cycle->to_idx(ce)), m_first(m_idx) {} - - /** - * @brief default constructor - */ - vertex_iterator() : m_cycle(NULL), m_idx(-1) {} - - /** - * @brief operator++() - * - * @return - */ - vertex_iterator &operator++() { - m_idx = next_idx(m_idx); - - if (m_idx == m_first) { - m_idx = -1; - } - - return *this; - } - - /** - * @brief operator++(int) - * - * @return - */ - vertex_iterator operator++(int) { - vertex_iterator i(*this); - operator++(); - return i; - } - - /** - * @brief operator!= - * - * @param ei - * - * @return - */ - bool operator!=(vertex_iterator ei) const { return !operator==(ei); } - - /** - * @brief operator== - * - * @param ei - * - * @return - */ - bool operator==(vertex_iterator ei) const { return m_idx == ei.m_idx; } - - /** - * @brief operator->() - * - * @return - */ - const CycleEl *const operator->() const { return &operator*(); } - - /** - * @brief operator*() - * - * @return - */ - const CycleEl &operator*() const { return m_cycle->from_idx(m_idx); } - - private: - - /** - * @brief next element in the cycle - * - * @param i index of the element - * - * @return - */ - IdxT next_idx(IdxT i) const { return m_cycle->next_idx(i); } - - const simple_cycle *m_cycle; - IdxT m_idx; - IdxT m_first; - }; - - using vertices = boost::iterator_range; - - /** - * @brief begin of the vertices range starting at el - * - * @param el - * - * @return - */ - vertex_iterator vbegin(const CycleEl &el) const { - return vertex_iterator(*this, el); - } - - /** - * @brief begin of the vertices range - * - * @return - */ - vertex_iterator vbegin() const { return vbegin(from_idx(0)); } - - /** - * @brief end of the vertices range - * - * @return - */ - vertex_iterator vend() const { return vertex_iterator(); } - - /** - * @brief returns range of vertices starting at el - * - * @param 
el - * - * @return - */ - vertices get_vertices_range(const CycleEl &el) const { - return vertices(vbegin(el), vend()); - } - - /** - * @brief returns range of vertices - * - * @return - */ - vertices get_vertices_range() const { - return get_vertices_range(from_idx(0)); - } - - // TODO use iterator_fascade - /** - * @brief Iterator on cycle edges - */ - class edge_iterator - : public std::iterator { - public: - /** - * @brief constructor - * - * @param cm - * @param ce - */ - edge_iterator(const simple_cycle &cm, CycleEl ce) - : m_cycle(&cm), m_idx(m_cycle->to_idx(ce)), m_first(m_idx) { - - move_curr(); - } - - /** - * @brief default constructor - */ - edge_iterator() : m_cycle(NULL), m_idx(-1) {} - - /** - * @brief operator++() - * - * @return - */ - edge_iterator &operator++() { - m_idx = next_idx(m_idx); - move_curr(); - - if (m_idx == m_first) { - m_idx = -1; - } - - return *this; - } - - /** - * @brief operator++(int) - * - * @return - */ - edge_iterator operator++(int) { - edge_iterator i(*this); - operator++(); - return i; - } - - /** - * @brief operator!= - * - * @param ei - * - * @return - */ - bool operator!=(edge_iterator ei) const { return !operator==(ei); } - - /** - * @brief operator== - * - * @param ei - * - * @return - */ - bool operator==(edge_iterator ei) const { return m_idx == ei.m_idx; } - - /** - * @brief operator-> - * - * @return - */ - const cycle_el_pair *const operator->() const { return &m_curr; } - - /** - * @brief operator*() - * - * @return - */ - const cycle_el_pair &operator*() const { return m_curr; } - - private: - /** - * @brief move to the next edge - */ - void move_curr() { - m_curr.first = m_cycle->from_idx(m_idx); - m_curr.second = m_cycle->from_idx(next_idx(m_idx)); - } - - /** - * @brief gets next id in the cycle - * - * @param i - * - * @return - */ - IdxT next_idx(IdxT i) const { return m_cycle->next_idx(i); } - - const simple_cycle *m_cycle; - IdxT m_idx; - IdxT m_first; - cycle_el_pair m_curr; - }; - - using edges = 
boost::iterator_range; - - /** - * @brief returns edges range starting at el - * - * @param el - * - * @return - */ - edges get_edge_range(const CycleEl &el) const { - return edges(edge_iterator(*this, el), edge_iterator()); - } - - /** - * @brief returns edges range - * - * @return - */ - edges get_edge_range() const { - return get_edge_range(from_idx(0)); - } - - protected: - /** - * @brief connects two vertices represented by ids - * - * @param x - * @param y - */ - void link(IdxT x, IdxT y) { - m_successor_map[x] = y; - m_predecessor_map[y] = x; - } - - /** - * @brief after this operation links from x to y are connected i reverse - * order, after this function call cycle is in inconsistent state - * - * @param x - * @param y - */ - void partial_reverse(IdxT x, IdxT y) { - if (x == y) return; - IdxT t_next = prev_idx(x); - IdxT t; - do { - t = t_next; - t_next = prev_idx(t); - link(x, t); - x = t; - } while (t != y); - } - - /** - * @brief vertex to idx - * - * @param ce - * - * @return - */ - IdxT to_idx(const CycleEl &ce) const { return m_cycle_idx.get_idx(ce); } - - /** - * @brief returns next idx in the cycle - * - * @param i - * - * @return - */ - IdxT next_idx(IdxT i) const { return m_successor_map[i]; } - - /** - * @brief returns previous idx - * - * @param i - * - * @return - */ - IdxT prev_idx(IdxT i) const { return m_predecessor_map[i]; } - - /** - * @brief idx to vertex - * - * @param i - * - * @return - */ - const CycleEl &from_idx(IdxT i) const { return m_cycle_idx.get_val(i); } - - /** - * @brief ads new element to cycle data structures - * - * @param el - * - * @return - */ - IdxT add(const CycleEl &el) { - m_predecessor_map.push_back(-1); - m_successor_map.push_back(-1); - return m_cycle_idx.add(el); - } - - /// mapping from elements to indexes - bimap m_cycle_idx; - - using SorsMap = std::vector; - - /// predecessors - SorsMap m_predecessor_map; - /// successors - SorsMap m_successor_map; -}; - -/** - * @brief this class adapts Simple cycle to 
start from last changed position - * - * @tparam CycleEl - * @tparam IdxT - */ -template -class Simplecycle_start_from_last_change : public simple_cycle { - using Base = simple_cycle; - - public: - /** - * @brief constructor - * - * @tparam Iter - * @param b - * @param e - */ - template - Simplecycle_start_from_last_change(Iter b, Iter e) - : Base(b, e), m_last_id(0) {} - - /** - * @brief flip remembers last changed position - * - * @param begin - * @param end - */ - void flip(const CycleEl &begin, const CycleEl &end) { - IdxT e1 = to_idx(begin); - m_last_id = prev_idx(e1); - Base::flip(begin, end); - } - - /** - * @brief vbegin starts from last flip position - * - * @return - */ - typename Base::vertex_iterator vbegin() const { - return Base::vbegin(from_idx(m_last_id)); - } - - private: - IdxT m_last_id; -}; - -} // data_structures -} // paal - -#endif // PAAL_SIMPLE_CYCLE_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/cycle/splay_cycle.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/cycle/splay_cycle.hpp deleted file mode 100644 index 6fd54ac61..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/cycle/splay_cycle.hpp +++ /dev/null @@ -1,111 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file splay_cycle.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-03-08 - */ -#ifndef PAAL_SPLAY_CYCLE_HPP -#define PAAL_SPLAY_CYCLE_HPP - -#include "paal/data_structures/splay_tree.hpp" -#include "paal/data_structures/bimap.hpp" -#include "paal/data_structures/cycle_iterator.hpp" - -namespace paal { -namespace data_structures { - -/** - * @brief Cycle based on splay tree - * - * @tparam T - */ -template -class splay_cycle { - using SIter = typename splay_tree::iterator; -public: - typedef cycle_iterator VIter; - - splay_cycle() = default; - - /** - * @brief constructor from range - * - * @tparam Iter - * @param begin - * @param end - */ - template - splay_cycle(Iter begin, Iter end) - : m_splay_tree(begin, end), m_size(m_splay_tree.size()) {} - - /** - * @brief vertices begin - * - * @return - */ - VIter vbegin() const { - return VIter(m_splay_tree.begin(), m_splay_tree.begin(), - m_splay_tree.end()); - } - - /** - * @brief vertices begin (from t) - * - * @param t - * - * @return - */ - VIter vbegin(const T &t) const { - std::size_t i = m_splay_tree.get_idx(t); - assert(i != std::size_t(-1)); - return VIter(m_splay_tree.splay(i), m_splay_tree.begin(), m_splay_tree.end()); - } - - /** - * @brief vertices end - * - * @return - */ - VIter vend() const { - auto e = m_splay_tree.end(); - return VIter(e, e, e); - } - - /** - * @brief flips range from begin to end - * - * @param begin - * @param end - */ - void flip(const T &begin, const T &end) { - if (begin == end) { - return; - } - std::size_t b = m_splay_tree.get_idx(begin); - assert(b != std::size_t(-1)); - std::size_t e = m_splay_tree.get_idx(end); - assert(e != std::size_t(-1)); - if (b < e) { - m_splay_tree.reverse(b, e); - } else { - m_splay_tree.reverse(e + 1, b - 1); - m_splay_tree.reverse(0, m_size - 1); - } 
- } - - private: - splay_tree m_splay_tree; - const std::size_t m_size; -}; - -} //! data_structures -} //! paal -#endif // PAAL_SPLAY_CYCLE_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/cycle_iterator.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/cycle_iterator.hpp deleted file mode 100644 index cd4b443b7..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/cycle_iterator.hpp +++ /dev/null @@ -1,108 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file cycle_iterator.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-03-11 - */ -#ifndef PAAL_CYCLE_ITERATOR_HPP -#define PAAL_CYCLE_ITERATOR_HPP - -#include "paal/utils/type_functions.hpp" - -#include - -namespace paal { -namespace data_structures { - -// could have been done by simple boost::join -// minor TODO class can specialized for randomaccess iterators - -/** - * @class cycle_iterator - * @brief For given collection (begin -> end) and start iterator pointing to an - * element inside - * collection (begin -> ... -> start -> ... ->end), returns new - * collection created by shifting the old collection to start. 
- * - * example: - * WE are given collection of 5 elemeents and start points to the third - * one: - * 1 -> 2 -> 3 (start) -> 4 -> 5 -> end - * - * The collection - * ( cycle_iterator(start, begin, end), cycle_iterator() ) - * describes collection - * 3 -> 4 -> 5 -> 1 -> 2 -> end - * - * @tparam Iter type of iterator - */ -template -class cycle_iterator : public boost::iterator_facade< - cycle_iterator, typename std::iterator_traits::value_type, - typename boost::forward_traversal_tag, - typename std::iterator_traits::reference, - typename std::iterator_traits::difference_type> { - - typedef std::iterator_traits IT; - typedef typename IT::reference ref; - - public: - - /** - * @brief constructing of cycle_iterator - * - * @param start new start - * @param begin old start - * @param end old end - */ - cycle_iterator(Iter start, Iter begin, Iter end) - : m_curr(start), m_start(start), m_begin(begin), m_end(end), - m_is_end(false) {} - - /** - * @brief Points to end of the collection - */ - cycle_iterator() : m_is_end(true) {} - - private: - friend class boost::iterator_core_access; - /** - * Standard iterator facade implementation: - */ - - void increment() { - ++m_curr; - - if (m_curr == m_end) { - m_curr = m_begin; - } - - if (m_curr == m_start) { - m_is_end = true; - m_curr = m_end; - } - } - - bool equal(cycle_iterator ei) const { - return (m_is_end && ei.m_is_end) || m_curr == ei.m_curr; - } - - ref dereference() const { return *m_curr; } - - Iter m_curr; - Iter m_start; - Iter m_begin; - Iter m_end; - bool m_is_end = false; -}; -} -} -#endif // PAAL_CYCLE_ITERATOR_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/facility_location/facility_location_solution.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/facility_location/facility_location_solution.hpp deleted file mode 100644 index 0a4cf6018..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/facility_location/facility_location_solution.hpp +++ /dev/null @@ -1,133 +0,0 @@ 
-//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file facility_location_solution.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-02-01 - */ - -#ifndef PAAL_FACILITY_LOCATION_SOLUTION_HPP -#define PAAL_FACILITY_LOCATION_SOLUTION_HPP - -#define BOOST_RESULT_OF_USE_DECLTYPE - -#include "facility_location_solution_traits.hpp" - -#include "paal/data_structures/voronoi/voronoi.hpp" - -#include -#include -#include - -namespace paal { -namespace data_structures { - -/** - * @brief describes solution to facility location - * The initial solution is passed as voronoi, which has to be the model of the - * \ref voronoi concept. - * The generators of the voronoi are the facilities and the vertices are the - * clients. 
- * - * @tparam FacilityCost - * @tparam VoronoiType - */ -template -class facility_location_solution { -public: - typedef voronoi_traits VT; - typedef typename VT::VertexType VertexType; - typedef typename VT::DistanceType Dist; - typedef typename VT::GeneratorsSet ChosenFacilitiesSet; - typedef std::unordered_set> - UnchosenFacilitiesSet; - -private: - - VoronoiType m_voronoi; - UnchosenFacilitiesSet m_unchosen_facilities; - const FacilityCost &m_fac_costs; -public: - - /** - * @brief constructor - * - * @param voronoi - * @param uf - * @param c - */ - facility_location_solution(VoronoiType voronoi, UnchosenFacilitiesSet uf, - const FacilityCost &c) - : m_voronoi(std::move(voronoi)), m_unchosen_facilities(std::move(uf)), - m_fac_costs(c) {} - - /// returns diff between new cost and old cost - Dist add_facility(VertexType f) { - assert(m_unchosen_facilities.find(f) != m_unchosen_facilities.end()); - m_unchosen_facilities.erase(f); - - return m_fac_costs(f) + m_voronoi.add_generator(f); - } - - /// returns diff between new cost and old cost - Dist rem_facility(VertexType f) { - assert(m_unchosen_facilities.find(f) == m_unchosen_facilities.end()); - m_unchosen_facilities.insert(f); - - return -m_fac_costs(f) + m_voronoi.rem_generator(f); - } - - /// getter for unchosen facilities - const UnchosenFacilitiesSet &get_unchosen_facilities() const { - return m_unchosen_facilities; - } - - /// setter for unchosen facilities - const ChosenFacilitiesSet &get_chosen_facilities() const { - return m_voronoi.get_generators(); - } - - /// gets clients assigned to specific facility - auto get_clients_for_facility(VertexType f) const -> - decltype(m_voronoi.get_vertices_for_generator(f)) - { - return m_voronoi.get_vertices_for_generator(f); - } - - /// gets voronoi - const VoronoiType &get_voronoi() const { return m_voronoi; } - -}; - -/** - * @brief traits for facility_location_solution - * - * @tparam FacilityCost - * @tparam voronoi - */ -template -class 
facility_location_solution_traits< - facility_location_solution> { - typedef voronoi_traits VT; - typedef facility_location_solution FLS; - - public: - typedef typename VT::VertexType VertexType; - typedef typename VT::DistanceType Dist; - typedef typename VT::GeneratorsSet ChosenFacilitiesSet; - /// unchosen facilities set - typedef puretype(std::declval().get_unchosen_facilities()) - UnchosenFacilitiesSet; -}; - -} // !data_structures -} // !paal - -#endif // PAAL_FACILITY_LOCATION_SOLUTION_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/facility_location/facility_location_solution_traits.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/facility_location/facility_location_solution_traits.hpp deleted file mode 100644 index a1b775544..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/facility_location/facility_location_solution_traits.hpp +++ /dev/null @@ -1,24 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file facility_location_solution_traits.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-03-25 - */ -#ifndef PAAL_FACILITY_LOCATION_SOLUTION_TRAITS_HPP -#define PAAL_FACILITY_LOCATION_SOLUTION_TRAITS_HPP -namespace paal { -namespace data_structures { - -template -class facility_location_solution_traits {}; -} -} -#endif // PAAL_FACILITY_LOCATION_SOLUTION_TRAITS_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/facility_location/fl_algo.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/facility_location/fl_algo.hpp deleted file mode 100644 index 723a2049c..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/facility_location/fl_algo.hpp +++ /dev/null @@ -1,110 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file fl_algo.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-02-15 - */ -#ifndef PAAL_FL_ALGO_HPP -#define PAAL_FL_ALGO_HPP - -#include "paal/data_structures/metric/metric_traits.hpp" -#include "paal/utils/accumulate_functors.hpp" -#include "paal/utils/functors.hpp" - - -namespace paal { -namespace simple_algo { - -/** - * @brief returns cost for capacitated facility location - * - * @tparam Metric - * @tparam FCosts - * @tparam FLSolution - * @param m - * @param fcosts - * @param fls - * - * @return - */ -template -typename data_structures::metric_traits::DistanceType -get_cfl_cost(const Metric &m, const FCosts &fcosts, const FLSolution &fls) { - auto const &ch = fls.get_chosen_facilities(); - - typedef data_structures::metric_traits MT; - typedef typename MT::DistanceType Dist; - - //TODO use sum_functor when appears - Dist d = accumulate_functor(ch, Dist(0), fcosts); - - for (auto && f : ch) { - for (auto && v : fls.get_clients_for_facility(f)) { - d += m(v.first, f) * v.second; - } - } - - return d; -} - -/** - * @brief return cost for facility location - * - * @tparam Metric - * @tparam FCosts - * @tparam FLSolution - * @param m - * @param fcosts - * @param fls - * - * @return - */ -template -typename data_structures::metric_traits::DistanceType -get_fl_cost(const Metric &m, const FCosts &fcosts, const FLSolution &fls) { - auto const &ch = fls.get_chosen_facilities(); - - typedef data_structures::metric_traits MT; - typedef typename MT::DistanceType Dist; - - //TODO use sum_functor when appears - Dist d = accumulate_functor(ch, Dist(0), fcosts); - - for (auto && f : ch) { - for (auto && v : fls.get_clients_for_facility(f)) { - d += m(v, f); - } - } - - return d; -} - -/** - * @brief returns cost for k-median - * - * @tparam Metric - * @tparam FLSolution - * 
@param m - * @param fls - * - * @return - */ -template -typename data_structures::metric_traits::DistanceType -get_km_cost(const Metric &m, const FLSolution &fls) { - utils::return_zero_functor m_zero_func; - return paal::simple_algo::get_fl_cost(std::move(m), m_zero_func, - std::move(fls)); -} - -} //! simple_algo -} //! paal -#endif // PAAL_FL_ALGO_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/facility_location/k_median_solution.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/facility_location/k_median_solution.hpp deleted file mode 100644 index fb05e45dd..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/facility_location/k_median_solution.hpp +++ /dev/null @@ -1,79 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file k_median_solution.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-03-08 - */ -#ifndef PAAL_K_MEDIAN_SOLUTION_HPP -#define PAAL_K_MEDIAN_SOLUTION_HPP - -#include "paal/utils/functors.hpp" -#include "paal/data_structures/facility_location/facility_location_solution.hpp" - -namespace paal { -namespace data_structures { - -/** - * @brief solution for k median problem - * - * @tparam voronoiType - */ -template -class k_median_solution : public data_structures::facility_location_solution< - utils::return_zero_functor, voronoiType> { - typedef data_structures::facility_location_solution< - utils::return_zero_functor, voronoiType> base; - - public: - /** - * @brief constructor - * - * @param voronoi - * @param uf - * @param k - */ - k_median_solution(voronoiType voronoi, - typename base::UnchosenFacilitiesSet uf, int k) - : base(std::move(voronoi), std::move(uf), m_zero_func) { - 
assert(int(base::get_chosen_facilities().size()) == k); - } - - private: - utils::return_zero_functor m_zero_func; -}; - -} // data_structures - -namespace data_structures { -/** - * @brief specialization of facility_location_solution_traits - * - * @tparam voronoi - */ -template -class facility_location_solution_traits< - data_structures::k_median_solution> { - typedef voronoi_traits VT; - typedef data_structures::k_median_solution KMS; - - public: - typedef typename VT::VertexType VertexType; - typedef typename VT::DistanceType Dist; - typedef typename VT::GeneratorsSet ChosenFacilitiesSet; - /// unchosen facilities set type - typedef puretype(std::declval().get_unchosen_facilities()) - UnchosenFacilitiesSet; -}; -} - -} // paal - -#endif // PAAL_K_MEDIAN_SOLUTION_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/fraction.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/fraction.hpp deleted file mode 100644 index f62e51091..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/fraction.hpp +++ /dev/null @@ -1,174 +0,0 @@ -//======================================================================= -// Copyright (c) -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file fraction.hpp - * @brief Implementation of fractions, which are used only for comparison purposes, - * and thus they can be used with floating point types as well. 
- * @author Robert Rosolek - * @version 1.0 - * @date 2013-06-06 - */ - -#ifndef PAAL_FRACTION_HPP -#define PAAL_FRACTION_HPP - -#include "paal/utils/floating.hpp" - -namespace paal { -namespace data_structures { - -/** - * @brief simple class to represent fraction - * - * @tparam A - * @tparam B - */ -template struct fraction { - ///numerator type - using num_type = A; - /// denominator type - using den_type = B; - /// numerator - A num; - /// denominator - B den; - /// constructor - fraction(A num, B den) : num(num), den(den) {} -}; - -/** - * @brief operator< - * - * @tparam A - * @tparam B - * @tparam C - * @tparam D - * @param f1 - * @param f2 - * - * @return - */ -template -bool operator<(const fraction &f1, const fraction &f2) -{ - return f1.num * f2.den < f2.num * f1.den; -} - -/** - * @brief operator> - * - * @tparam A - * @tparam B - * @tparam C - * @tparam D - * @param f1 - * @param f2 - * - * @return - */ -template -bool operator>(const fraction &f1, const fraction &f2) -{ - return f2 < f1; -} - -/** - * @brief operator<= - * - * @tparam A - * @tparam B - * @tparam C - * @tparam D - * @param f1 - * @param f2 - * - * @return - */ -template -bool operator<=(const fraction &f1, const fraction &f2) -{ - return !(f2 < f1); -} - -/** - * @brief operator>= - * - * @tparam A - * @tparam B - * @tparam C - * @tparam D - * @param f1 - * @param f2 - * - * @return - */ -template -bool operator>=(const fraction &f1, const fraction &f2) -{ - return !(f1 < f2); -} - -/** - * @brief operator== - * - * @tparam A - * @tparam B - * @tparam C - * @tparam D - * @tparam EPS - * @param f1 - * @param f2 - * @param eps - * - * @return - */ -template -bool are_fractions_equal(const fraction& f1, const fraction& f2, EPS eps = A{}) -{ - auto x = f1.num * f2.den - f2.num * f1.den; - utils::compare cmp(eps); - return cmp.e(x, 0); -} - -/** - * @brief make function for fraction - * - * @tparam A - * @tparam B - * @param a - * @param b - * - * @return - */ -template -fraction 
make_fraction(A a, B b) -{ - return fraction(a, b); -} - -/** - * @brief operator* - * - * @tparam A - * @tparam B - * @tparam C - * @param c - * @param f - * - * @return - */ -template -auto operator*(C c, const fraction& f) { - - return make_fraction(c * f.num, f.den); -} - -} //!data_structures -} //!paal - -#endif // PAAL_FRACTION_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/mapped_file.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/mapped_file.hpp deleted file mode 100644 index 5ec3437aa..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/mapped_file.hpp +++ /dev/null @@ -1,182 +0,0 @@ -//======================================================================= -// Copyright (c) 2014 Karol Wegrzycki -// -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= - -/** - * @file mapped_file.hpp - * @brief Interface for using mmaped files with threads. - * @author Karol Wegrzycki - * @version 1.0 - * @date 2014-12-17 - */ - -#ifndef PAAL_MAPPED_FILE_HPP -#define PAAL_MAPPED_FILE_HPP - -#define BOOST_ERROR_CODE_HEADER_ONLY -#define BOOST_SYSTEM_NO_DEPRECATED - -#include "paal/utils/type_functions.hpp" -#include "paal/utils/irange.hpp" - -#include "paal/data_structures/thread_pool.hpp" - -#include - -#include -#include -#include - -namespace paal { -namespace data_structures { - -/** - * @class mapped_file - * @brief data structure that gets new lines for many threads - * - */ -class mapped_file { -private: - - char const * m_current; - char const * m_file_begin; - char const * m_file_end; - char const * m_chunk_suggested_end; - -public: - - /** - * @brief Initializes mmaped file with the specific chunk - so that every - * thread could use different part of the file. 
- * - * @param file mmap file pointer - * @param file_size size of the mmaped file - * @param chunk_index m_current chunk index - * @param chunk_cnt number of the chunks (usually equal the number of the threads) - */ - mapped_file(char const * file, size_t file_size, unsigned chunk_index, unsigned chunk_cnt): - mapped_file(file, file_size) { - assert(chunk_cnt > 0); - assert(chunk_index < chunk_cnt); - m_current = m_file_begin + file_size * chunk_index / chunk_cnt; - m_chunk_suggested_end = m_file_begin + file_size * (chunk_index + 1) / chunk_cnt; - if (m_current > m_file_begin && *(m_current-1) != '\n') { - get_line(); - } - } - - /** - * @brief Initializes mmaped file. - * - * @param file - mmap file pointer - * @param file_size - size of the mmaped file - */ - mapped_file(char const * file, size_t file_size) : - m_current(file), - m_file_begin(file), - m_file_end(file+file_size), - m_chunk_suggested_end(m_file_end) {} - - /** - * @brief Gets line from the m_current file. Eof and End Of Chunk - * aren't checked here. - * - * @return copied line - */ - std::string get_line() { - auto result_begin = m_current; - auto result_end = std::find(m_current, m_file_end, '\n'); - - m_current = result_end + 1; - return std::string(result_begin, result_end-result_begin); - } - - /** - * @brief is m_currently at the end of file - */ - bool eof() const { - return m_current >= m_file_end; - } - - /** - * @brief is m_currently at the end of requested part of the file - */ - bool end_of_chunk() const { - return m_current >= m_chunk_suggested_end; - } - /** - * @brief Computes functor on every line of the file. It takes care of - * the chunks and end of file. - * - * @tparam Functor - * @param f - Functor that should be computed - * - */ - template - void for_each_line(Functor f) { - while (!eof() && !end_of_chunk()) { - f(get_line()); - } - } - -}; - - -/** - * @brief for_every_line function provides basic functionality for processing - * text files quickly and clearly. 
Thanks to mmap() functionality it doesn't - * have to seek through file but it loads it to virtual memory instantly and - * uses only ram cache to do that. Furthermore file is split instantly - thanks - * to that it can be processed effectively using threads. Downside of using mmap - * is that this functionality will not work effectively if threads have small jobs - * to be done comparing reading the line charge. - * It's supposed to work with O(threads_count) memory usage but remember - - * RES (resident size) stands for how much memory of this process is loaded in - * physical memory, so file pages loaded in ram cache are added to that value. - * - * @tparam Functor = std::string -> Result - * @param f - Functor that should be evaluated for every line in file - * @param file_path - path to the file for which values should be computed - * @param threads_count - default std::thread::hardware_concurrency() - */ -template -auto for_each_line(Functor f, std::string const & file_path, - unsigned threads_count = std::thread::hardware_concurrency()) { - - using results_t = std::vector>; - - std::vector results(threads_count); - thread_pool threads(threads_count); - - boost::iostreams::mapped_file_source mapped(file_path); - auto data = mapped.data(); - - for (auto i : irange(threads_count)) { - threads.post([&, i]() { - mapped_file file_chunk(data, mapped.size(), i, threads_count); - file_chunk.for_each_line( - [&](std::string const & line) { - results[i].push_back(f(line)); - } - ); - }); - } - - threads.run(); - mapped.close(); - - results_t joined_results; - for (auto const & v: results) { - joined_results.insert(end(joined_results), std::begin(v), std::end(v)); - } - return joined_results; -} - -} //! data_structures -} //! 
paal -#endif // PAAL_MAPPED_FILE_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/metric/basic_metrics.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/metric/basic_metrics.hpp deleted file mode 100644 index 395bc6bf4..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/metric/basic_metrics.hpp +++ /dev/null @@ -1,169 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file basic_metrics.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-02-15 - */ -#ifndef PAAL_BASIC_METRICS_HPP -#define PAAL_BASIC_METRICS_HPP - -#include "metric_traits.hpp" - -#include -#include - -#include - -namespace paal { -namespace data_structures { - -/** - * @class rectangle_array_metric - * @brief \ref metric implementation on 2 dimensional array - * distance calls on this metric are valid opnly when x < N and y < M - * (N and M given in the constructor) - * when we know that only certain calls occurs it might be worthwhile to - * use this metric - * - * @tparam DistanceTypeParam - */ -template class rectangle_array_metric { - public: - typedef DistanceTypeParam DistanceType; - typedef int VertexType; - /** - * @brief constructor - * - * @param N - * @param M - */ - rectangle_array_metric(int N = 0, int M = 0) - : m_matrix(boost::extents[N][M]) {} - - /** - * @brief operator(), valid only when v < N and w < M - * - * @param v - * @param w - * - * @return - */ - DistanceType operator()(const VertexType &v, const VertexType &w) const { - return m_matrix[v][w]; - } - - /** - * @brief operator(), valid only when v < N and w < M, nonconst version - * - * @param v - * @param w - * - * @return - */ - DistanceType &operator()(const 
VertexType &v, const VertexType &w) { - return m_matrix[v][w]; - } - - /** - * @brief constructor from another metric - * - * @tparam OtherMetrics - * @param other - * @param xrange - * @param yrange - */ - template - rectangle_array_metric(const OtherMetrics &other, XRange && xrange - , YRange && yrange) - : rectangle_array_metric(boost::distance(xrange), - boost::distance(yrange)) { - int i = 0; - for (auto && v : xrange) { - int j = 0; - for (auto && w : yrange) { - m_matrix[i][j] = other(v, w); - ++j; - } - ++i; - } - } - - /** - * @brief operator= - * - * @param am - * - * @return - */ - rectangle_array_metric &operator=(const rectangle_array_metric &am) { - auto shape = am.m_matrix.shape(); - std::vector dim(shape, shape + DIM_NR); - m_matrix.resize(dim); - m_matrix = am.m_matrix; - return *this; - } - - ///operator== - bool operator==(const rectangle_array_metric & other) const { - return m_matrix == other.m_matrix; - } - - protected: - /** - * @brief dimention of multi array - */ - static const int DIM_NR = 2; - typedef boost::multi_array matrix_type; - /// matrix with data - matrix_type m_matrix; -}; - - - -/** - * @brief this metric is rectangle_array_metric with N == M. 
- * - * @tparam DistanceTypeParam - */ -template -class array_metric : public rectangle_array_metric { - typedef rectangle_array_metric base; - - public: - /** - * @brief constructor - * - * @param N - */ - array_metric(int N = 0) : base(N, N) {} - - /** - * @brief returns N - * - * @return - */ - int size() const { return this->m_matrix.size(); } - - /** - * @brief constructor from another metric - * - * @tparam OtherMetrics - * @tparam Items - * @param other - * @param items - */ - template - array_metric(const OtherMetrics &other, Items && items) - : base(other, items, items) {} -}; -} -} -#endif // PAAL_BASIC_METRICS_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/metric/euclidean_metric.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/metric/euclidean_metric.hpp deleted file mode 100644 index 148786c9a..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/metric/euclidean_metric.hpp +++ /dev/null @@ -1,44 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// 2014 Piotr Smulewicz -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file euclidean_metric.hpp - * @brief - * @author Piotr Wygocki, Piotr Smulewicz - * @version 1.0 - * @date 2013-10-28 - */ -#ifndef PAAL_EUCLIDEAN_METRIC_HPP -#define PAAL_EUCLIDEAN_METRIC_HPP - -#include "paal/data_structures/metric/metric_traits.hpp" -#include "paal/utils/type_functions.hpp" - -#include - -namespace paal { -namespace data_structures { - - -/// metric with euclidean distance -template struct euclidean_metric { - /// operator() - auto operator()(const std::pair &p1, const std::pair &p2) const - -> decltype(std::hypot(p1.first - p2.first, p1.second - p2.second)) { - return std::hypot(p1.first - p2.first, p1.second - p2.second); - } -}; - -template -struct metric_traits> - : public _metric_traits, std::pair> {}; -} // data_structures - -} // paal - -#endif // PAAL_EUCLIDEAN_METRIC_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/metric/graph_metrics.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/metric/graph_metrics.hpp deleted file mode 100644 index e40eab8b2..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/metric/graph_metrics.hpp +++ /dev/null @@ -1,144 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file graph_metrics.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-02-01 - */ -#ifndef PAAL_GRAPH_METRICS_HPP -#define PAAL_GRAPH_METRICS_HPP - -#include "basic_metrics.hpp" - -#include -#include -#include - -namespace paal { -namespace data_structures { - -namespace graph_type { -class sparse_tag; -class dense_tag; -class large_tag; -} - -/** - * @brief traits for graph metric - * - * @tparam Graph - */ -template struct graph_metric_traits { - //default graph_type - using graph_tag_type = graph_type::sparse_tag; -}; - - -/// generic strategies of computing metric -template struct graph_metric_filler_impl; - -/** - * @brief specialization for sparse_tag graphs - */ -template <> struct graph_metric_filler_impl { - /** - * @brief fill_matrix function - * - * @tparam Graph - * @tparam ResultMatrix - * @param g - * @param rm - */ - template - void fill_matrix(const Graph &g, ResultMatrix &rm) { - boost::johnson_all_pairs_shortest_paths(g, rm); - } -}; - -/** - * @brief specialization strategies of computing metric for dense_tag graphs - */ -template <> struct graph_metric_filler_impl { - template - /** - * @brief fill_matrixFunction - * - * @param g - * @param rm - */ - void fill_matrix(const Graph &g, ResultMatrix &rm) { - boost::floyd_warshall_all_pairs_shortest_paths(g, rm); - } -}; - -/** - * @class graph_metric - * @brief Adopts boost graph as \ref metric. - * - * @tparam Graph - * @tparam DistanceType - * @tparam GraphType - */ -// GENERIC -// GraphType could be sparse, dense, large ... 
-template < - typename Graph, typename DistanceType, - typename GraphType = typename graph_metric_traits::graph_tag_type> -struct graph_metric : public array_metric, - public graph_metric_filler_impl< - typename graph_metric_traits::graph_tag_type> { - typedef array_metric GMBase; - typedef graph_metric_filler_impl< - typename graph_metric_traits::graph_tag_type> GMFBase; - - /** - * @brief constructor - * - * @param g - */ - graph_metric(const Graph &g) : GMBase(num_vertices(g)) { - GMFBase::fill_matrix(g, GMBase::m_matrix); - } -}; - -// TODO implement -/// Specialization for large graphs -template -struct graph_metric { - /** - * @brief constructor - * - * @param g - */ - graph_metric(const Graph &g) { assert(false); } -}; - -/// Specialization for adjacency_list -template -struct graph_metric_traits< - boost::adjacency_list> { - typedef graph_type::sparse_tag graph_tag_type; -}; - -/// Specialization for adjacency_matrix -template -struct graph_metric_traits> { - typedef graph_type::dense_tag graph_tag_type; -}; - -} //!data_structures -} //!paal - -#endif // PAAL_GRAPH_METRICS_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/metric/metric_on_idx.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/metric/metric_on_idx.hpp deleted file mode 100644 index 7d72d7c08..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/metric/metric_on_idx.hpp +++ /dev/null @@ -1,123 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file metric_on_idx.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-02-14 - */ -#ifndef PAAL_METRIC_ON_IDX_HPP -#define PAAL_METRIC_ON_IDX_HPP - -#include "paal/data_structures/bimap_traits.hpp" -#include "paal/data_structures/metric/basic_metrics.hpp" - -namespace paal { -namespace data_structures { - - -struct read_values_tag{}; -struct read_indexes_tag{}; - -/** - * @brief This metric keeps inner metric and index. - * Given vertices are reindex and passed to inner metric. - * - * @tparam Metric - * @tparam Bimap - */ -template class metric_on_idx { - Metric m_metric; - Bimap m_idx; - using btraits = bimap_traits::type>; - - auto read(typename btraits::Val v, read_values_tag) const -> decltype(m_idx.get_idx(v)) { - return m_idx.get_idx(v); - } - - auto read(typename btraits::Idx v, read_indexes_tag) const -> decltype(m_idx.get_val(v)) { - return m_idx.get_val(v); - } - - template - auto read(Vertex && v) const -> decltype(this->read(v, Strategy())) { - return read(v, Strategy{}); - } - - public: - - /** - * @brief constructor - * - * @param m - * @param idx - */ - metric_on_idx(Metric m, Bimap idx) - : m_metric(m), m_idx(idx) {} - - /** - * @brief operator() - * - * @param i - * @param j - * - * @return - */ - template - auto operator()(const Vertex & i, const Vertex & j) { - return m_metric(read(i), read(j)); - } - - /** - * @brief operator() const - * - * @param i - * @param j - * - * @return - */ - template - auto operator()(const Vertex & i, const Vertex & j) const { - return m_metric(read(i), read(j)); - } -}; - -/** - * @brief make for metric_on_idx - * - * @tparam Metric - * @tparam Bimap - * @param m - * @param b - * - * @return - */ -template -metric_on_idx make_metric_on_idx(Metric && m, - Bimap && b) { - return metric_on_idx(std::forward(m), 
std::forward(b)); -} - - -template -struct metric_traits> : -public _metric_traits, - typename bimap_traits::type>::Idx> -{}; - -template -struct metric_traits> : -public _metric_traits, - typename bimap_traits::type>::Val> -{}; - - -} //! data_structures -} //! paal -#endif // PAAL_METRIC_ON_IDX_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/metric/metric_to_bgl.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/metric/metric_to_bgl.hpp deleted file mode 100644 index 991fde8c5..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/metric/metric_to_bgl.hpp +++ /dev/null @@ -1,99 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file metric_to_bgl.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-02-01 - */ -#ifndef PAAL_METRIC_TO_BGL_HPP -#define PAAL_METRIC_TO_BGL_HPP - -#define BOOST_RESULT_OF_USE_DECLTYPE - -#include "paal/data_structures/bimap.hpp" -#include "paal/data_structures/metric/metric_traits.hpp" -#include "paal/data_structures/metric/metric_on_idx.hpp" -#include "paal/utils/functors.hpp" - -#include -#include - -namespace paal { -namespace data_structures { -// TODO it would be nice to adapt Matrix + something to bgl - -/** - * @brief type of adjacency_matrix, for given metric - * - * @tparam Metric - */ -template struct adjacency_matrix { - using MT = data_structures::metric_traits; - using type = boost::adjacency_matrix< - boost::undirectedS, boost::no_property, - boost::property>; -}; - -/** - * @brief we assume that vertices is sequence - * of values (0, vertices.size()). 
- * - * @param m - * @param vertices - */ -template -typename adjacency_matrix::type metric_to_bgl(const Metric &m, - Vertices && vertices) { - using Graph = typename adjacency_matrix::type; - const unsigned N = boost::distance(vertices); - using MT = metric_traits; - using Dist = typename MT::DistanceType; - Graph g(N); - for (auto && v : vertices) { - for (auto && w : vertices) { - if (v < w) { - bool succ = add_edge( - v, w, boost::property(m(v, w)), - g).second; - assert(succ); - } - } - } - return g; -} - -/** - * @brief produces graph from metric with index - * - * @tparam Metric - * @tparam Vertices - * @param m - * @param vertices - * @param idx - * - * @return - */ -template -typename adjacency_matrix::type metric_to_bgl_with_index( - const Metric &m, Vertices && vertices, - bimap::type> &idx) { - using MT = data_structures::metric_traits; - using VertexType = typename MT::VertexType; - idx = data_structures::bimap(vertices); - auto idxMetric = data_structures::make_metric_on_idx(m, idx); - auto get_idx = [&](VertexType v) { return idx.get_idx(v); }; - - return metric_to_bgl(idxMetric, vertices | - boost::adaptors::transformed(get_idx)); -} - -} //!data_structures -} //!paal -#endif // PAAL_METRIC_TO_BGL_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/metric/metric_traits.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/metric/metric_traits.hpp deleted file mode 100644 index 4766ebd36..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/metric/metric_traits.hpp +++ /dev/null @@ -1,47 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file metric_traits.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-03-04 - */ -#ifndef PAAL_METRIC_TRAITS_HPP -#define PAAL_METRIC_TRAITS_HPP - -#include "paal/utils/type_functions.hpp" - -#include - -namespace paal { -namespace data_structures { - -/** - * @brief base for metric traits - * - * @tparam Metric - * @tparam _VertexType - */ -template struct _metric_traits { - using VertexType = _VertexType; - /// Distance type - using DistanceType = puretype(std::declval()( - std::declval(), std::declval())); -}; - -/** - * @brief metric traits - * - * @tparam Metric - */ -template -struct metric_traits : public _metric_traits {}; -} -} -#endif // PAAL_METRIC_TRAITS_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/object_with_copy.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/object_with_copy.hpp deleted file mode 100644 index 51864dbc9..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/object_with_copy.hpp +++ /dev/null @@ -1,116 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file object_with_copy.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-02-01 - */ - -#ifndef PAAL_OBJECT_WITH_COPY_HPP -#define PAAL_OBJECT_WITH_COPY_HPP - -namespace paal { -namespace data_structures { - -/** - * @class object_with_copy - * @brief keeps object and its copy. Invoke all the member functions on both: - * object and its copy. 
- * If you want to invoke member function on both objects, you run the - * object_with_copy::invoke. - * If you want to run member function only on the copy you run - * object_with_copy::invoke_on_copy. - * - * @tparam T type of the contain object - */ -template class object_with_copy { - public: - typedef T ObjectType; - - /** - * @brief constructor - * - * @param t - */ - object_with_copy(T t) : m_obj(std::move(t)), m_copy(m_obj) {} - - /** - * @brief invokes member function on object and copy - * - * @param f - pointer to member function of T - * @param args - arguments for f - * - * @tparam F type of pointer to member function - * @tparam Args... types of member function arguments - * - * @return the same as f - */ - // if you use *. in decltype instead of -> you get - // "sorry, unimplemented: mangling dotstar_expr" :) - template - typename std::result_of::type - invoke(F f, Args... args) { - (m_copy.*(f))(args...); - return (m_obj.*(f))(args...); - } - - /** - * @brief invokes member function on copy - * - * @param f - pointer to member function of T - * @param args - arguments for f - * - * @tparam F type of pointer to member function - * @tparam Args... types of member function arguments - * - * @return the same as f - */ - template - typename std::result_of::type - invoke_on_copy(F f, Args... 
args) const { - return (m_copy.*(f))(args...); - } - - /** - * @brief easier way for invoking const member functions - * - * @return T* - */ - const T *operator->() const { return &m_obj; } - - /** - * @brief getter for inner object - * - * @return member object - */ - T &get_obj() { return m_obj; } - - /** - * @brief getter for inner object - * - * @return member object - */ - const T &get_obj() const { return m_obj; } - - private: - /** - * @brief Object - */ - T m_obj; - /** - * @brief Copy of the object - */ - mutable T m_copy; -}; - -} // data_structures -} // paal - -#endif // PAAL_OBJECT_WITH_COPY_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/splay_tree.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/splay_tree.hpp deleted file mode 100644 index 825b8c616..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/splay_tree.hpp +++ /dev/null @@ -1,653 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file splay_tree.hpp - * @brief - * @author unknown - * @version 1.0 - * @date 2013-07-24 - */ -#ifndef PAAL_SPLAY_TREE_HPP -#define PAAL_SPLAY_TREE_HPP - -#include -#include -#include - -#include -#include -#include -#include -#include - -namespace paal { -namespace data_structures { - -template class splay_tree; - -namespace detail { -/** - * @param node root of a subtree - * @returns size of subtree - */ -template std::size_t node_size(const NPtr &node) { - return !node ? 
0 : node->size(); -} - -template class Node; - -/** - * @brief copy node pointer - * - * @tparam N - * @param node - * - * @return - */ -template -std::unique_ptr> copy_node(std::unique_ptr> const &node) { - // TODO on c++14 change to make_unique - return (!node) ? nullptr : std::unique_ptr>(new Node(*node)); -} - -class forward_tag {}; -class reversed_tag {}; - -inline reversed_tag other(forward_tag) { - return reversed_tag{}; -} - -inline forward_tag other(reversed_tag) { - return forward_tag{}; -} - - -/** - * Node of a splay_tree. - * - * Left/right relaxation should be understood as follows. - * Meaning of left/right field changes iff xor of all bits on the path to the - * root is 1. This enables us to reverse entire subtree in constant time (by - * flipping bit in the root). Normalization is needed to determine which child -* is - * the real left/right. */ -template class Node { - public: - using value_type = V; - using node_type = Node; - using node_ptr = std::unique_ptr; - - /** @param val stored value */ - explicit Node(const value_type &val) - : val_(val), parent_(nullptr), reversed_(false), size_(1) {} - - /// constructor - Node(const Node &n) - : val_(n.val_), left_(copy_node(n.left_)), right_(copy_node(n.right_)), - parent_(nullptr), reversed_(n.reversed_), size_(n.size_) { - if (right_) { - right_->parent_ = this; - } - if (left_) { - left_->parent_ = this; - } - } - - /** @returns parent node */ - node_type *parent() { return parent_; } - - void set_parent(node_type *p) { parent_ = p; } - - /** @brief detaches this node from parent */ - void make_root() { parent_ = nullptr; } - - /** @returns left child pointer */ - node_ptr &left() { - normalize(); - return left_; - } - - /** - * @brief sets left child pointer - * @param node new child - */ - void set_left(node_ptr node) { - set(std::move(node), reversed_tag{}); - } - - /** @returns true right child pointer */ - node_ptr &right() { - normalize(); - return right_; - } - - void set_right(node_ptr 
node) { - set(std::move(node), forward_tag{}); - } - - /** - * @brief sets child pointer - * @param node new child - */ - template void set(node_ptr node, Direction dir_tag) { - normalize(); - set_internal(std::move(node), dir_tag); - update_size(); - } - - /** - * @brief sets child pointer (no relaxation) - * @param node new child - */ - template - void set_internal(node_ptr node, Direction dir_tag) { - if (node != nullptr) { - node->parent_ = this; - } - child(dir_tag) = std::move(node); - } - - /** @brief recomputes subtree size from sizes of children's subtrees */ - void update_size() { size_ = 1 + node_size(left_) + node_size(right_); } - - node_ptr &child(reversed_tag) { return left(); } - - node_ptr &child(forward_tag) { return right(); } - - template node_type *extreme(Direction dir_tag) { - node_type *node = this; - normalize(); - while (node->child(dir_tag).get() != nullptr) { - node = node->child(dir_tag).get(); - node->normalize(); - } - return node; - } - - - /** @returns next in same tree according to infix order - * WARNING, we assume that path from root to the this node is normalized*/ - template node_type *advance(Direction dir_tag) { - node_type *node = child(dir_tag).get(); - if (node != nullptr) { - return node->extreme(other(dir_tag)); - } else { - node_type *last = nullptr; - node = this; - while (true) { - last = node; - node = node->parent(); - if (node == nullptr) { - return nullptr; - } else if (node->child(other(dir_tag)).get() == last) { - return node; - } - } - } - } - - - /** @returns next in same tree according to infix order - * WARNING, we assume that path from root to the this node is normalized*/ - node_type *next() { - return advance(forward_tag{}); - } - - /** @returns previous in same tree according to infix order */ - node_type *prev() { - return advance(reversed_tag{}); - } - - /** @returns size of subtree */ - std::size_t size() { return size_; } - - /** @brief lazily reverses order in subtree */ - void subtree_reverse() { 
reversed_ ^= 1; } - - /** @brief locally relaxes tree */ - void normalize() { - if (reversed_) { - std::swap(left_, right_); - if (left_ != nullptr) { - left_->subtree_reverse(); - } - if (right_ != nullptr) { - right_->subtree_reverse(); - } - reversed_ = false; - } - } - - /** @brief relaxes all nodes on path from root to this */ - void normalize_root_path() { - node_type *node = parent(); - if (node != nullptr) { - node->normalize_root_path(); - } - normalize(); - } - - /// value of the node - value_type val_; - - private: - static const bool k_def_left = 0; - node_ptr left_, right_; - node_type *parent_; - bool reversed_; - std::size_t size_; -}; - - -/** - * splay_tree elements iterator. - * - * Traversing order is determined by template argument. - */ -template -class Iterator - : public boost::iterator_facade, Node *, - boost::bidirectional_traversal_tag, V &> { - using ST = splay_tree; - using node_ptr = Node *; - - public: - using value_type = V; - using node_type = Node; - - /** @brief iterator after last element */ - Iterator() : current_(nullptr), rotation_cnt_(0), splay_(nullptr) {} - - /** - * @brief iterator to element in given node - * @param node node storing element pointed by iterator - * @param splay pointer to the splay tree - */ - explicit Iterator(node_ptr node, const ST *splay) - : current_(node), rotation_cnt_(0), splay_(splay) {} - - /** - * @brief copy constructor - * @param other iterator to be copied - */ - Iterator(const Iterator &other) - : current_(other.current_), rotation_cnt_(0), splay_(other.splay_) {} - - private: - friend class boost::iterator_core_access; - friend class splay_tree; - - void normalize() { - if (rotation_cnt_ != splay_->get_rotation_cnt()) { - current_->normalize_root_path(); - rotation_cnt_ = splay_->get_rotation_cnt(); - } - } - - /** @brief increments iterator */ - void increment() { - normalize(); - current_ = current_->advance(direction_tag{}); - } - - /** @brief decrements iterator */ - void decrement() { 
- normalize(); - current_ = current_->advance(other(direction_tag{})); - } - - /** - * @param other iterator to be compared with - * @returns true iff iterators point to the same node - */ - bool equal(const Iterator &other) const { - return this->current_ == other.current_; - } - - /** @returns reference to pointed element */ - value_type &dereference() const { return current_->val_; } - - /** pointed node */ - node_ptr current_; - std::size_t rotation_cnt_; - const ST *splay_; -}; - -} //!detail - -/** - * Splay trees with logarithmic reversing of any subsequence. - * - * All tree operations are amortized logarithmic time in size of tree, - * each element is indexed by number of smaller elements than this element. - * Note that lookups are also amortized logarithmic in size of tree. Order of - * elements is induced from infix ordering of nodes storing these elements. - */ -template class splay_tree { - detail::forward_tag forward_tag; - detail::reversed_tag reversed_tag; - - public: - using value_type = T; - using node_type = detail::Node; - using node_ptr = typename node_type::node_ptr; - using iterator = detail::Iterator; - using const_iterator = const iterator; - using reverse_iterator = detail::Iterator; - using const_reverse_iterator = const reverse_iterator; - - splay_tree() = default; - - /** - * @brief constructs tree from elements between two iterators - * @param b iterator to first element - * @param e iterator to element after last - */ - template splay_tree(const I b, const I e) { - root_ = build_tree(b, e); - } - - /// constructor - splay_tree(splay_tree &&splay) = default; - - /// operator= - splay_tree &operator=(splay_tree &&splay) = default; - // splay.rotation_cnt_ is not 0 after this move but it doesn't matter; - - /// operator= - splay_tree &operator=(splay_tree &splay) { - if (&splay == this) return *this; - splay_tree sp(splay); - *this = std::move(sp); - return *this; - } - - /// constructor - splay_tree(const splay_tree &splay) : 
root_(copy_node(splay.root_)) { - auto i = begin(); - auto e = end(); - for (; i != e; ++i) { - t_to_node_.insert(std::make_pair(*i, i.current_)); - } - } - - /** - * @brief creates tree from elements in std::vector - * @param array vector container - */ - template explicit splay_tree(const A &array) { - root_ = build_tree(std::begin(array), std::end(array)); - } - - /** @returns forward iterator to first element in container */ - iterator begin() const { - return (root_ == nullptr) - ? end() - : iterator(root_->extreme(reversed_tag), this); - } - - /** @returns forward iterator to element after last in container */ - iterator end() const { return iterator(); } - - /** @returns reverse iterator to last element in container */ - reverse_iterator rbegin() { - return (root_ == nullptr) - ? rend() - : reverse_iterator(root_->extreme(forward_tag), this); - } - - /** @returns reverse iterator to element before first in container */ - reverse_iterator rend() { return reverse_iterator(); } - - /** @returns number of elements in tree */ - std::size_t size() const { return (root_ == nullptr) ? 
0 : root_->size(); } - - /** @returns true iff tree contains no elements */ - bool empty() { return (root_ == nullptr); } - - /** @param i index of referenced element */ - value_type &operator[](std::size_t i) const { return find(i)->val_; } - - /** @param t referenced element */ - std::size_t get_idx(const T &t) const { - node_type *node = t_to_node_.at(t); - if (node == nullptr) { - return -1; - } - node->normalize_root_path(); - - std::size_t i = node_size(node->left()); - while (node != root_.get()) { - if (node->parent()->left().get() == node) { - node = node->parent(); - } else { - node = node->parent(); - i += node_size(node->left()) + 1; - } - } - return i; - } - - /** - * @brief gets rotationCnt() - * - * @return - */ - std::size_t get_rotation_cnt() const { return rotation_cnt_; } - - /** - * @brief splays tree according to splay policy - * @param i index of element to become root - */ - iterator splay(std::size_t i) const { - splay_internal(find(i)); - return iterator(root_.get(), this); - } - - /** - * @brief splits sequence, modified this contains elements {0, ..., i} - * @param i index of last element of this after modification - * @returns tree containing elements {i+1, ...} - */ - splay_tree split_higher(std::size_t i) { return split(i, forward_tag); } - - /** - * @brief splits sequence, modified this contains elements {i, ...} - * @param i index of first element of this after modification - * @returns tree containing elements {0, ..., i-1} - */ - splay_tree split_lower(std::size_t i) { return split(i, reversed_tag); } - - /** - * @brief merges given tree to the right of the biggest element of this - * @param other tree to be merged - */ - void merge_right(splay_tree other) { merge(std::move(other), forward_tag); } - - /** - * @brief merges given tree to the left of the smallest element of this - * @param other tree to be merged - */ - void merge_left(splay_tree other) { merge(std::move(other), reversed_tag); } - - /** - * @brief reverses 
subsequence of elements with indices in {i, ..., j} - * @param i index of first element of subsequence - * @param j index of last element of subsequence - */ - void reverse(std::size_t i, std::size_t j) { - assert(i <= j); - // split lower - splay_tree ltree = split_lower(i); - // split higher - splay_tree rtree = split_higher(j - i); - // reverse - root_->subtree_reverse(); - // merge - merge_left(std::move(ltree)); - merge_right(std::move(rtree)); - } - - private: - /** @brief creates tree with given node as a root */ - explicit splay_tree(node_ptr root) : root_(std::move(root)) {} - - template - splay_tree split(std::size_t i, Direction dir_tag) { - splay(i); - node_ptr new_root = std::move(root_->child(dir_tag)); - if (new_root != nullptr) { - new_root->make_root(); - } - if (root_ != nullptr) { - root_->update_size(); - } - - return splay_tree(std::move(new_root)); - } - - iterator splay(detail::forward_tag) const { - return splay(root_->size() - 1); - } - - iterator splay(detail::reversed_tag) const { return splay(0); } - - template - void merge(splay_tree other, Direction dir_tag) { - if (other.root_ == nullptr) { - return; - } - splay(dir_tag); - assert(root_->child(dir_tag) == nullptr); - root_->set(std::move(other.root_), dir_tag); - } - - - node_ptr &get_parent(node_ptr &node) const { - assert(node); - node_type *parent = node->parent(); - assert(parent != nullptr); - node_type *granpa = parent->parent(); - if (granpa == nullptr) { - return root_; - } else { - if (granpa->left().get() == parent) { - return granpa->left(); - } else { - assert(granpa->right().get() == parent); - return granpa->right(); - } - } - } - - /** - * @brief splays given node to tree root - * @param node node of a tree to be moved to root - */ - void splay_internal(node_ptr &node) const { - assert(node); - if (node == root_) { - return; - } - node_ptr &parent = get_parent(node); - if (parent == root_) { - if (node == parent->left()) { - rotate(root_, forward_tag); - } else { - 
assert(node == parent->right()); - rotate(root_, reversed_tag); - } - } else { - node_ptr &grand = get_parent(parent); - if (node == parent->left() && parent == grand->left()) { - rotate(grand, forward_tag); - rotate(grand, forward_tag); - } else if (node == parent->right() && parent == grand->right()) { - rotate(grand, reversed_tag); - rotate(grand, reversed_tag); - } else if (node == parent->right() && parent == grand->left()) { - rotate(parent, reversed_tag); - rotate(grand, forward_tag); - } else if (node == parent->left() && parent == grand->right()) { - rotate(parent, forward_tag); - rotate(grand, reversed_tag); - } - splay_internal(grand); - } - } - - /** - * @brief rotates tree over given node - * @param parent pivot of rotation - */ - template - void rotate(node_ptr &parent, Direction dir_tag) const { - auto other_tag = other(dir_tag); - node_ptr node = std::move(parent->child(other_tag)); - node.swap(parent); - parent->set_parent(node->parent()); - node->set(std::move(parent->child(dir_tag)), - other_tag); // node size is updated here - parent->set(std::move(node), dir_tag); // node size is updated here - } - - - /** - * @brief recursively creates balanced tree from a structure described - * by two random access iterators - * @param b iterator to first element - * @param e iterator to element after last - */ - template node_ptr build_tree(const I b, const I e) { - if (b >= e) { - return nullptr; - } - std::size_t m = (e - b) / 2; - node_ptr node{ new node_type(*(b + m)) }; - bool ret = - t_to_node_.insert(std::make_pair(*(b + m), node.get())).second; - assert(ret); - node->set_left(build_tree(b, b + m)); - node->set_right(build_tree(b + m + 1, e)); - return node; - } - - - /** - * @brief find n-th element in tree (counting from zero) - * @param i number of elements smaller than element to be returned - * @returns pointer to found node or nullptr if doesn't exist - */ - node_ptr &find(std::size_t i) const { - node_ptr *node = &root_; - while (true) { - if 
(!*node) { - return *node; - } - node_ptr *left = &((*node)->left()); - std::size_t left_size = node_size(*left); - if (left_size == i) { - return *node; - } else if (left_size > i) { - node = left; - } else { - i -= left_size + 1; - node = &(*node)->right(); - } - } - } - - /** root node of a tree */ - std::size_t rotation_cnt_ = 0; // to keep iterators consistent with tree - mutable node_ptr root_; - std::unordered_map t_to_node_; -}; -} -} - -#endif // PAAL_SPLAY_TREE_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/stack.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/stack.hpp deleted file mode 100644 index 6173ae8e0..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/stack.hpp +++ /dev/null @@ -1,42 +0,0 @@ -/** - * @file stack.hpp - * @brief stack that doesn't call destructor on pop - * @author Piotr Smulewicz, Robert Rosołek - * @version 1.0 - * @date 2014-08-12 - */ -#ifndef PAAL_STACK_HPP -#define PAAL_STACK_HPP - -#include - -namespace paal { -namespace data_structures { - -/// Stack -template class stack { - std::vector m_v; - std::size_t m_size; - public: - /// constructor - stack() : m_v(std::vector()), m_size(0) {} - /// push - void push() { - if (++m_size > m_v.size()) m_v.resize(m_size); - } - /// pop doesn't call destructor - void pop() { --m_size; } - /// top - const T &top() const { return m_v[m_size - 1]; } - /// top - T &top() { return m_v[m_size - 1]; } - /// empty - bool empty() const { return m_size == 0; } - /// size - std::size_t size() const { return m_size; } -}; - -} // !data_structures -} // !paal - -#endif /* PAAL_STACK_HPP */ diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/subset_iterator.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/subset_iterator.hpp deleted file mode 100644 index 02fd8a563..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/subset_iterator.hpp +++ /dev/null @@ -1,352 +0,0 @@ -//======================================================================= 
-// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file subset_iterator.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-02-01 - */ - -#include "paal/utils/type_functions.hpp" -#include "paal/utils/make_tuple.hpp" - -#include -#include -#include - -#ifndef PAAL_SUBSET_ITERATOR_HPP -#define PAAL_SUBSET_ITERATOR_HPP - -namespace paal { -namespace data_structures { -/** - * @tparam Iterator - * @tparam k - */ -template -class subsets_iterator_engine - : private subsets_iterator_engine { - protected: - /** - * @brief current being - * - * @return - */ - Iterator get_begin() { return m_begin; } - - /** - * @brief end is stored in the subsets_iterator_engine<0> - * - * @return - */ - Iterator get_end() { return base::get_end(); } - - /** - * @brief sets all iterators to m_end - */ - void set_to_end() { - m_begin = get_end(); - base::set_to_end(); - } - - public: - using base = subsets_iterator_engine; - - /** - * @brief constructor - * - * @param begin - * @param end - */ - subsets_iterator_engine(Iterator begin, Iterator end) : base(begin, end) { - if (k == 1) { - m_begin = begin; - } else { - auto baseBegin = base::get_begin(); - if (baseBegin != end) { - m_begin = ++baseBegin; - if (m_begin == end) { - // when we are at the end all iterators are set to m_end - base::set_to_end(); - } - } else { - // when we are at the end all iterators are set to m_end - set_to_end(); - } - } - } - - subsets_iterator_engine() = default; - - /** - * @brief sets next configuration of iterators, pointing to next subset - * - * @return - */ - bool next() { - ++m_begin; - while (m_begin == get_end()) { - if (base::next()) { - m_begin = base::get_begin(); - if (m_begin == get_end()) { - // when we are at the end all iterators are set to 
m_end - base::set_to_end(); - return false; - } - ++m_begin; - } else { - return false; - } - } - return true; - } - - // TODO copy paste (combine_iterator) - /** - * @brief calls arbitrary function f on (*m_curr)... - * - * @tparam F - * @tparam Args - * @param f - * @param args - * - * @return - */ - template - auto call(F f, Args &&... args) const->decltype(std::declval().call( - std::move(f), std::forward(args)..., *std::declval())) { - return base::call(std::move(f), *m_begin, std::forward(args)...); - } - - /** - * @brief operator== - * - * @param left - * @param right - * - * @return - */ - friend bool operator==(const subsets_iterator_engine &left, - const subsets_iterator_engine &right) { - return left.m_begin == right.m_begin && - static_cast(left) == static_cast(right); - } - - private: - Iterator m_begin; -}; - -/** - * @brief specialization for k==0 for boundary cases. - * This class stores iterator pointing to the end of the input collection - * - * @tparam Iterator - */ -template class subsets_iterator_engine<0, Iterator> { - protected: - /** - * @brief constructor - * - * @param begin - * @param end - */ - subsets_iterator_engine(Iterator begin, Iterator end) : m_end(end) {} - - subsets_iterator_engine() = default; - - /** - * @brief get_begin, fake returns m_end - * - * @return - */ - Iterator get_begin() { return m_end; } - - /** - * @brief get_end, returns end of the input collection - * - * @return - */ - Iterator get_end() { return m_end; } - - /** - * @brief boundary case, does nothing - */ - void set_to_end() {} - - public: - /** - * @brief boundary case, does nothing - * - * @return - */ - bool next() const { return false; } - - /** - * @brief actually calls f for given arguments - * - * @tparam F - * @tparam Args - * @param f - * @param args - * - * @return - */ - template - auto call(F f, - Args &&... 
args) const->decltype(f(std::forward(args)...)) { - return f(std::forward(args)...); - } - - /** - * @brief operator==, always true - * - * @param left - * @param right - * - * @return - */ - friend bool operator==(const subsets_iterator_engine &left, - const subsets_iterator_engine &right) { - return true; - } - - private: - Iterator m_end; -}; - -/** - * @brief make for subsets_iterator_engine - * - * @tparam k - * @tparam Iterator - * @param b - * @param e - * - * @return - */ -template -subsets_iterator_engine make_subsets_iterator_engine(Iterator b, - Iterator e) { - return subsets_iterator_engine(b, e); -} - -/** - * @class subsets_iterator - * @brief Iterator to all k-subsets of given collection. - * - * @tparam Iterator - * @tparam k - * @tparam Joiner - */ -template -class subsets_iterator : public boost::iterator_facade< - subsets_iterator, - puretype( - (subsets_iterator_engine().call(std::declval()))) - // , typename - // std::iterator_traits::iterator_category //TODO above forward - // tags are not yet implemented - , - typename boost::forward_traversal_tag, - decltype( - subsets_iterator_engine().call(std::declval()))> { - public: - /** - * @brief constructor - * - * @param begin - * @param end - * @param joiner - */ - subsets_iterator(Iterator begin, Iterator end, Joiner joiner = Joiner{}) - : m_joiner(joiner), m_iterator_engine(begin, end) {} - - /** - * @brief default constructor represents end of the range - */ - subsets_iterator() = default; - - private: - - /** - * @brief reference type of the iterator - */ - using ref = decltype( - subsets_iterator_engine().call(std::declval())); - - friend class boost::iterator_core_access; - - /** - * @brief increments iterator - */ - void increment() { m_iterator_engine.next(); } - - /** - * @brief equal function - * - * @param other - * - * @return - */ - bool equal(subsets_iterator const &other) const { - return this->m_iterator_engine == other.m_iterator_engine; - } - - /** - * @brief dereference - * - * 
@return - */ - ref dereference() const { return m_iterator_engine.call(m_joiner); } - // TODO add random access support - - Joiner m_joiner; - subsets_iterator_engine m_iterator_engine; -}; - -/** - * @brief make for subsets_iterator - * - * @tparam Iterator - * @tparam k - * @tparam Joiner - * @param b - * @param e - * @param joiner - * - * @return - */ -//TODO change name to subset_range() -template -boost::iterator_range> -make_subsets_iterator_range(Iterator b, Iterator e, Joiner joiner = Joiner{}) { - typedef subsets_iterator SI; - return boost::make_iterator_range(SI(b, e, joiner), SI(e, e, joiner)); -} - -/** - * @brief - * - * @tparam k - * @tparam Range - * @tparam Joiner - * @param range - * @param joiner - * - * @return - */ -template -auto make_subsets_iterator_range(const Range & range, Joiner joiner = Joiner{}) { - return make_subsets_iterator_range(std::begin(range), std::end(range), std::move(joiner)); -} - -} // data_structures -} // paal - -#endif // PAAL_SUBSET_ITERATOR_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/tabu_list/tabu_list.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/tabu_list/tabu_list.hpp deleted file mode 100644 index 4c2308a36..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/tabu_list/tabu_list.hpp +++ /dev/null @@ -1,135 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file tabu_list.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2014-01-09 - */ -#ifndef PAAL_TABU_LIST_HPP -#define PAAL_TABU_LIST_HPP - -#include - -#include -#include - -namespace paal { -namespace data_structures { - -/** - * @brief This Tabu list remember some number of last moves - * - * @tparam Move - */ -template struct tabu_list_remember_move { - - /** - * @brief tabu_list_remember_move constructor - * - * @param size - */ - tabu_list_remember_move(unsigned size) - : m_size(size), m_forbiden_moves_set(size) {} - - /** - * @brief is tabu member function - * - * @tparam Solution - * @param move - * - * @return - */ - template - bool is_tabu(const Solution &, Move move) const { - return is_tabu(std::move(move)); - } - - /** - * @brief accept member function - * - * @tparam Solution - * @param move - */ - template void accept(const Solution &, Move move) { - assert(!is_tabu(move)); - m_forbiden_moves_set.insert(move); - if (m_forbiden_moves_fifo.size() == m_size) { - m_forbiden_moves_set.erase(m_forbiden_moves_fifo.front()); - m_forbiden_moves_fifo.pop_front(); - } - m_forbiden_moves_fifo.push_back(std::move(move)); - } - - private: - /** - * @brief is tabu does not depend on Solution here - * - * @param move - * - * @return - */ - bool is_tabu(const Move &move) const { - return m_forbiden_moves_set.find(move) != m_forbiden_moves_set.end(); - } - - unsigned m_size; - std::unordered_set> m_forbiden_moves_set; - std::deque m_forbiden_moves_fifo; -}; - -/** - * @brief This Tabu list remember both current solution and move - * It is implemented as tabu_list_remember_move> - * with nullptr passed as dummy solution - * - * @tparam Solution - * @tparam Move - */ -template -class tabu_list_remember_solution_and_move - : tabu_list_remember_move> { - typedef 
tabu_list_remember_move> base; - - public: - /** - * @brief constructor - * - * @param size - */ - tabu_list_remember_solution_and_move(unsigned size) : base(size) {} - - /** - * @brief is_tabu redirects work to base class - * - * @param s - * @param move - * - * @return - */ - bool is_tabu(Solution s, Move move) const { - return base::is_tabu(nullptr, - std::make_pair(std::move(s), std::move(move))); - } - - /** - * @brief accept redirects work to base class - * - * @param s - * @param move - */ - void accept(Solution &s, const Move &move) { - base::accept(nullptr, std::make_pair(std::move(s), std::move(move))); - } -}; - -} //!data_structures -} //!paal - -#endif // PAAL_TABU_LIST_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/thread_pool.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/thread_pool.hpp deleted file mode 100644 index 8cb08c73f..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/thread_pool.hpp +++ /dev/null @@ -1,60 +0,0 @@ -/** - * @file thread_pool.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2014-12-03 - */ -#ifndef PAAL_THREAD_POOL_HPP -#define PAAL_THREAD_POOL_HPP - -#define BOOST_ERROR_CODE_HEADER_ONLY -#define BOOST_SYSTEM_NO_DEPRECATED - -#include - -#include -#include -#include -#include -#include - -namespace paal { - -///simple threadpool, class uses also current thread! 
-class thread_pool { - boost::asio::io_service m_io_service; - std::vector m_threadpool; - std::size_t m_threads_besides_current; - -public: - ///constructor - thread_pool(std::size_t size) : m_threads_besides_current(size - 1) { - assert(size > 0); - m_threadpool.reserve(m_threads_besides_current); - } - - ///post new task - template - void post(Functor f) { - //TODO when there is only one thread in thread pool task could be run instantly - m_io_service.post(std::move(f)); - } - - ///run all posted tasks (blocking) - void run() { - auto io_run = [&](){m_io_service.run();}; - for(std::size_t i = 0; i < m_threads_besides_current; ++i) { - m_threadpool.emplace_back(io_run); - } - // if threads_count == 1, we run all tasks in current thread - io_run(); - - for (auto & thread : m_threadpool) thread.join(); - } -}; - - -}//!paal - -#endif /* PAAL_THREAD_POOL_HPP */ diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/ublas_traits.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/ublas_traits.hpp deleted file mode 100644 index ad6a5499c..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/ublas_traits.hpp +++ /dev/null @@ -1,62 +0,0 @@ -//======================================================================= -// Copyright (c) -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file ublas_traits.hpp - * @brief - * @author Tomasz Strozak - * @version 1.0 - * @date 2015-07-09 - */ -#ifndef PAAL_UBLAS_TRAITS_HPP -#define PAAL_UBLAS_TRAITS_HPP - -#include "paal/utils/type_functions.hpp" - -#include -#include -#include -#include -#include -#include - -namespace paal { -namespace data_structures { - -template -struct is_sparse_row : public std::false_type {}; - -template -struct is_sparse_row::container_type::storage_category, - boost::numeric::ublas::sparse_tag>::value>::type> : - public std::true_type {}; - -/// Traits class for matrix related types. -template -struct matrix_type_traits {}; - -/// Specialization matrix_type_traits for ublas matrix. -template -struct matrix_type_traits> { - using coordinate_t = T; - using matrix_row_t = boost::numeric::ublas::matrix_row>; - using vector_t = boost::numeric::ublas::vector; - using matrix_column_major_t = boost::numeric::ublas::matrix; - using matrix_diagonal_t = boost::numeric::ublas::banded_matrix; - /// Return the number of rows of the matrix - static std::size_t num_rows (boost::numeric::ublas::matrix &m) { return m.size1(); } - /// Return the number of columns of the matrix - static std::size_t num_columns (boost::numeric::ublas::matrix &m) { return m.size2(); } -}; - -} // data_structures -} // paal - -#endif // PAAL_UBLAS_TRAITS_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/vertex_to_edge_iterator.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/vertex_to_edge_iterator.hpp deleted file mode 100644 index 6a2a9576f..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/vertex_to_edge_iterator.hpp +++ /dev/null @@ -1,170 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software 
License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file vertex_to_edge_iterator.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-02-20 - */ -#ifndef PAAL_VERTEX_TO_EDGE_ITERATOR_HPP -#define PAAL_VERTEX_TO_EDGE_ITERATOR_HPP - -#include "paal/utils/type_functions.hpp" - -namespace paal { -namespace data_structures { - -// TODO use boost:::iterator_fascade -/** - * @class vertex_to_edge_iterator - * @brief transforms collection to collection of pairs consecutive elements of - * the input collection. - * The last element and the first element are considered consecutive. - * - * @tparam vertex_iterator - */ -template class vertex_to_edge_iterator { - public: - typedef typename std::iterator_traits::value_type Vertex; - typedef std::pair Edge; - - typedef std::forward_iterator_tag iterator_category; - typedef Edge value_type; - typedef ptrdiff_t difference_type; - typedef Edge *pointer; - typedef const Edge &reference; - - /** - * @brief constructor - * - * @param b - * @param e - */ - vertex_to_edge_iterator(vertex_iterator b, vertex_iterator e) - : m_idx(b), m_begin(b), m_end(e) { - move_curr(); - } - - vertex_to_edge_iterator() = default; - - /** - * @brief operator++ post increment - * - * @return - */ - vertex_to_edge_iterator &operator++() { - ++m_idx; - move_curr(); - - return *this; - } - - /** - * @brief operator++ pre increment - * - * @return - */ - vertex_to_edge_iterator operator++(int) { - vertex_to_edge_iterator i(*this); - operator++(); - return i; - } - - /** - * @brief operator != - * - * @param ei - * - * @return - */ - bool operator!=(vertex_to_edge_iterator ei) const { - return !operator==(ei); - } - - /** - * @brief operator== - * - * @param ei - * - * @return - */ - bool operator==(vertex_to_edge_iterator ei) const { - return m_idx == ei.m_idx; - } - - /** - * @brief 
operator-> - * - * @return - */ - const Edge *const operator->() const { return &m_curr; } - - /** - * @brief operator* - * - * @return - */ - const Edge &operator*() const { return m_curr; } - - private: - /** - * @brief moves iterators to next position - */ - void move_curr() { - if (m_idx != m_end) { - m_curr.first = *m_idx; - vertex_iterator next = m_idx; - ++next; - if (next == m_end) { - m_curr.second = *m_begin; - } else { - m_curr.second = *next; - } - } - } - - vertex_iterator m_idx; - vertex_iterator m_begin; - vertex_iterator m_end; - Edge m_curr; -}; - -/** - * @brief make for vertex_to_edge_iterator - * - * @tparam vertex_iterator - * @param b - * @param e - * - * @return - */ -template -vertex_to_edge_iterator -make_vertex_to_edge_iterator(vertex_iterator b, vertex_iterator e) { - return vertex_to_edge_iterator(b, e); -} - -/** - * @brief make for vertex_to_edge_iterator form Vertex iterator pair - * - * @tparam vertex_iterator - * @param r - * - * @return - */ -template -vertex_to_edge_iterator -make_vertex_to_edge_iterator(std::pair r) { - return vertex_to_edge_iterator(r.first, r.second); -} - -} // data_structures -} // paal - -#endif // PAAL_VERTEX_TO_EDGE_ITERATOR_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/voronoi/capacitated_voronoi.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/voronoi/capacitated_voronoi.hpp deleted file mode 100644 index c401f908a..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/voronoi/capacitated_voronoi.hpp +++ /dev/null @@ -1,545 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file capacitated_voronoi.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-03-20 - */ -#ifndef PAAL_CAPACITATED_VORONOI_HPP -#define PAAL_CAPACITATED_VORONOI_HPP - -#include "paal/data_structures/metric/metric_traits.hpp" -#include "paal/utils/irange.hpp" - -#include -#include -#include -#include -#include -#include - -#include - -namespace paal { -namespace data_structures { - -/** - * @class capacitated_voronoi - * @brief This class is assigning vertices demands to capacitated generators in - * such a way that the total cost is minimized. - * The solution is based on the min cost max flow algorithm. - * - * @tparam Metric - * @tparam GeneratorsCapacieties is a functor which for each Generator returns - * its capacity . - * @tparam VerticesDemands is a functor which for each vertex returns its - * demand. - */ -template -class capacitated_voronoi { - public: - /** - * @brief this class store as a distance: - * - sum of distances of assigned vertices to its generators - * - number of vertices without generator - * in the optimum all vertices should be assigned. 
- */ - class Dist { - public: - typedef typename metric_traits::DistanceType DistI; - Dist() = default; - - /** - * @brief constructor - * - * @param real - * @param distToFullAssign - */ - Dist(DistI real, DistI distToFullAssign) - : m_real_dist(real), m_dist_to_full_assignment(distToFullAssign) {} - - /** - * @brief operator- - * - * @param d - */ - Dist operator-(Dist d) { - return Dist( - m_real_dist - d.m_real_dist, - m_dist_to_full_assignment - d.m_dist_to_full_assignment); - } - - /** - * @brief how many vertices are not covered - */ - DistI get_dist_to_full_assignment() const { - return m_dist_to_full_assignment; - } - - /** - * @brief sum of distances from vertices to facilities - */ - DistI get_real_dist() const { return m_real_dist; } - - /** - * @brief operator== - * - * @param d - */ - bool operator==(Dist d) const { - return m_real_dist == d.m_real_dist && - m_dist_to_full_assignment == d.m_dist_to_full_assignment; - } - - /** - * @brief operator> - * - * @param d - */ - bool operator>(Dist d) const { - if (m_dist_to_full_assignment > d.m_dist_to_full_assignment) { - return true; - } else if (m_dist_to_full_assignment < d.m_dist_to_full_assignment) { - return false; - } - return m_real_dist > d.m_real_dist; - } - - /** - * @brief operator+= - * - * @param d - */ - const Dist &operator+=(Dist d) { - m_real_dist += d.m_real_dist; - m_dist_to_full_assignment += d.m_dist_to_full_assignment; - return *this; - } - - /** - * @brief operator+ - * - * @param d - */ - Dist operator+(Dist d) { - Dist ret(d); - ret += *this; - return ret; - } - - /** - * @brief unary -operator - * - * @return - */ - Dist operator-() { - return Dist(-m_real_dist, -m_dist_to_full_assignment); - } - - /** - * @brief Dist + scalar (interpreted as real distance) - * - * @param di - * @param d - * - * @return - */ - friend Dist operator+(DistI di, Dist d) { - return Dist(d.m_real_dist + di, d.m_dist_to_full_assignment); - } - - /** - * @brief operator<< - * - * @tparam Stream - * 
@param s - * @param d - * - * @return - */ - template - friend Stream &operator<<(Stream &s, Dist d) { - return s << d.m_dist_to_full_assignment << " " << d.m_real_dist; - } - - private: - DistI m_real_dist; - DistI m_dist_to_full_assignment; - }; - typedef typename Dist::DistI DistI; - typedef typename metric_traits::VertexType VertexType; - typedef std::set Generators; - typedef std::vector Vertices; - - private: - typedef boost::adjacency_list< - boost::listS, boost::vecS, boost::bidirectionalS, - boost::property, - boost::property< - boost::edge_capacity_t, DistI, - boost::property< - boost::edge_residual_capacity_t, DistI, - boost::property::edge_descriptor, - boost::property>>>> - Graph; - typedef boost::graph_traits GTraits; - typedef typename GTraits::edge_descriptor ED; - typedef typename GTraits::edge_iterator EI; - typedef typename GTraits::in_edge_iterator IEI; - typedef typename GTraits::vertex_descriptor VD; - typedef typename boost::property_map< - Graph, boost::edge_residual_capacity_t>::type ResidualCapacity; - typedef typename std::unordered_map> - VertexToGraphVertex; - - /** - * @brief functor transforming edge descriptor into pair : - * (reindexed source, flow on the edge) - */ - struct Trans { - std::pair operator()(const ED &e) const { - return std::make_pair(m_v->get_vertex_for_edge(e), - m_v->get_flow_on_edge(e)); - } - const capacitated_voronoi *m_v; - }; - - typedef boost::transform_iterator> - VForGenerator; - - public: - - /** - * @brief constructor - * - * @param gen - * @param ver - * @param m - * @param gc - * @param vd - * @param costOfNoGenerator - */ - capacitated_voronoi(const Generators &gen, Vertices ver, const Metric &m, - const GeneratorsCapacieties &gc, - const VerticesDemands &vd, - DistI costOfNoGenerator = - std::numeric_limits::max()) - : m_s(add_vertex_to_graph()), m_t(add_vertex_to_graph()), - m_vertices(std::move(ver)), m_metric(m), m_generators_cap(gc), - m_first_generator_id(m_vertices.size() + 2), - 
m_cost_of_no_generator(costOfNoGenerator) { - for (VertexType v : m_vertices) { - VD vGraph = add_vertex_to_graph(v); - m_v_to_graph_v.insert(std::make_pair(v, vGraph)); - add_edge_to_graph(m_s, vGraph, 0, vd(v)); - } - for (VertexType g : gen) { - add_generator(g); - } - } - - /** - * @brief copy constructor is not default because of rev graph property - * - * @param other - */ - capacitated_voronoi(const capacitated_voronoi &other) - : m_dist(other.m_dist), m_dist_prev(other.m_dist_prev), - m_pred(other.m_pred), m_g(other.m_g), m_s(other.m_s), m_t(other.m_t), - m_generators(other.m_generators), m_vertices(other.m_vertices), - m_metric(other.m_metric), m_generators_cap(other.m_generators_cap), - m_first_generator_id(other.m_first_generator_id), - m_cost_of_no_generator(other.m_cost_of_no_generator), - m_v_to_graph_v(other.m_v_to_graph_v), - m_g_to_graph_v(other.m_g_to_graph_v) { - auto rev = get(boost::edge_reverse, m_g); - for (auto e : boost::as_array(edges(m_g))) { - auto eb = edge(target(e, m_g), source(e, m_g), m_g); - assert(eb.second); - rev[e] = eb.first; - } - } - - /// returns diff between new cost and old cost - Dist add_generator(VertexType gen) { - Dist costStart = get_cost(); - m_generators.insert(gen); - VD genGraph = add_vertex_to_graph(gen); - m_g_to_graph_v.insert(std::make_pair(gen, genGraph)); - for (const std::pair &v : m_v_to_graph_v) { - add_edge_to_graph(v.second, genGraph, m_metric(v.first, gen), - std::numeric_limits::max()); - } - - add_edge_to_graph(genGraph, m_t, 0, m_generators_cap(gen)); - - boost::successive_shortest_path_nonnegative_weights( - m_g, m_s, m_t, predecessor_map(&m_pred[0]).distance_map(&m_dist[0]) - .distance_map2(&m_dist_prev[0])); - - return get_cost() - costStart; - } - - /// returns diff between new cost and old cost - Dist rem_generator(VertexType gen) { - Dist costStart = get_cost(); - m_generators.erase(gen); - auto genGraph = m_g_to_graph_v.at(gen); - auto rev = get(boost::edge_reverse, m_g); - auto 
residual_capacity = get(boost::edge_residual_capacity, m_g); - - // removing flow from the net - for (const ED &e : - boost::as_array(in_edges(genGraph, m_g))) { - bool b; - VD v = source(e, m_g); - if (v == m_t) { - continue; - } - DistI cap = residual_capacity[rev[e]]; - ED edgeFromStart; - std::tie(edgeFromStart, b) = edge(m_s, v, m_g); - assert(b); - residual_capacity[edgeFromStart] += cap; - residual_capacity[rev[edgeFromStart]] -= cap; - } - clear_vertex(genGraph, m_g); - assert(!edge(m_t, genGraph, m_g).second); - assert(!edge(genGraph, m_t, m_g).second); - remove_vertex(genGraph, m_g); - restore_index(); - - boost::successive_shortest_path_nonnegative_weights( - m_g, m_s, m_t, predecessor_map(&m_pred[0]).distance_map(&m_dist[0]) - .distance_map2(&m_dist_prev[0])); - - return get_cost() - costStart; - } - - /** - * @brief getter for generators - * - * @return - */ - const Generators &get_generators() const { return m_generators; } - - /** - * @brief getter for vertices - * - * @return - */ - const Vertices &get_vertices() const { return m_vertices; } - - /** - * @brief member function for getting assignment, for generator. 
- * - * @return returns range of pairs; the first element of pair is the Vertex - * and the second element is the flow from this vertex to given generator - * - */ - boost::iterator_range - get_vertices_for_generator(VertexType gen) const { - IEI ei, end; - VD v = m_g_to_graph_v.at(gen); - auto r = in_edges(v, m_g); - Trans t; - t.m_v = this; - return boost::make_iterator_range(VForGenerator(r.first, t), - VForGenerator(r.second, t)); - } - - /** - * @brief get total cost of the assignment - * - * @return - */ - Dist get_cost() const { - auto residual_capacity = get(boost::edge_residual_capacity, m_g); - DistI resCap = - boost::accumulate(out_edges(m_s, m_g), DistI(0), [&](DistI d, const ED & e) { - return d + residual_capacity[e]; - }); - - DistI cost = boost::find_flow_cost(m_g); - return Dist(cost, resCap); - } - - /** - * @brief operator<< - * - * @tparam OStream - * @param s - * @param v - * - * @return - */ - template - friend OStream &operator<<(OStream &s, capacitated_voronoi &v) { - s << num_vertices(v.m_g) << ", "; - s << v.m_s << ", " << v.m_t << "\n"; - auto verticesToDisplay = vertices(v.m_g); - auto edgesToDisplay = edges(v.m_g); - auto capacity = get(boost::edge_capacity, v.m_g); - auto residual_capacity = get(boost::edge_residual_capacity, v.m_g); - auto name = get(boost::vertex_name, v.m_g); - for (auto v : boost::as_array(verticesToDisplay)) { - s << v << "-> " << name[v] << ", "; - } - s << "\n"; - for (auto e : boost::as_array(edgesToDisplay)) { - s << e << "-> " << residual_capacity[e] << "-> " << capacity[e] - << ", "; - } - s << "\n"; - for (int g : v.m_generators) { - s << g << "\n"; - } - s << "\n"; - for (int g : v.m_vertices) { - s << g << "\n"; - } - s << "\n"; - s << v.m_first_generator_id << "\n"; - s << v.m_cost_of_no_generator << "\n"; - s << "\n"; - for (std::pair g : v.m_v_to_graph_v) { - s << g.first << ", " << g.second << "\n"; - } - s << "\n"; - for (std::pair g : v.m_g_to_graph_v) { - s << g.first << ", " << g.second << "\n"; - 
} - s << "\n"; - return s; - } - - private: - - /** - * @brief resores index (name property in the graph) - */ - void restore_index() { - const unsigned N = num_vertices(m_g); - m_g_to_graph_v.clear(); - auto name = get(boost::vertex_name, m_g); - for (unsigned i : irange(unsigned(m_first_generator_id), N)) { - m_g_to_graph_v[name[i]] = i; - } - } - - /** - * @brief add vertex to auxiliary graph - * - * @param v - * - * @return - */ - VD add_vertex_to_graph(VertexType v = VertexType()) { - VD vG = add_vertex(boost::property(v), - m_g); - int N = num_vertices(m_g); - - m_dist.resize(N); - m_dist_prev.resize(N); - m_pred.resize(N); - return vG; - } - - /** - * @brief add edge to auxiliary graph - * - * @param v - * @param w - * @param weight - * @param capacity - */ - void add_edge_to_graph(VD v, VD w, DistI weight, DistI capacity) { - auto rev = get(boost::edge_reverse, m_g); - ED e, f; - e = add_dir_edge(v, w, weight, capacity); - f = add_dir_edge(w, v, -weight, 0); - rev[e] = f; - rev[f] = e; - } - - /** - * @brief add directed edge - * - * @param v - * @param w - * @param weight - * @param capacity - * - * @return - */ - ED add_dir_edge(VD v, VD w, DistI weight, DistI capacity) { - bool b; - ED e; - auto weightMap = get(boost::edge_weight, m_g); - auto capacityMap = get(boost::edge_capacity, m_g); - auto residual_capacity = get(boost::edge_residual_capacity, m_g); - std::tie(e, b) = add_edge(v, w, m_g); - assert(b); - capacityMap[e] = capacity; - residual_capacity[e] = capacity; - weightMap[e] = weight; - return e; - } - - /** - * @brief gets flow on edge - * - * @param e - * - * @return - */ - DistI get_flow_on_edge(const ED &e) const { - auto capacityMap = get(boost::edge_capacity, m_g); - auto residual_capacity = get(boost::edge_residual_capacity, m_g); - return capacityMap[e] - residual_capacity[e]; - } - - /** - * @brief get reindexed source for edge - * - * @param e - * - * @return - */ - VertexType get_vertex_for_edge(const ED &e) const { - auto name = 
get(boost::vertex_name, m_g); - return name[source(e, m_g)]; - } - - typedef std::vector VPropMap; - VPropMap m_dist; - VPropMap m_dist_prev; - std::vector m_pred; - - Graph m_g; - VD m_s, m_t; - - Generators m_generators; - Vertices m_vertices; - const Metric &m_metric; - const GeneratorsCapacieties &m_generators_cap; - const VD m_first_generator_id; - DistI m_cost_of_no_generator; - VertexToGraphVertex m_v_to_graph_v; - VertexToGraphVertex m_g_to_graph_v; -}; - -} //! data_structures -} //! paal -#endif // PAAL_CAPACITATED_VORONOI_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/voronoi/voronoi.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/voronoi/voronoi.hpp deleted file mode 100644 index 8a9b83abb..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/voronoi/voronoi.hpp +++ /dev/null @@ -1,306 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file voronoi.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-02-01 - */ -#ifndef PAAL_VORONOI_HPP -#define PAAL_VORONOI_HPP - -#include "voronoi_traits.hpp" - -#include "paal/data_structures/metric/metric_traits.hpp" -#include "paal/utils/functors.hpp" - -#include -#include -#include - -#include -#include -#include -#include -#include - -namespace paal { -namespace data_structures { - -/** - * @class voronoi - * @brief simple implementation of the \ref voronoi concept. 
- * - * @tparam Metric - */ -template class voronoi { - public: - typedef typename metric_traits::VertexType VertexType; - typedef std::multimap GeneratorsToVertices; - typedef std::unordered_set> - GeneratorsSet; - typedef typename metric_traits::DistanceType Dist; - // TODO change to vector - typedef GeneratorsSet Vertices; - private: - typedef std::unordered_map> VerticesToGenerators; - - VerticesToGenerators m_vertices_to_generators; - GeneratorsToVertices m_generators_to_vertices; - Vertices m_vertices; - GeneratorsSet m_generators; - - const Metric &m_metric; - const Dist m_cost_of_no_generator; -public: - - /** - * @brief Constructor - * - * @param generators - * @param vertices - * @param m - * @param costOfNoGenerator - */ - voronoi(const GeneratorsSet &generators, Vertices vertices, const Metric &m, - Dist costOfNoGenerator = std::numeric_limits::max()) - : m_vertices(std::move(vertices)), m_metric(m), - m_cost_of_no_generator(costOfNoGenerator) { - for (VertexType f : generators) { - add_generator(f); - } - } - - /** - * @brief Copy constructor - * - * @param v - */ - voronoi(const voronoi &v) - : m_generators_to_vertices(v.m_generators_to_vertices), - m_vertices(v.m_vertices), m_generators(v.m_generators), - m_metric(v.m_metric), - m_cost_of_no_generator(v.m_cost_of_no_generator) { - auto b = m_generators_to_vertices.begin(); - auto e = m_generators_to_vertices.end(); - for (; b != e; ++b) { - m_vertices_to_generators.insert(std::make_pair(b->second, b)); - } - } - - /** - * @brief Move constructor - * - * @param v - */ - voronoi(voronoi &&v) - : m_vertices_to_generators(std::move(v.m_vertices_to_generators)), - m_generators_to_vertices(std::move(v.m_generators_to_vertices)), - m_vertices(std::move(v.m_vertices)), - m_generators(std::move(v.m_generators)), m_metric(v.m_metric), - m_cost_of_no_generator(v.m_cost_of_no_generator) {} - - /// returns diff between new cost and old cost - Dist add_generator(VertexType f) { - Dist cost = Dist(); - 
m_generators.insert(f); - - // first generatorsility - if (m_generators.size() == 1) { - m_vertices_to_generators.clear(); - m_generators_to_vertices.clear(); - for (VertexType v : m_vertices) { - m_vertices_to_generators[v] = - m_generators_to_vertices.insert(std::make_pair(f, v)); - cost += m_metric(v, f); - } - - cost = cost - m_cost_of_no_generator; - - } else { - for (VertexType v : m_vertices) { - Dist d = m_metric(v, f) - dist(v); - if (d < 0) { - cost += d; - assign(v, f); - } - } - } - return cost; - } - - /// returns diff between new cost and old cost - Dist rem_generator(VertexType f) { - Dist cost = Dist(); - if (m_generators.size() == 1) { - cost = m_cost_of_no_generator; - for (VertexType v : m_vertices) { - cost -= dist(v); - } - m_vertices_to_generators.clear(); - m_generators_to_vertices.clear(); - } else { - auto op = - std::bind(utils::not_equal_to(), f, std::placeholders::_1); - auto begin = m_generators_to_vertices.lower_bound(f); - auto end = m_generators_to_vertices.upper_bound(f); - for (; begin != end;) { - auto v = begin->second; - // using the fact that generators is a map - //(with other containers you have to be careful cause of iter - // invalidation) - ++begin; - cost -= dist(v); - cost += adjust_vertex(v, op); - } - } - m_generators.erase(f); - return cost; - } - - /** - * @brief getter for generators - * - * @return - */ - const GeneratorsSet &get_generators() const { return m_generators; } - - /** - * @brief getter for vertices - * - * @return - */ - const Vertices &get_vertices() const { return m_vertices; } - - /** - * @brief getter for vertices assigned to specific generator - * - * @param g - */ - auto get_vertices_for_generator(VertexType g) const -> - decltype(boost::as_array(m_generators_to_vertices.equal_range(g) | - boost::adaptors::map_values)) - { - return boost::as_array(m_generators_to_vertices.equal_range(g) | - boost::adaptors::map_values); - } - - ///operator== - bool operator==(const voronoi & vor) const { - return 
boost::equal(m_generators_to_vertices, vor.m_generators_to_vertices) && - m_cost_of_no_generator == vor.m_cost_of_no_generator && - m_metric == vor.m_metric; - } - - private: - - /** - * @brief distance of vertex to closest generator - * - * @param v - * - * @return - */ - Dist dist(VertexType v) { return m_metric(v, vertex_to_generators(v)); } - - /** - * @brief find new generator for vertex - * only generators satisfying filer condition are considered - * - * @tparam Filter - * @param v - * @param filter - * - * @return - */ - template - Dist adjust_vertex(VertexType v, Filter filter = Filter()) { - bool init = true; - Dist d = Dist(); - VertexType f_best = VertexType(); - for (VertexType f : m_generators) { - if (filter(f)) { - Dist td = m_metric(v, f); - if (init || td < d) { - f_best = f; - d = td; - init = false; - } - } - } - assert(!init); - assign(v, f_best); - return d; - } - - /** - * @brief get generator for given vertex - * - * @param v - * - * @return - */ - VertexType vertex_to_generators(VertexType v) const { - auto i = m_vertices_to_generators.find(v); - assert(i != m_vertices_to_generators.end()); - return i->second->first; - } - - /** - * @brief assign vertex to generator - * - * @param v - * @param f - */ - void assign(VertexType v, VertexType f) { - auto prev = m_vertices_to_generators.at(v); - m_generators_to_vertices.erase(prev); - m_vertices_to_generators[v] = - m_generators_to_vertices.insert(std::make_pair(f, v)); - } - -}; - - -namespace detail { - template - using v_t = typename metric_traits::VertexType; - - template - using generators_set_t = std::unordered_set, boost::hash>>; - - template - using dist_t = typename metric_traits::DistanceType; -} - -///make for voronoi -template -voronoi make_voronoi( - const detail::generators_set_t & generators, - detail::generators_set_t vertices, - const Metric & metric, - detail::dist_t costOfNoGenerator = std::numeric_limits>::max()) -{ - return voronoi(generators, std::move(vertices), metric, 
costOfNoGenerator); -} - -/** - * @brief specialization of voronoi_traits - * - * @tparam Metric - */ -template -struct voronoi_traits> : public _voronoi_traits< - voronoi, typename metric_traits::VertexType> {}; -}; -}; - -#endif // PAAL_VORONOI_HPP diff --git a/patrec/inc/WireCellPatRec/paal/data_structures/voronoi/voronoi_traits.hpp b/patrec/inc/WireCellPatRec/paal/data_structures/voronoi/voronoi_traits.hpp deleted file mode 100644 index 287dfebf7..000000000 --- a/patrec/inc/WireCellPatRec/paal/data_structures/voronoi/voronoi_traits.hpp +++ /dev/null @@ -1,46 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file voronoi_traits.hpp - * @brief voronoi traits - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-03-06 - */ -#ifndef PAAL_VORONOI_TRAITS_HPP -#define PAAL_VORONOI_TRAITS_HPP - -#include "paal/utils/type_functions.hpp" - -namespace paal { -namespace data_structures { - -/** - * @brief voronoi traits base - * - * @tparam V - * @tparam Vertex - */ -template struct _voronoi_traits { - typedef Vertex VertexType; - /// distance type - typedef decltype(std::declval().add_generator( - std::declval())) DistanceType; - - /// Generators set - typedef puretype(std::declval().get_generators()) GeneratorsSet; - - /// vertices set - typedef puretype(std::declval().get_vertices()) VerticesSet; -}; - -/// default VertexType is int. 
-template struct voronoi_traits : public _voronoi_traits {}; -} -} -#endif // PAAL_VORONOI_TRAITS_HPP diff --git a/patrec/inc/WireCellPatRec/paal/distance_oracle/vertex_vertex/thorup_2kminus1.hpp b/patrec/inc/WireCellPatRec/paal/distance_oracle/vertex_vertex/thorup_2kminus1.hpp deleted file mode 100644 index 3625a49c4..000000000 --- a/patrec/inc/WireCellPatRec/paal/distance_oracle/vertex_vertex/thorup_2kminus1.hpp +++ /dev/null @@ -1,536 +0,0 @@ -//======================================================================= -// Copyright (c) -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file thorup_2kminus1.hpp - * @brief - * @author Jakub Ocwieja - * @version 1.0 - * @date 2014-04-28 - */ - -#ifndef PAAL_THORUP_2KMINUS1_HPP -#define PAAL_THORUP_2KMINUS1_HPP - -#include "paal/utils/functors.hpp" -#include "paal/utils/irange.hpp" -#include "paal/utils/assign_updates.hpp" - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -namespace paal { - -/** -* @brief 2k-1 approximate distance oracle -* -* @tparam Graph graph -* @tparam EdgeWeightMap edge weight map -* @tparam VertexIndexMap vertex index map -* @tparam Rand random engine -*/ -template < typename Graph, - typename VertexIndexMap, - typename EdgeWeightMap, - typename Rand=std::default_random_engine - > -class distance_oracle_thorup2kminus1approximation { - using DT = typename boost::property_traits::value_type; - using VT = typename boost::graph_traits::vertex_descriptor; - - //! List of pairs (vertex index, distance to vertex) - using DVect = std::vector< std::pair >; - - //! Maps vertex in a bunch into a distance to it - using BunchMap = std::unordered_map; - - //! Index map stored to access internal structures - VertexIndexMap m_index; - - //! 
For each vertex v a maximal layer number for which v belongs - /** A_0 = V - * A_{i+1} \subset A_i - * A_k = \emptyset - */ - std::vector< int > m_layer_num; - - //! For each vertex v a list of vertices of consecutive layers closest to v - std::vector< DVect > m_parent; - - //! For each vertex v a set of vertices w closer to v than any vertex in layer m_layer_num[w]+1 - std::vector< BunchMap > m_bunch; - - /** - * @brief Fills m_layer_num - * - * @param g Graph - * @param k Approximation parameter = maximal number of layers - * @param p Probability of being chosen to a next layer - * @param random_engine Random engine - * - * @return Number of nonempty layers - */ - int choose_layers(const Graph& g, int k, long double p, Rand & random_engine) { - std::uniform_real_distribution<> dist(0,1); - - int max_layer_num = 0; - long double logp = log(p); - - for (int ind: irange(num_vertices(g))) { - m_layer_num[ind] = std::min(k-1, (int)(log(dist(random_engine)) / logp)); - assign_max(max_layer_num, m_layer_num[ind]); - } - - return max_layer_num+1; - } - - /** - * @brief A visitor implementation for compute_parents Dijkstra's algorithm call - * - * @tparam NearestMap - * @tparam Tag - */ - template - class nearest_recorder : boost::base_visitor< nearest_recorder > { - - //! Stores distances to the closest vertex in a particular layer - NearestMap m_nearest_map; - - public: - using event_filter = Tag; - - //! Constructor - explicit nearest_recorder(NearestMap const nearest_map) : m_nearest_map(nearest_map) {} - - //! Copies nearest value from precdessor - template - void operator()(Edge const e, Graph const &g) const { - auto nearest = get(m_nearest_map, source(e,g)); - put(m_nearest_map, target(e,g), nearest); - } - - }; - - //! 
Constructs a visitor for compute_parents Dijkstra's algorithm call - template - nearest_recorder - make_nearest_recorder(NearestMap nearest_map, Tag) { - return nearest_recorder{nearest_map}; - } - - /** - * @brief Fills m_parent for a single layer - * - * @param g Graph - * @param layer_num Number of layer - */ - void compute_parents(const Graph& g, EdgeWeightMap edge_weight, int layer_num) { - std::vector
distance(num_vertices(g), std::numeric_limits
::max()); - std::vector nearest(num_vertices(g), -1); - std::vector roots; - - for (auto v: boost::as_array(vertices(g))) { - int v_ind = m_index[v]; - if (m_layer_num[v_ind] >= layer_num) { - nearest[v_ind] = v_ind; - distance[v_ind] = DT{}; - roots.push_back(v); - } - } - - boost::dijkstra_shortest_paths_no_init( - g, - roots.begin(), - roots.end(), - boost::dummy_property_map(), - make_iterator_property_map(distance.begin(), m_index, distance[0]), - edge_weight, - m_index, - utils::less{}, - boost::closed_plus
(), - DT{}, - boost::make_dijkstra_visitor(make_nearest_recorder( - make_iterator_property_map(nearest.begin(), m_index, nearest[0]), - boost::on_edge_relaxed{}) - ) - ); - - for (int ind: irange(num_vertices(g))) { - m_parent[ind].push_back(std::make_pair(nearest[ind], distance[ind])); - } - } - - //! A distance type crafted to control logic of compute_cluster Dijkstra's algorithm call - class cluster_dist { - //! An actual distance - DT m_value; - //! Marks unmodified vertices - bool m_unmodified; - - //! Private constructor - cluster_dist(DT value, bool unmodified) : - m_value(value), m_unmodified(unmodified) {} - - public: - //! Public constructor - cluster_dist(DT value = DT{}) : - m_value(value), m_unmodified(false) {} - - //! Allows to create unmodified distances - static cluster_dist - make_limit(DT value = std::numeric_limits
::max()) { - return cluster_dist(value, true); - } - - //! A comparator struct adjusted to recognize unmodified values - /** Unmodified values are not smaller then any modified value. To recognize an umodified value we compare it - * with a maximal modified value of DT - */ - struct less { - //! A comparison operator - bool operator()(cluster_dist a, cluster_dist b) const { - return (a.m_value < b.m_value) && (b.m_unmodified || !a.m_unmodified); - } - }; - - //! Plus operation struct - struct plus { - //! Sum operator - cluster_dist operator()(cluster_dist a, cluster_dist b) const { - return cluster_dist(a.m_value + b.m_value, a.m_unmodified || b.m_unmodified); - } - }; - - //! An accessor to the distance - const DT value() const { - return m_value; - } - }; - - /** - * @brief A property_map with lazy initialization of distances - * - * For each vertex of a graph a distance is initialized with an upper limit on a value which causes edge - * relaxation in compute_cluster Dijkstra's algorithm. - */ - class cluster_distance_wrapper : - public boost::put_get_helper - { - //! A wrapped distance table - std::vector< cluster_dist > *m_distance; - - //! An index map stored to access table structures - VertexIndexMap m_index; - - //! A pointer to a parent table containing initial values of fields of m_distance - /** The values stored here are copied into m_distance table when m_distance fields are accessed for the - * first time. For each vertex of a graph it contains an upper limit on a value which causes edge relaxation - * in compute_cluster Dijkstra's algorithm. - */ - std::vector< DT > *m_limit; - - //! A table storing last access time to m_distance fields - std::vector *m_last_accessed; - - //! A value necessary to interpret m_last_accessed - /** The value is initially different than any value in m_last_accessed. - * However, it is not necessarily bigger. 
- */ - int m_now; - - public: - typedef VT key_type; - typedef cluster_dist value_type; - typedef value_type& reference; - typedef boost::lvalue_property_map_tag category; - - /** - * @brief Constructor - * - * @param distance A helper vector - required to have num_vertices(g) fields - * @param index An index map - * @param limit Compute_cluster Dijkstra's algorithm relaxation limits - * @param last_accessed A helper vector - required to have num_vertices(g) fields - * @param now Required to be different than any last_accessed value - */ - cluster_distance_wrapper( - std::vector< cluster_dist > *distance, - VertexIndexMap index, - std::vector< DT > *limit, - std::vector< int > *last_accessed, - int now) : - m_distance(distance), - m_index(index), - m_limit(limit), - m_last_accessed(last_accessed), - m_now(now) {} - - /** - * @brief Map values accessor - * - * @param key Key - * - * @return Value - */ - reference operator[](const key_type& key) const { - int k_ind = m_index[key]; - if ((*m_last_accessed)[k_ind] != m_now) { - (*m_last_accessed)[k_ind] = m_now; - (*m_distance)[k_ind] = cluster_dist::make_limit((*m_limit)[k_ind]); - } - return (*m_distance)[k_ind]; - } - }; - - /** - * @brief A visitor implementation for a compute_cluster Dijkstra's algorithm call - * - * @tparam DistanceMap - * @tparam Tag - */ - template - class cluster_recorder : boost::base_visitor< cluster_recorder > { - //! Vertex whose cluster is recorded - int m_w_ind; - - //! A pointer to m_bunch field of the oracle - std::vector< BunchMap >* m_bunch; - - //! Index map stored to access internal structures - VertexIndexMap m_index; - - //! 
A distance map - DistanceMap m_distance; - - public: - using event_filter = Tag; - - explicit cluster_recorder(int w_ind, std::vector *bunch, - VertexIndexMap index, DistanceMap distance) : - m_w_ind(w_ind), m_bunch(bunch), m_index(index), m_distance(distance) {} - - template - void operator()(Vertex const v, Graph const &g) const { - (*m_bunch)[m_index[v]].insert(std::make_pair(m_w_ind, m_distance[v].value())); - } - }; - - /** - * @brief - * - * @tparam DistanceMap - * @tparam Tag - * @param cluster - * @param index - * @param distance - * @param Tag - * - * @return A visitor for a compute_cluster Dijkstra's algorithm call - */ - template - cluster_recorder - make_cluster_recorder(int w_ind, std::vector *bunch, VertexIndexMap index, - DistanceMap distance, Tag) { - return cluster_recorder{w_ind, bunch, index, distance}; - }; - - /** - * @brief Fills bunchs with vertices inside a cluster - a set of vertices - * which contains w in its bunch - * - * @param g Graph - * @param edge_weight Edge weights - * @param w Vertex - * @param k Number of layers - * @param limit Dijkstra's algorithm relaxation limits for each layer - * @param distance A helper vector - required to have num_vertices(g) fields - * @param last_accessed A helper vector - require to be initialized with negative values - */ - void compute_cluster(const Graph& g, EdgeWeightMap edge_weight, VT w, int k, - std::vector< std::vector
> &limit, - std::vector &distance, std::vector &last_accessed) { - DVect cluster; - int w_ind = m_index[w]; - int w_layer_num = m_layer_num[w_ind]; - - cluster_distance_wrapper distance_wrapper( - &distance, m_index, &limit[w_layer_num + 1], - &last_accessed, w_ind); - distance_wrapper[w] = cluster_dist(DT{}); - - boost::dijkstra_shortest_paths_no_color_map_no_init( - g, - w, - boost::dummy_property_map(), - distance_wrapper, - edge_weight, - m_index, - typename cluster_dist::less(), - typename cluster_dist::plus(), - cluster_dist(std::numeric_limits
::max()), - cluster_dist(DT{}), - boost::make_dijkstra_visitor(make_cluster_recorder( - w_ind, &m_bunch, m_index, distance_wrapper, - boost::on_examine_vertex{}) - ) - ); - } - - /** - * @brief Fills m_bunch - * - * @param g Graph - * @param edge_weight Edge weight - * @param k Number of layers - */ - void compute_bunchs(const Graph& g, EdgeWeightMap edge_weight, int k) { - //! Initialization of reusable structures - std::vector< std::vector
> limit(k+1, - std::vector
(num_vertices(g), std::numeric_limits
::max())); - for (int l: irange(k)) { - for (int i: irange(num_vertices(g))) { - limit[l][i] = m_parent[i][l].second; - } - } - std::vector distance(num_vertices(g)); - std::vector last_accessed(num_vertices(g), -1); - - for (auto v: boost::as_array(vertices(g))) { - compute_cluster(g, edge_weight, v, k, limit, distance, last_accessed); - } - } - -public: - - /** - * @brief Constructor - * - * @param g graph - * @param index vertex index map - * @param edge_weight edge weight map - * @param k approximation parameter - * @param random_engine random engine - */ - distance_oracle_thorup2kminus1approximation(const Graph &g, - VertexIndexMap index, - EdgeWeightMap edge_weight, - int k, - Rand && random_engine = Rand(5426u)) : - m_index(index), - m_layer_num(num_vertices(g)), - m_parent(num_vertices(g)), - m_bunch(num_vertices(g)) - { - long double p = powl(num_vertices(g), -1./k); - k = choose_layers(g, k, p, random_engine); - for (int layer_num: irange(k)) { - compute_parents(g, edge_weight, layer_num); - } - compute_bunchs(g, edge_weight, k); - } - - //! Returns an 2k-1 approximate distance between two vertices in O(k) time - /** Returns a distance of path going through one of parents of u or v */ - DT operator()(VT u, VT v) const { - int u_ind = m_index[u], v_ind = m_index[v]; - typename std::unordered_map::const_iterator it; - int l = 0; - std::pair middle_vertex = m_parent[u_ind][l]; - while ((it = m_bunch[v_ind].find(middle_vertex.first)) == m_bunch[v_ind].end()) { - ++l; - middle_vertex = m_parent[v_ind][l]; - std::swap(u_ind, v_ind); - } - //! 
Returns d(v, middle) + d(middle, u) - return it->second + middle_vertex.second; - } -}; - -/** -* @brief -* -* @tparam Graph -* @tparam EdgeWeightMap -* @tparam VertexIndexMap -* @tparam Rand -* @param g - given graph -* @param k - approximation parameter -* @param index - graph index map -* @param edge_weight - graph edge weight map -* @param random_engine - random engine -* -* @return 2k-1 approximate distance oracle -*/ -template < typename Graph, - typename EdgeWeightMap, - typename VertexIndexMap, - typename Rand=std::default_random_engine - > -distance_oracle_thorup2kminus1approximation -make_distance_oracle_thorup2kminus1approximation( - const Graph &g, - const int k, - VertexIndexMap index, - EdgeWeightMap edge_weight, - Rand && random_engine = Rand(5426u)) { - return distance_oracle_thorup2kminus1approximation(g, index, edge_weight, k, std::move(random_engine)); -} - -/** -* @brief -* -* @tparam Graph -* @tparam P -* @tparam T -* @tparam R -* @tparam Rand -* @param g - given graph -* @param k - approximation parameter -* @param params - named parameters -* @param random_engine - random engine -* -* @return 2k-1 approximate distance oracle -*/ -template < typename Graph, - typename P = char, - typename T = boost::detail::unused_tag_type, - typename R = boost::no_property, - typename Rand=std::default_random_engine - > -auto -make_distance_oracle_thorup2kminus1approximation( - const Graph &g, - const int k, - const boost::bgl_named_params& params = boost::no_named_parameters(), - Rand && random_engine = Rand(5426u)) - -> distance_oracle_thorup2kminus1approximation { - return make_distance_oracle_thorup2kminus1approximation(g, - k, - choose_const_pmap(get_param(params, boost::vertex_index), g, boost::vertex_index), - choose_const_pmap(get_param(params, boost::edge_weight), g, boost::edge_weight), - std::move(random_engine)); -} - -} //paal - -#endif // PAAL_THORUP_2KMINUS1_HPP diff --git 
a/patrec/inc/WireCellPatRec/paal/dynamic/knapsack/fill_knapsack_dynamic_table.hpp b/patrec/inc/WireCellPatRec/paal/dynamic/knapsack/fill_knapsack_dynamic_table.hpp deleted file mode 100644 index cd4ba69b1..000000000 --- a/patrec/inc/WireCellPatRec/paal/dynamic/knapsack/fill_knapsack_dynamic_table.hpp +++ /dev/null @@ -1,91 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file fill_knapsack_dynamic_table.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-09-29 - */ -#ifndef PAAL_FILL_KNAPSACK_DYNAMIC_TABLE_HPP -#define PAAL_FILL_KNAPSACK_DYNAMIC_TABLE_HPP - -#include "paal/utils/knapsack_utils.hpp" - -namespace paal { -/** - * @brief Computes dynamic algorithm table (valuesBegin, valuesEnd) - * The values collection has element type ValueOrNull, - * The default constructed ValueOrNull should represent empty object. - * This collection is filled using init, compare and combine functors. - * - * @param valuesBegin begin of the table which will store - * the values for specific positions in dynamic algorithm computation - * @param valuesEnd - * @param objects - possible object collection - * @param size - functor, for given opbjedt return its size - * @param combine - for given Objects and value gives new object - * representing adding *Objects to value - * @param compare - compares to values. 
- * @param init - discover element and assign the 0 value - * @param get_range - * - * @tparam ValueIterator has to be RandomAccess output iterator - * @tparam Objects - * @tparam ObjectSizeFunctor - * @tparam Combine - * @tparam Compare - * @tparam Init - * @tparam GetPositionRange - */ -template -detail::FunctorOnRangePValue -fill_knapsack_dynamic_table(ValueIterator valuesBegin, ValueIterator valuesEnd, - Objects &&objects, ObjectSizeFunctor size, - Combine combine, Compare compare, Init init, - GetPositionRange get_range) { - using Size = detail::FunctorOnRangePValue; - - Size maxSize = std::distance(valuesBegin, valuesEnd); - - std::fill(valuesBegin + 1, valuesEnd, boost::none); - init(*valuesBegin); - - auto posRange = get_range(0, maxSize); - - auto objIter = std::begin(objects); - auto oEnd = std::end(objects); - for (; objIter != oEnd; ++objIter) { - auto &&obj = *objIter; - auto objSize = size(obj); - // for each position, from largest to smallest - for (auto pos : posRange) { - auto stat = *(valuesBegin + pos); - // if position was reached before - if (stat != boost::none) { - Size newPos = pos + objSize; - auto &newStat = *(valuesBegin + newPos); - // if we're not exceeding maxSize - if (newPos < maxSize) { - auto newValue = combine(stat, objIter); - // if the value is bigger than previous - if (newStat == boost::none || compare(newStat, newValue)) { - // update value - newStat = newValue; - } - } - } - } - } - return maxSize - 1; -} - -} //! 
paal -#endif // PAAL_FILL_KNAPSACK_DYNAMIC_TABLE_HPP diff --git a/patrec/inc/WireCellPatRec/paal/dynamic/knapsack/get_bound.hpp b/patrec/inc/WireCellPatRec/paal/dynamic/knapsack/get_bound.hpp deleted file mode 100644 index a6da3ce82..000000000 --- a/patrec/inc/WireCellPatRec/paal/dynamic/knapsack/get_bound.hpp +++ /dev/null @@ -1,117 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file get_bound.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-10-04 - */ -#ifndef PAAL_GET_BOUND_HPP -#define PAAL_GET_BOUND_HPP - -#include "paal/utils/accumulate_functors.hpp" -#include "paal/utils/knapsack_utils.hpp" -#include "paal/greedy/knapsack/knapsack_greedy.hpp" - -#include - -namespace paal { -namespace detail { - -template -using GetIntegralTag = typename std::conditional< - std::is_integral::value && std::is_integral::value, - integral_value_and_size_tag, - typename std::conditional< - std::is_integral::value, integral_size_tag, - typename std::conditional< - std::is_integral::value, integral_value_tag, - non_integral_value_and_size_tag>::type>::type>::type; - -template -using Getarithmetic_size_tag = typename std::conditional< - std::is_arithmetic::value, arithmetic_size_tag, - Nonarithmetic_size_tag>::type; - -// this overloads checks if SizeType and ValueType are integral - -struct upper_tag{}; -struct lower_tag{}; - -/** - * @brief upper bound is computed as biggest density times capacity + - * values for all elements with size 0. It is correct upper bound for 0/1. - * For unbounded case there will be no elements with size 0. 
- */ -template -typename KnapsackData::value get_density_based_value_upper_bound(KnapsackData knap_data) { - using Size = typename KnapsackData::size; - using ObjectRef = typename KnapsackData::object_ref; - auto density = knap_data.get_density(); - - // this filters are really needed only in 0/1 case - // in unbounded case, there is a guarantee that sizes are not 0 - auto not_zero_sizel = [=](ObjectRef obj) {return knap_data.get_size(obj) > Size{};}; - auto not_zero_size = utils::make_assignable_functor(not_zero_sizel); - auto zeroSize = utils::make_not_functor(not_zero_size); - - auto not_zeros = knap_data.get_objects() | boost::adaptors::filtered(not_zero_size); - auto zeros = knap_data.get_objects() | boost::adaptors::filtered(zeroSize ); - - auto maxElement = *max_element_functor(not_zeros, density); - return knap_data.get_capacity() * maxElement + sum_functor(zeros, knap_data.get_value()); -} - -//non-arithmetic size, upper bound -template -typename KnapsackData::value get_value_bound( - KnapsackData knap_data, - Nonarithmetic_size_tag, Is_0_1_Tag, upper_tag) { - return get_density_based_value_upper_bound(std::move(knap_data)); -} - -//arithmetic size, upper bound -template -typename KnapsackData::value get_value_bound( - KnapsackData knap_data, - arithmetic_size_tag, Is_0_1_Tag is_0_1_Tag, upper_tag) { - return std::min(2 * get_value_bound(knap_data, is_0_1_Tag, lower_tag{}), - get_density_based_value_upper_bound(knap_data)); -} - -//non-arithmetic size, lower bound -template -typename KnapsackData::value get_value_bound(KnapsackData knap_data, - Nonarithmetic_size_tag, Is_0_1_Tag, lower_tag) { - //computes lower bound as value of the most valuable element - return *max_element_functor(knap_data.get_objects(), knap_data.get_value()).base(); -} - -//arithmetic size, lower bound -template -typename KnapsackData::value get_value_bound(KnapsackData knap_data, - arithmetic_size_tag, Is_0_1_Tag is_0_1_Tag, lower_tag) { - auto out = 
boost::make_function_output_iterator(utils::skip_functor{}); - return knapsack_general_two_app(detail::make_knapsack_data(knap_data.get_objects(), - knap_data.get_capacity(), knap_data.get_size(), knap_data.get_value(), out), is_0_1_Tag).first; -} - -//decide whether size is arithmetic or not -template -typename KnapsackData::value get_value_bound(KnapsackData knap_data, - Is_0_1_Tag is_0_1_tag, BoundType bound_type_tag) { - return get_value_bound(std::move(knap_data), - Getarithmetic_size_tag{}, - is_0_1_tag, bound_type_tag); -} - -} //! detail -} //! paal -#endif // PAAL_GET_BOUND_HPP diff --git a/patrec/inc/WireCellPatRec/paal/dynamic/knapsack/knapsack_common.hpp b/patrec/inc/WireCellPatRec/paal/dynamic/knapsack/knapsack_common.hpp deleted file mode 100644 index d82a4a2fb..000000000 --- a/patrec/inc/WireCellPatRec/paal/dynamic/knapsack/knapsack_common.hpp +++ /dev/null @@ -1,71 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file knapsack_common.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-09-30 - */ -#ifndef PAAL_KNAPSACK_COMMON_HPP -#define PAAL_KNAPSACK_COMMON_HPP - -#include "paal/dynamic/knapsack/get_bound.hpp" - -namespace paal { -namespace detail { - -template -typename KnapsackData::return_type knapsack_check_integrality(KnapsackData knap_data, Is_0_1_Tag is_0_1_Tag, - RetrieveSolution retrieve_solutionTag = - RetrieveSolution{}) { - - return knapsack(std::move(knap_data), is_0_1_Tag, - detail::GetIntegralTag{}, - retrieve_solutionTag); -} - -// this overloads is for nonintegral SizeType and ValueType -// this case is invalid and allwas asserts! 
-template ::value>::type> - -typename KnapsackData::return_type knapsack(KnapsackData, Is_0_1_Tag is_0_1_Tag, IntegralTag, - RetrieveSolution retrieve_solution) { - // trick to avoid checking assert on template definition parse - static_assert( - std::is_same::value, - "At least one of the value or size must return integral value"); -} - -/** - * @brief Solution to Knapsack problem - * overload for integral Size and Value case - */ -template -typename KnapsackData::return_type knapsack(KnapsackData knap_data, Is_0_1_Tag is_0_1_Tag, - integral_value_and_size_tag, RetrieveSolution retrieve_solutionTag) { - if (get_value_bound(knap_data, is_0_1_Tag, upper_tag{}) > - knap_data.get_capacity()) { - return knapsack(std::move(knap_data), is_0_1_Tag, integral_size_tag{}, - retrieve_solutionTag); - } else { - return knapsack(std::move(knap_data), is_0_1_Tag, integral_value_tag{}, - retrieve_solutionTag); - } -} - -} //! detail -} //! paal -#endif // PAAL_KNAPSACK_COMMON_HPP diff --git a/patrec/inc/WireCellPatRec/paal/dynamic/knapsack/knapsack_fptas_common.hpp b/patrec/inc/WireCellPatRec/paal/dynamic/knapsack/knapsack_fptas_common.hpp deleted file mode 100644 index cb596ea81..000000000 --- a/patrec/inc/WireCellPatRec/paal/dynamic/knapsack/knapsack_fptas_common.hpp +++ /dev/null @@ -1,177 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file knapsack_fptas_common.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-10-04 - */ -#ifndef PAAL_KNAPSACK_FPTAS_COMMON_HPP -#define PAAL_KNAPSACK_FPTAS_COMMON_HPP - -#include "paal/utils/accumulate_functors.hpp" -#include "paal/dynamic/knapsack/get_bound.hpp" - -namespace paal { -namespace detail { - -/** - * @brief computes multiplier for FPTAS, version for 0/1 - */ -template -boost::optional get_multiplier(Objects &&objects, double epsilon, - double lowerBound, Functor, - detail::zero_one_tag) { - double n = boost::distance(objects); - auto ret = n / (epsilon * lowerBound); - static const double SMALLEST_MULTIPLIER = 1.; - if (ret > SMALLEST_MULTIPLIER) return boost::none; - return ret; -} - -// TODO this multiplier does not guarantee fptas -/** - * @brief computes multiplier for FPTAS, unbounded version - * - */ -template -boost::optional get_multiplier(Objects &&objects, double epsilon, - double lowerBound, Functor f, - detail::unbounded_tag) { - double minF = *min_element_functor(objects, f); - double n = int(double(lowerBound) * (1. 
+ epsilon) / minF + - 1.); // maximal number of elements in the found solution - auto ret = n / (epsilon * lowerBound); - static const double SMALLEST_MULTIPLIER = 1.; - if (ret > SMALLEST_MULTIPLIER) return boost::none; - return ret; -} - -template -ReturnType knapsack_general_on_value_fptas(double epsilon, - KnapsackData knap_data, - IsZeroOne is_0_1_Tag, - RetrieveSolution retrieve_solution) { - using ObjectRef = typename KnapsackData::object_ref; - using Value = typename KnapsackData::value; - using Size = typename KnapsackData::size; - - auto &&objects = knap_data.get_objects(); - - if (boost::empty(objects)) { - return ReturnType{}; - } - - double maxValue = - detail::get_value_bound(knap_data, is_0_1_Tag, lower_tag{}); - auto multiplier = get_multiplier(objects, epsilon, maxValue, - knap_data.get_value(), is_0_1_Tag); - - if (!multiplier) { - return knapsack_check_integrality(std::move(knap_data), is_0_1_Tag, - retrieve_solution); - } - - auto newValue = utils::make_scale_functor( - knap_data.get_value(), *multiplier); - auto ret = knapsack_check_integrality( - detail::make_knapsack_data(objects, knap_data.get_capacity(), - knap_data.get_size(), newValue, - knap_data.get_output_iter()), - is_0_1_Tag, retrieve_solution); - return std::make_pair(Value(double(ret.first) / *multiplier), ret.second); -} - -template -ReturnType knapsack_general_on_size_fptas(double epsilon, - KnapsackData knap_data, - IsZeroOne is_0_1_Tag, - RetrieveSolution retrieve_solution) { - using ObjectRef = typename KnapsackData::object_ref; - using Size = typename KnapsackData::size; - - auto &&objects = knap_data.get_objects(); - - if (boost::empty(objects)) { - return ReturnType{}; - } - - auto multiplier = get_multiplier(objects, epsilon, knap_data.get_capacity(), - knap_data.get_size(), is_0_1_Tag); - - if (!multiplier) { - return knapsack_check_integrality(std::move(knap_data), is_0_1_Tag, - retrieve_solution); - } - - auto newSize = utils::make_scale_functor(knap_data.get_size(), - 
*multiplier); - auto ret = knapsack_check_integrality( - detail::make_knapsack_data( - objects, Size(knap_data.get_capacity() * *multiplier), newSize, - knap_data.get_value(), knap_data.get_output_iter()), - is_0_1_Tag, retrieve_solution); - return ReturnType(ret.first, double(ret.second) / *multiplier); -} - -template -typename KnapsackData::return_type -knapsack_general_on_value_fptas_retrieve(double epsilon, KnapsackData knap_data, - IsZeroOne is_0_1_Tag) { - using ObjectRef = typename KnapsackData::object_ref; - using Value = typename KnapsackData::value; - - Value realValue{}; - auto addValue = [&](ObjectRef obj) { - realValue += knap_data.get_value(obj); - knap_data.out(obj); - } - ; - - auto newOut = boost::make_function_output_iterator(addValue); - - auto reducedReturn = knapsack_general_on_value_fptas( - epsilon, detail::make_knapsack_data( - knap_data.get_objects(), knap_data.get_capacity(), - knap_data.get_size(), knap_data.get_value(), newOut), - is_0_1_Tag, retrieve_solution_tag{}); - return std::make_pair(realValue, reducedReturn.second); -} - -template -ReturnType knapsack_general_on_size_fptas_retrieve(double epsilon, - KnapsackData knap_data, - IsZeroOne is_0_1_Tag) { - using ObjectRef = typename KnapsackData::object_ref; - using Size = typename KnapsackData::size; - - Size realSize{}; - auto add_size = [&](ObjectRef obj) { - realSize += knap_data.get_size(obj); - knap_data.out(obj); - } - ; - - auto newOut = boost::make_function_output_iterator(add_size); - - auto reducedReturn = knapsack_general_on_size_fptas( - epsilon, detail::make_knapsack_data( - knap_data.get_objects(), knap_data.get_capacity(), - knap_data.get_size(), knap_data.get_value(), newOut), - is_0_1_Tag, retrieve_solution_tag{}); - return ReturnType(reducedReturn.first, realSize); -} - -} // detail -} // paal - -#endif // PAAL_KNAPSACK_FPTAS_COMMON_HPP diff --git a/patrec/inc/WireCellPatRec/paal/dynamic/knapsack_0_1.hpp b/patrec/inc/WireCellPatRec/paal/dynamic/knapsack_0_1.hpp 
deleted file mode 100644 index 3e91e4c29..000000000 --- a/patrec/inc/WireCellPatRec/paal/dynamic/knapsack_0_1.hpp +++ /dev/null @@ -1,309 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file knapsack_0_1.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-09-30 - */ -#ifndef PAAL_KNAPSACK_0_1_HPP -#define PAAL_KNAPSACK_0_1_HPP - -#include "paal/utils/functors.hpp" -#include "paal/utils/knapsack_utils.hpp" -#include "paal/utils/less_pointees.hpp" -#include "paal/utils/irange.hpp" -#include "paal/dynamic/knapsack/fill_knapsack_dynamic_table.hpp" -#include "paal/dynamic/knapsack/knapsack_common.hpp" -#include "paal/greedy/knapsack_0_1_two_app.hpp" - -#include -#include -#include - -#include - -namespace paal { - -namespace detail { - -/** - * @brief For 0/1 knapsack dynamic algorithm for given element the table has to - * be traversed from the highest to the lowest element - */ -struct Knapsack_0_1_get_position_range { - template - auto operator()(T begin, T end) - ->decltype(irange(begin, end) | boost::adaptors::reversed) { - return irange(begin, end) | boost::adaptors::reversed; - } -}; - -/** - * @brief This class helps solving 0/1 knapsack problem. 
- * Function solve returns the optimal value - * Function Retrieve solution returns chosen elements - * - * @tparam Objects - * @tparam ObjectSizeFunctor - * @tparam ObjectValueFunctor - */ -template -class Knapsack_0_1 { - using base = knapsack_base; - using SizeType = typename base::SizeType; - using ValueType = typename base::ValueType; - using ObjectType = typename base::ObjectType; - using ObjectRef = typename base::ObjectRef; - using ReturnType = typename base::return_type; - using ValueOrNull = boost::optional; - static_assert(std::is_integral::value, - "Size type must be integral"); - using ValueOrNullVector = std::vector; - - public: - - Knapsack_0_1(ObjectSizeFunctor size, ObjectValueFunctor value, - Comparator compare = Comparator()) - : m_size(size), m_value(value), m_comparator(compare) {} - - /** - * @brief Function solves dynamic programming problem - * @returns the optimal value - */ - template - ReturnType solve(Objects objects, SizeType capacity, - GetBestElement getBest) { - m_object_on_size.resize(capacity + 1); - fill_table(m_object_on_size, objects, capacity); - auto maxValue = getBest(m_object_on_size.begin(), - m_object_on_size.end(), m_comparator); - - if (maxValue != m_object_on_size.end()) { - return ReturnType(**maxValue, maxValue - m_object_on_size.begin()); - } else { - return ReturnType(ValueType{}, SizeType{}); - } - } - - //@brief here we find actual solution - // that is, the chosen objects - // this is done by simple divide and conquer strategy - template - void retrieve_solution(ValueType maxValue, SizeType size, Objects objects, - OutputIterator & out) const { - m_object_on_size.resize(size + 1); - m_object_on_size_rec.resize(size + 1); - retrieve_solution_rec(maxValue, size, std::begin(objects), - std::end(objects), out); - } - - private: - template - void retrieve_solution_rec(ValueType maxValue, SizeType capacity, - ObjectsIter oBegin, ObjectsIter oEnd, - OutputIterator & out) const { - if (maxValue == ValueType()) { - 
return; - } - - auto objNr = std::distance(oBegin, oEnd); - assert(objNr); - - // boundary case only one object left - if (objNr == 1) { - assert(m_value(*oBegin) == maxValue); - *out = *oBegin; - ++out; - return; - } - - // main case, at least 2 objects left - auto midle = oBegin + objNr / 2; - fill_table(m_object_on_size, boost::make_iterator_range(oBegin, midle), - capacity); - fill_table(m_object_on_size_rec, - boost::make_iterator_range(midle, oEnd), capacity); - - SizeType capacityLeftPartInOptimalSolution{}; - for (auto capacityLeftPart : irange(capacity + 1)) { - auto left = m_object_on_size[capacityLeftPart]; - auto right = m_object_on_size_rec[capacity - capacityLeftPart]; - if (left && right) { - if (*left + *right == maxValue) { - capacityLeftPartInOptimalSolution = capacityLeftPart; - break; - } - } - } - auto left = m_object_on_size[capacityLeftPartInOptimalSolution]; - auto right = - m_object_on_size_rec[capacity - capacityLeftPartInOptimalSolution]; - assert(left && right && *left + *right == maxValue); - - retrieve_solution_rec(*left, capacityLeftPartInOptimalSolution, oBegin, - midle, out); - retrieve_solution_rec( - *right, capacity - capacityLeftPartInOptimalSolution, midle, oEnd, - out); - } - - template - void fill_table(ValueOrNullVector &values, ObjectsRange &&objects, - SizeType capacity) const { - fill_knapsack_dynamic_table( - values.begin(), values.begin() + capacity + 1, - std::forward(objects), m_size, - [&](ValueOrNull val, - typename boost::range_iterator::type obj) { - return *val + m_value(*obj); - }, - [&](ValueOrNull left, ValueOrNull right) { - return m_comparator(*left, *right); - }, - [](ValueOrNull & val) { - val = ValueType{}; - }, - Knapsack_0_1_get_position_range{}); - } - - ObjectSizeFunctor m_size; - ObjectValueFunctor m_value; - Comparator m_comparator; - mutable ValueOrNullVector m_object_on_size; - mutable ValueOrNullVector m_object_on_size_rec; -}; - -template -Knapsack_0_1 -make_knapsack_0_1(ObjectSizeFunctor size, 
ObjectValueFunctor value, - Comparator comp) { - return Knapsack_0_1(size, value, comp); -} - -template -void retrieve_solution(const Knapsack &knapsack, ValueType maxValue, - IndexType size, Objects &&objects, OutputIterator & out, - retrieve_solution_tag) { - knapsack.retrieve_solution(maxValue, size, objects, out); -} - -template -void retrieve_solution(const Knapsack &knapsack, ValueType maxValue, - IndexType size, Objects &&objects, OutputIterator & out, - no_retrieve_solution_tag) {} - -/** - * @brief Solution to Knapsack 0/1 problem - * overload for integral Size case - */ -template -auto knapsack(KnapsackData knap_data, zero_one_tag, integral_size_tag, - retrieve_solution_tag retrieve_solutionTag) { - using Value = typename KnapsackData::value; - - auto knapsack = make_knapsack_0_1( - knap_data.get_size(), knap_data.get_value(), utils::less{}); - auto value_size = - knapsack.solve(knap_data.get_objects(), knap_data.get_capacity(), - get_max_element_on_capacity_indexed_collection()); - retrieve_solution(knapsack, value_size.first, value_size.second, - knap_data.get_objects(), knap_data.get_output_iter(), - retrieve_solutionTag); - return value_size; -} - -/** - * @brief Solution to Knapsack 0/1 problem - * overload for integral Value case - */ -template -auto knapsack(KnapsackData knap_data, zero_one_tag, integral_value_tag, - retrieve_solution_tag retrieve_solutionTag) { - using Value = typename KnapsackData::value; - using Size = typename KnapsackData::size; - - auto knapsack = make_knapsack_0_1( - knap_data.get_value(), knap_data.get_size(), utils::greater{}); - auto maxValue = get_value_bound(knap_data, zero_one_tag{}, upper_tag{}); - auto value_size = knapsack.solve( - knap_data.get_objects(), maxValue, - get_max_element_on_value_indexed_collection, - Value>( - boost::optional(knap_data.get_capacity() + 1))); - retrieve_solution(knapsack, value_size.first, value_size.second, - knap_data.get_objects(), knap_data.get_output_iter(), - 
retrieve_solutionTag); - return std::make_pair(value_size.second, value_size.first); -} - -} // detail - -/** - * @brief Solution to Knapsack 0/1 problem - * - * @tparam Objects - * @tparam OutputIterator - * @tparam ObjectSizeFunctor - * @tparam ObjectValueFunctor - * @param oBegin given objects - * @param oEnd - * @param out the result is returned using output iterator - * @param size functor that for given object returns its size - * @param value functor that for given object returns its value - */ -template -auto knapsack_0_1(Objects &&objects, - detail::FunctorOnRangePValue - capacity, // capacity is of size type - OutputIterator out, ObjectSizeFunctor size, - ObjectValueFunctor value = ObjectValueFunctor{}) { - - return detail::knapsack_check_integrality( - detail::make_knapsack_data(std::forward(objects), capacity, - size, value, out), - detail::zero_one_tag{}); -} - -/** - * @brief Solution to Knapsack 0/1 problem, without retrieving the objects in -* the solution - * - * @tparam Objects - * @tparam OutputIterator - * @tparam ObjectSizeFunctor - * @tparam ObjectValueFunctor - * @param oBegin given objects - * @param oEnd - * @param size functor that for given object returns its size - * @param value functor that for given object returns its value - */ -template -auto knapsack_0_1_no_output(Objects &&objects, - detail::FunctorOnRangePValue - capacity, // capacity is of size type - ObjectSizeFunctor size, - ObjectValueFunctor value = ObjectValueFunctor{}) { - auto out = boost::make_function_output_iterator(utils::skip_functor{}); - return detail::knapsack_check_integrality( - detail::make_knapsack_data( - std::forward(objects), capacity, size, value, out), - detail::zero_one_tag{}, detail::no_retrieve_solution_tag{}); -} - -} // paal - -#endif // PAAL_KNAPSACK_0_1_HPP diff --git a/patrec/inc/WireCellPatRec/paal/dynamic/knapsack_0_1_fptas.hpp b/patrec/inc/WireCellPatRec/paal/dynamic/knapsack_0_1_fptas.hpp deleted file mode 100644 index 32a2aacd5..000000000 --- 
a/patrec/inc/WireCellPatRec/paal/dynamic/knapsack_0_1_fptas.hpp +++ /dev/null @@ -1,87 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file knapsack_0_1_fptas.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-10-04 - */ -#ifndef PAAL_KNAPSACK_0_1_FPTAS_HPP -#define PAAL_KNAPSACK_0_1_FPTAS_HPP - -#include "paal/dynamic/knapsack_0_1.hpp" -#include "paal/dynamic/knapsack/knapsack_fptas_common.hpp" - -namespace paal { - -template -typename detail::knapsack_base::return_type -knapsack_0_1_on_value_fptas( - double epsilon, Objects &&objects, - detail::FunctorOnRangePValue - capacity, // capacity is of size type - OutputIterator out, ObjectSizeFunctor size, ObjectValueFunctor value) { - return detail::knapsack_general_on_value_fptas_retrieve( - epsilon, detail::make_knapsack_data(std::forward(objects), - capacity, size, value, out), - detail::zero_one_tag{}); -} - -template -typename detail::knapsack_base::return_type -knapsack_0_1_on_size_fptas( - double epsilon, Objects &&objects, - detail::FunctorOnRangePValue - capacity, // capacity is of size type - OutputIterator out, ObjectSizeFunctor size, ObjectValueFunctor value) { - return detail::knapsack_general_on_size_fptas_retrieve( - epsilon, detail::make_knapsack_data(std::forward(objects), - capacity, size, value, out), - detail::zero_one_tag{}); -} - -template -typename detail::knapsack_base::return_type -knapsack_0_1_no_output_on_value_fptas( - double epsilon, Objects &&objects, - detail::FunctorOnRangePValue - capacity, // capacity is of size type - ObjectSizeFunctor size, ObjectValueFunctor value) { - auto out = boost::make_function_output_iterator(utils::skip_functor()); - 
return detail::knapsack_general_on_value_fptas( - epsilon, detail::make_knapsack_data(std::forward(objects), - capacity, size, value, out), - detail::zero_one_tag{}, detail::no_retrieve_solution_tag{}); -} - -template -typename detail::knapsack_base::return_type -knapsack_0_1_no_output_on_size_fptas( - double epsilon, Objects &&objects, - detail::FunctorOnRangePValue - capacity, // capacity is of size type - ObjectSizeFunctor size, ObjectValueFunctor value) { - auto out = boost::make_function_output_iterator(utils::skip_functor()); - return detail::knapsack_general_on_size_fptas( - epsilon, detail::make_knapsack_data(std::forward(objects), - capacity, size, value, out), - detail::zero_one_tag{}, detail::no_retrieve_solution_tag{}); -} - -} // paal - -#endif // PAAL_KNAPSACK_0_1_FPTAS_HPP diff --git a/patrec/inc/WireCellPatRec/paal/dynamic/knapsack_unbounded.hpp b/patrec/inc/WireCellPatRec/paal/dynamic/knapsack_unbounded.hpp deleted file mode 100644 index 9597e8c01..000000000 --- a/patrec/inc/WireCellPatRec/paal/dynamic/knapsack_unbounded.hpp +++ /dev/null @@ -1,183 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file knapsack_unbounded.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-09-20 - */ -#ifndef PAAL_KNAPSACK_UNBOUNDED_HPP -#define PAAL_KNAPSACK_UNBOUNDED_HPP - -#include "paal/dynamic/knapsack/fill_knapsack_dynamic_table.hpp" -#include "paal/dynamic/knapsack/get_bound.hpp" -#include "paal/dynamic/knapsack/knapsack_common.hpp" -#include "paal/greedy/knapsack_unbounded_two_app.hpp" -#include "paal/utils/functors.hpp" -#include "paal/utils/knapsack_utils.hpp" -#include "paal/utils/less_pointees.hpp" -#include "paal/utils/type_functions.hpp" -#include "paal/utils/irange.hpp" - -#include -#include - -#include - -namespace paal { - -namespace detail { -/** - * @brief For knapsack dynamic algorithm for given element the table has to be - * traversed from the lowest to highest element - */ -struct knapsack_get_position_range { - template - auto operator()(T begin, T end)->decltype(irange(begin, end)) { - return irange(begin, end); - } -}; - -template -ReturnType knapsack_unbounded_dynamic( - KnapsackData knap_data, - GetBestElement getBest, ValuesComparator compareValues) { - using Value = typename KnapsackData::value; - using Size = typename KnapsackData::size; - using ObjectsIter = typename KnapsackData::object_iter; - using ObjIterWithValueOrNull = - boost::optional>; - std::vector objectOnSize(knap_data.get_capacity() + 1); - - auto compare = [ = ](const ObjIterWithValueOrNull & left, - const ObjIterWithValueOrNull & right) { - return compareValues(left->second, right->second); - }; - - auto objectOnSizeBegin = objectOnSize.begin(); - auto objectOnSizeEnd = objectOnSize.end(); - fill_knapsack_dynamic_table(objectOnSizeBegin, objectOnSizeEnd, knap_data.get_objects(), knap_data.get_size(), - [&](ObjIterWithValueOrNull val, ObjectsIter obj) - ->ObjIterWithValueOrNull{ - 
return std::make_pair(obj, val->second + knap_data.get_value(*obj)); - }, - compare, [](ObjIterWithValueOrNull & val) { - val = std::make_pair(ObjectsIter{}, Value{}); - }, - detail::knapsack_get_position_range()); - - // getting position of the max value in the objectOnSize array - auto maxPos = getBest(objectOnSizeBegin, objectOnSizeEnd, compare); - - // setting solution - auto remainingSpaceInKnapsack = maxPos; - while (remainingSpaceInKnapsack != objectOnSizeBegin) { - assert(*remainingSpaceInKnapsack); - auto && obj = *((*remainingSpaceInKnapsack)->first); - knap_data.out(obj); - remainingSpaceInKnapsack -= knap_data.get_size(obj); - } - - // returning result - if (maxPos != objectOnSizeEnd) { - assert(*maxPos); - return ReturnType((*maxPos)->second, maxPos - objectOnSizeBegin); - } else { - return ReturnType(Value{}, Size{}); - } -} - -/** - * @brief Solution to the knapsack problem - * - * @tparam OutputIterator - * @param objects given objects - * @param out the result is returned using output iterator - * @param size functor that for given object returns its size - * @param value functor that for given object returns its value - */ -template -ReturnType knapsack(KnapsackData knap_data, - unbounded_tag, integral_value_tag, retrieve_solution_tag) { - using ValueType = typename KnapsackData::value; - using ObjectsIter = typename KnapsackData::object_iter; - using TableElementType = boost::optional>; - - auto && objects = knap_data.get_objects(); - - if (boost::empty(objects)) { - return ReturnType{}; - } - auto maxSize = get_value_bound(knap_data, unbounded_tag{}, upper_tag{}); - auto ret = knapsack_unbounded_dynamic( - detail::make_knapsack_data( - knap_data.get_objects(), maxSize, knap_data.get_value(), knap_data.get_size(), knap_data.get_output_iter()), - get_max_element_on_value_indexed_collection( - TableElementType(std::make_pair(ObjectsIter{}, knap_data.get_capacity() + 1))), - utils::greater{}); - return ReturnType(ret.second, ret.first); -} - -/** - 
* @brief Solution to the knapsack problem - * - * @tparam OutputIterator - * @param oBegin given objects - * @param oEnd - * @param out the result is returned using output iterator - * @param size functor that for given object returns its size - * @param value functor that for given object returns its value - */ -template -typename KnapsackData::return_type -knapsack(KnapsackData knap_data, - unbounded_tag, integral_size_tag, retrieve_solution_tag) { - using Value = typename KnapsackData::value; - return knapsack_unbounded_dynamic(std::move(knap_data), - detail::get_max_element_on_capacity_indexed_collection(), - utils::less{}); -} - -} // detail - -/** - * @brief Solution to the knapsack problem - * - * @tparam Objects - * @tparam OutputIterator - * @tparam ObjectSizeFunctor - * @tparam ObjectValueFunctor - * @param oBegin given objects - * @param oEnd - * @param out the result is returned using output iterator - * @param size functor that for given object returns its size - * @param value functor that for given object returns its value - */ -template -typename detail::knapsack_base::return_type -knapsack_unbounded(Objects && objects, - detail::FunctorOnRangePValue - capacity, // capacity is of size type - OutputIterator out, ObjectSizeFunctor size, - ObjectValueFunctor value = ObjectValueFunctor()) { - return detail::knapsack_check_integrality(detail::make_knapsack_data(std::forward(objects), capacity, size, - value, out), detail::unbounded_tag{}, - detail::retrieve_solution_tag()); -} - -} // paal - -#endif // PAAL_KNAPSACK_UNBOUNDED_HPP diff --git a/patrec/inc/WireCellPatRec/paal/dynamic/knapsack_unbounded_fptas.hpp b/patrec/inc/WireCellPatRec/paal/dynamic/knapsack_unbounded_fptas.hpp deleted file mode 100644 index bc44dd800..000000000 --- a/patrec/inc/WireCellPatRec/paal/dynamic/knapsack_unbounded_fptas.hpp +++ /dev/null @@ -1,56 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// 
Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file knapsack_unbounded_fptas.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-10-01 - */ -#ifndef PAAL_KNAPSACK_UNBOUNDED_FPTAS_HPP -#define PAAL_KNAPSACK_UNBOUNDED_FPTAS_HPP - -#include "paal/dynamic/knapsack_unbounded.hpp" -#include "paal/dynamic/knapsack/get_bound.hpp" -#include "paal/dynamic/knapsack/knapsack_fptas_common.hpp" - -#include - -namespace paal { - -template -typename detail::knapsack_base::return_type -knapsack_unbounded_on_value_fptas( - double epsilon, Objects && objects, - detail::FunctorOnRangePValue - capacity, // capacity is of size type - OutputIterator out, ObjectSizeFunctor size, ObjectValueFunctor value) { - return detail::knapsack_general_on_value_fptas_retrieve( - epsilon, detail::make_knapsack_data(std::forward(objects), capacity, size, value, out), - detail::unbounded_tag{}); -} - -template -typename detail::knapsack_base::return_type -knapsack_unbounded_on_size_fptas( - double epsilon, Objects && objects, - detail::FunctorOnRangePValue - capacity, // capacity is of size type - OutputIterator out, ObjectSizeFunctor size, ObjectValueFunctor value) { - return detail::knapsack_general_on_size_fptas_retrieve( - epsilon, detail::make_knapsack_data(std::forward(objects), capacity, size, value, out), - detail::unbounded_tag{}); -} - -} //! 
paal - -#endif // PAAL_KNAPSACK_UNBOUNDED_FPTAS_HPP diff --git a/patrec/inc/WireCellPatRec/paal/greedy/k_center/k_center.hpp b/patrec/inc/WireCellPatRec/paal/greedy/k_center/k_center.hpp deleted file mode 100644 index 905da2e59..000000000 --- a/patrec/inc/WireCellPatRec/paal/greedy/k_center/k_center.hpp +++ /dev/null @@ -1,75 +0,0 @@ -//======================================================================= -// Copyright (c) 2014 Piotr Smulewicz -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file k_center.hpp - * @brief - * @author Piotr Smulewicz - * @version 1.0 - * @date 2014-01-23 - */ -#ifndef PAAL_K_CENTER_HPP -#define PAAL_K_CENTER_HPP - -#include "paal/data_structures/metric/metric_traits.hpp" -#include "paal/utils/assign_updates.hpp" - -#include - -namespace paal { -namespace greedy { -/** - * @brief this is solve K Center problem - * and return radius - * example: - * \snippet k_center_example.cpp K Center Example - * - * example file is k_center_example.cpp - * @param metric - * @param numberOfClusters - * @param result ItemIterators - * @param iBegin - * @param iEnd - * @tparam array_metric - * @tparam OutputIterator - * @tparam ItemIterator - */ -template -typename data_structures::metric_traits::DistanceType -kCenter(const Metric &metric, unsigned int numberOfClusters, - const ItemIterator iBegin, const ItemIterator iEnd, - OutputIterator result) { - - typedef typename data_structures::metric_traits::DistanceType Dist; - std::vector distance_from_closest_center( - std::distance(iBegin, iEnd), std::numeric_limits::max()); - ItemIterator last_centre = iBegin; - ItemIterator farthest_centre = iBegin; - Dist radius; - assert(numberOfClusters > 0); - do { - *result = *farthest_centre; - ++result; - radius = std::numeric_limits::min(); - auto it = 
distance_from_closest_center.begin(); - for (auto i = iBegin; i != iEnd; ++i) { - assign_min(*it, metric(*last_centre, *i)); - if (*it > radius) { - farthest_centre = i; - radius = *it; - } - ++it; - } - last_centre = farthest_centre; - } while (--numberOfClusters); - return radius; -} - -} //!greedy -} //!paal - -#endif // PAAL_K_CENTER_HPP diff --git a/patrec/inc/WireCellPatRec/paal/greedy/k_cut/k_cut.hpp b/patrec/inc/WireCellPatRec/paal/greedy/k_cut/k_cut.hpp deleted file mode 100644 index a37e7a280..000000000 --- a/patrec/inc/WireCellPatRec/paal/greedy/k_cut/k_cut.hpp +++ /dev/null @@ -1,207 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Smulewicz -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file k_cut.hpp - * @brief - * @author Piotr Smulewicz, Piotr Godlewski - * @version 1.0 - * @date 2013-09-25 - */ -#ifndef PAAL_K_CUT_HPP -#define PAAL_K_CUT_HPP - -#include "paal/utils/functors.hpp" -#include "paal/utils/type_functions.hpp" -#include "paal/utils/irange.hpp" - -#include -#include -#include -#include -#include -#include -#include - -#include - -namespace paal { -namespace greedy { - -/** - * @brief this is solve k_cut problem - * and return cut_cost - * example: - * \snippet k_cut_example.cpp K Cut Example - * - * example file is k_cut_example.cpp - * @param graph - * @param number_of_parts - * @param result pairs of vertex_descriptor and number form (1,2, -* ... 
,k) id of part - * @param index_map - * @param weight_map - * @tparam InGraph - * @tparam OutputIterator - * @tparam VertexIndexMap - * @tparam EdgeWeightMap - */ -template -auto k_cut(const InGraph& graph, unsigned int number_of_parts,OutputIterator result, - VertexIndexMap index_map, EdgeWeightMap weight_map) -> - typename boost::property_traits::value_type{ - using cost_t = typename boost::property_traits::value_type; - using Vertex = typename boost::graph_traits::vertex_descriptor; - - using Graph = boost::adjacency_list< - boost::vecS, boost::vecS, boost::undirectedS, boost::no_property, - boost::property>>; - - assert(num_vertices(graph) >= number_of_parts); - - std::vector vertex_to_part(num_vertices(graph)); - using VertexIndexToVertex = typename std::vector; - using VertexIndexToVertexIndex = std::vector; - VertexIndexToVertex vertex_in_subgraph_to_vertex(num_vertices(graph)); - VertexIndexToVertexIndex vertex_to_vertex_in_subgraph(num_vertices(graph)); - int vertex_in_part; - int parts = 1; - // cuts contain pair(x,y) - // x is the cost of the cut - // y and y+1 are index parts of graph after make a cut - std::priority_queue< - std::pair, - std::vector > - ,utils::greater> cuts; - - int id_part = 0; - - //get part id and compute minimum cost of cut of that part and add it to queue - auto make_cut = [&](int id) { - vertex_in_part=0; - for (auto v: boost::as_array(vertices(graph))) { - if (vertex_to_part[get(index_map, v)] == id) { - vertex_in_subgraph_to_vertex[vertex_in_part] = v; - vertex_to_vertex_in_subgraph[get(index_map, v)] = vertex_in_part; - ++vertex_in_part; - } - } - Graph part(vertex_in_part); - for (auto edge : boost::as_array(edges(graph))) { - auto sour = get(index_map, source(edge,graph)); - auto targ = get(index_map, target(edge,graph)); - if (vertex_to_part[sour] == id && - vertex_to_part[targ] == id && - sour != targ) { - add_edge(vertex_to_vertex_in_subgraph[sour], - vertex_to_vertex_in_subgraph[targ], - get(weight_map, edge), - part); 
- } - } - if (vertex_in_part < 2) { - ++id_part; - *result = std::make_pair(vertex_in_subgraph_to_vertex[0], id_part); - ++result; - return; - } - auto parities = boost::make_one_bit_color_map(num_vertices(part), - get(boost::vertex_index, part)); - auto cut_cost = boost::stoer_wagner_min_cut(part, - get(boost::edge_weight, part), - boost::parity_map(parities)); - - for (auto i : irange(num_vertices(part))) { - vertex_to_part[get(index_map, vertex_in_subgraph_to_vertex[i])] = - parts + get(parities, i); //return value convertable to 0/1 - } - cuts.push(std::make_pair(cut_cost, parts)); - parts += 2; - }; - - make_cut(0); - cost_t k_cut_cost = cost_t(); - while (--number_of_parts) { - auto cut = cuts.top(); - cuts.pop(); - k_cut_cost += cut.first; - make_cut(cut.second); - make_cut(cut.second + 1); - } - - while (!cuts.empty()) { - auto cut = cuts.top(); - cuts.pop(); - ++id_part; - for (auto v: boost::as_array(vertices(graph))) { - if (vertex_to_part[get(index_map, v)] == cut.second || - vertex_to_part[get(index_map, v)] == cut.second + 1) { - *result = std::make_pair(v, id_part); - ++result; - } - } - } - return k_cut_cost; -} - -/** - * @brief this is solve k_cut problem - * and return cut_cost - * example: - * \snippet k_cut_example.cpp K Cut Example - * - * example file is k_cut_example.cpp - * @param graph - * @param number_of_parts - * @param result pairs of vertex_descriptor and number form (1,2, ... 
,k) id of part - * @param params - * @tparam InGraph - * @tparam OutputIterator - * @tparam T - * @tparam P - * @tparam R - */ -template -auto k_cut(const InGraph& graph, unsigned int number_of_parts, - OutputIterator result, const boost::bgl_named_params& params) -> - typename boost::property_traits< - puretype(boost::choose_const_pmap(get_param(params, boost::edge_weight), graph, boost::edge_weight)) - >::value_type { - return k_cut(graph, number_of_parts, result, - boost::choose_const_pmap(get_param(params, boost::vertex_index), graph,boost::vertex_index), - boost::choose_const_pmap(get_param(params, boost::edge_weight), graph,boost::edge_weight) - ); -} - -/** - * @brief this is solve k_cut problem - * and return cut_cost - * example: - * \snippet k_cut_example.cpp K Cut Example - * - * example file is k_cut_example.cpp - * @param graph - * @param number_of_parts - * @param result pairs of vertex_descriptor and number form (1,2, ... ,k) id of part - * @tparam InGraph - * @tparam OutputIterator - */ -template -auto k_cut(const InGraph& graph, unsigned int number_of_parts, OutputIterator result) -> - typename boost::property_traits::value_type{ - return k_cut(graph, number_of_parts, result, boost::no_named_parameters()); -} - -} //!greedy -} //!paal - -#endif // PAAL_K_CUT_HPP diff --git a/patrec/inc/WireCellPatRec/paal/greedy/knapsack/knapsack_greedy.hpp b/patrec/inc/WireCellPatRec/paal/greedy/knapsack/knapsack_greedy.hpp deleted file mode 100644 index 4f25de5b1..000000000 --- a/patrec/inc/WireCellPatRec/paal/greedy/knapsack/knapsack_greedy.hpp +++ /dev/null @@ -1,118 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= - -/** - * @file knapsack_greedy.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-10-07 - */ - -#ifndef PAAL_KNAPSACK_GREEDY_HPP -#define PAAL_KNAPSACK_GREEDY_HPP - -#include "paal/utils/accumulate_functors.hpp" -#include "paal/utils/knapsack_utils.hpp" - -#include -#include - -namespace paal { -namespace detail { - -// if the knapsack dynamic table is indexed by values, -// the procedure to find the best element is to find the biggest index i in the -// table that -// *i is smaller than given threshold(capacity) -template -struct get_max_element_on_value_indexed_collection { - get_max_element_on_value_indexed_collection(MaxValueType maxValue) - : m_max_value(maxValue) {} - - template - Iterator operator()(Iterator begin, Iterator end, Comparator compare) { - auto compareOpt = make_less_pointees_t(compare); - // traverse in reverse order, skip the first - for (auto iter = end - 1; iter != begin; --iter) { - if (*iter && compareOpt(m_max_value, *iter)) { - return iter; - } - } - - return end; - } - - private: - MaxValueType m_max_value; -}; - -// if the knapsack dynamic table is indexed by sizes, -// the procedure to find the best element is to find the biggest -// index i in the table that maximizes *i -template -struct get_max_element_on_capacity_indexed_collection { - template - Iterator operator()(Iterator begin, Iterator end, Comparator compare) { - return std::max_element(begin, end, make_less_pointees_t(compare)); - } -}; - -template -typename KnapsackData::return_type knapsack_general_two_app( - KnapsackData knapsack_data, Is_0_1_Tag is_0_1_Tag) { - - using ObjectRef = typename KnapsackData::object_ref; - - static_assert(std::is_arithmetic::value && - std::is_arithmetic::value, - "Size type and Value type must be arithmetic types"); - auto capacity = 
knapsack_data.get_capacity(); - - auto bad_size = [=](ObjectRef o){return knapsack_data.get_size(o) > capacity;}; - - auto objects = boost::remove_if(knapsack_data.get_objects(), bad_size); - - if (boost::empty(objects)) { - return std::pair(); - } - - // finding the element with the greatest density - auto greedyFill = get_greedy_fill( - make_knapsack_data( - objects, capacity, - knapsack_data.get_size(), - knapsack_data.get_value(), - knapsack_data.get_output_iter()), is_0_1_Tag); - - // finding the biggest set elements with the greatest density - // this is actually small optimization compare to original algorithm - // note that largest is transformed iterator! - auto largest = max_element_functor(objects, knapsack_data.get_value()); - - if (*largest > std::get<0>(greedyFill)) { - knapsack_data.out(*largest.base()); - return std::make_pair(*largest, knapsack_data.get_size(*largest.base())); - } else { - greedy_to_output(std::get<2>(greedyFill), knapsack_data.get_output_iter(), is_0_1_Tag); - return std::make_pair(std::get<0>(greedyFill), std::get<1>(greedyFill)); - } -} - -template -struct is_range_const { - using ref = typename boost::range_reference::type; - static const bool value = std::is_const::value || - !std::is_reference::value; -}; -} //! detail -} //! paal -#endif // PAAL_KNAPSACK_GREEDY_HPP diff --git a/patrec/inc/WireCellPatRec/paal/greedy/knapsack_0_1_two_app.hpp b/patrec/inc/WireCellPatRec/paal/greedy/knapsack_0_1_two_app.hpp deleted file mode 100644 index b2db54f99..000000000 --- a/patrec/inc/WireCellPatRec/paal/greedy/knapsack_0_1_two_app.hpp +++ /dev/null @@ -1,93 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file knapsack_0_1_two_app.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-10-07 - */ -#ifndef PAAL_KNAPSACK_0_1_TWO_APP_HPP -#define PAAL_KNAPSACK_0_1_TWO_APP_HPP - -#include "paal/utils/knapsack_utils.hpp" -#include "paal/utils/functors.hpp" -#include "paal/utils/type_functions.hpp" -#include "paal/greedy/knapsack/knapsack_greedy.hpp" - -#include -#include -#include -#include - -#include -#include - -namespace paal { - -namespace detail { -template -std::tuple> -get_greedy_fill(KnapsackData knap_data, zero_one_tag) { - - auto density = knap_data.get_density(); - auto compare = utils::make_functor_to_comparator(density, utils::greater{}); - // objects must be lvalue because we return a subrange of this range - auto &objects = knap_data.get_objects(); - - // finding the biggest set elements with the greatest density - boost::sort(objects, compare); - - Value valueSum{}; - Size sizeSum{}; - auto range = boost::find_if( - objects, [ =, &sizeSum, &valueSum](ObjectRef obj) { - auto newSize = sizeSum + knap_data.get_size(obj); - if (newSize > knap_data.get_capacity()) { - return true; - } - sizeSum = newSize; - valueSum += knap_data.get_value(obj); - return false; - }); - return std::make_tuple(valueSum, sizeSum, range); -} - -template -void greedy_to_output(ObjectsRange range, OutputIter & out, zero_one_tag) { - for (auto obj : range) { - *out = obj; - ++out; - } -} - -} //! 
detail - -/// this version of algorithm might permute, the input range -template ::value>::type * = nullptr> -typename detail::knapsack_base::return_type -knapsack_0_1_two_app( - Objects &&objects, - typename detail::FunctorOnRangePValue capacity, - OutputIterator out, ObjectValueFunctor value, ObjectSizeFunctor size) { - return detail::knapsack_general_two_app( - detail::make_knapsack_data(std::forward(objects), capacity, - size, value, out), - detail::zero_one_tag()); -} -} //! paal -#endif // PAAL_KNAPSACK_0_1_TWO_APP_HPP diff --git a/patrec/inc/WireCellPatRec/paal/greedy/knapsack_unbounded_two_app.hpp b/patrec/inc/WireCellPatRec/paal/greedy/knapsack_unbounded_two_app.hpp deleted file mode 100644 index de1762591..000000000 --- a/patrec/inc/WireCellPatRec/paal/greedy/knapsack_unbounded_two_app.hpp +++ /dev/null @@ -1,80 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file knapsack_unbounded_two_app.hpp - * @brief - * @author Piotr Wygocki - * @version 1.0 - * @date 2013-10-07 - */ -#ifndef PAAL_KNAPSACK_UNBOUNDED_TWO_APP_HPP -#define PAAL_KNAPSACK_UNBOUNDED_TWO_APP_HPP - -#include "paal/utils/accumulate_functors.hpp" -#include "paal/utils/type_functions.hpp" -#include "paal/greedy/knapsack/knapsack_greedy.hpp" - -#include -#include - -#include -#include - -namespace paal { - -namespace detail { -template -std::tuple> -get_greedy_fill(KnapsackData knap_data, unbounded_tag) { - - auto density = knap_data.get_density(); - auto most_dense_iter = max_element_functor( - knap_data.get_objects(), density).base(); - - unsigned nr = knap_data.get_capacity() / knap_data.get_size(*most_dense_iter); - Value value_sum = Value(nr) * knap_data.get_value(*most_dense_iter); - Size size_sum = Size (nr) * knap_data.get_size (*most_dense_iter); - return std::make_tuple(value_sum, size_sum, - std::make_pair(most_dense_iter, nr)); -} - -template -void greedy_to_output(ObjectsIterAndNr most_dense_iter_and_nr, OutputIter & out, - unbounded_tag) { - auto nr = most_dense_iter_and_nr.second; - auto most_dense_iter = most_dense_iter_and_nr.first; - for (unsigned i = 0; i < nr; ++i) { - *out = *most_dense_iter; - ++out; - } -} - -} //! detail - -///this version of algorithm might permute, the input range -template ::value>::type * = nullptr> - -typename detail::knapsack_base::return_type -knapsack_unbounded_two_app( - Objects && objects, - typename detail::FunctorOnRangePValue capacity, - OutputIterator out, ObjectValueFunctor value, ObjectSizeFunctor size) { - return detail::knapsack_general_two_app( - detail::make_knapsack_data(std::forward(objects), capacity, size, value, out), - detail::unbounded_tag{}); -} -} //! 
paal -#endif // PAAL_KNAPSACK_UNBOUNDED_TWO_APP_HPP diff --git a/patrec/inc/WireCellPatRec/paal/greedy/scheduling_jobs/scheduling_jobs.hpp b/patrec/inc/WireCellPatRec/paal/greedy/scheduling_jobs/scheduling_jobs.hpp deleted file mode 100644 index 2c92f79ca..000000000 --- a/patrec/inc/WireCellPatRec/paal/greedy/scheduling_jobs/scheduling_jobs.hpp +++ /dev/null @@ -1,264 +0,0 @@ -//======================================================================= -// Copyright (c) -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file scheduling_jobs.hpp - * @brief - * @author Robert Rosolek - * @version 1.0 - * @date 2013-11-19 - */ -#ifndef PAAL_SCHEDULING_JOBS_HPP -#define PAAL_SCHEDULING_JOBS_HPP - -#define BOOST_RESULT_OF_USE_DECLTYPE - -#include "paal/data_structures/fraction.hpp" -#include "paal/utils/functors.hpp" -#include "paal/utils/type_functions.hpp" -#include "paal/utils/irange.hpp" -#include "paal/utils/assign_updates.hpp" - -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -namespace paal { -namespace greedy { - -namespace detail { - -template -struct sched_traits { - typedef typename std::iterator_traits::reference - machine_reference; - typedef typename std::iterator_traits::reference job_reference; - typedef pure_result_of_t speed_t; - typedef pure_result_of_t load_t; - typedef data_structures::fraction frac_t; -}; - -template > -typename Traits::frac_t calculate_bound(const MachineIterator mfirst, - const MachineIterator mlast, - const JobIterator jfirst, - const JobIterator jlast, - GetSpeed get_speed, GetLoad get_load) { - typedef typename Traits::speed_t Speed; - typedef typename Traits::load_t Load; - typedef typename Traits::frac_t Frac; - - auto jobs_num = jlast - jfirst; - auto 
machines_num = mlast - mfirst; - - std::vector speed_sum(machines_num); - std::transform(mfirst, mlast, speed_sum.begin(), get_speed); - boost::partial_sum(speed_sum, speed_sum.begin()); - - std::vector load_sum(jobs_num); - std::transform(jfirst, jlast, load_sum.begin(), get_load); - boost::partial_sum(load_sum, load_sum.begin()); - - typedef decltype(machines_num) MachinesNumType; - assert(jobs_num > 0 && machines_num > 0); - Frac result(get_load(*jfirst), get_speed(*mfirst)); - for (auto jobID : irange(jobs_num)) { - Load load = get_load(jfirst[jobID]); - auto get_single = [ = ](MachinesNumType i) { - return Frac(load, get_speed(mfirst[i])); - }; - auto get_summed = [&](MachinesNumType i) { - return Frac(load_sum[jobID], speed_sum[i]); - }; - auto condition = [ = ](MachinesNumType i) { - return get_summed(i) >= get_single(i); - }; - auto machines_ids = boost::counting_range( - static_cast(0), machines_num); - // current range based version in boost is broken - // should be replaced when released - // https://github.com/boostorg/algorithm/pull/4 - auto it = std::partition_point(machines_ids.begin(), machines_ids.end(), - condition); - MachinesNumType machineID = - (it != machines_ids.end()) ? 
*it : machines_num - 1; - auto getMax = [ = ](MachinesNumType i) { - return std::max(get_single(i), get_summed(i)); - }; - - Frac candidate = getMax(machineID); - if (machineID != 0) { - assign_min(candidate, getMax(machineID - 1)); - } - assign_max(result, candidate); - } - return result; -} - -template -void schedule(MachineIterator mfirst, MachineIterator mlast, JobIterator jfirst, - JobIterator jlast, OutputIterator result, GetSpeed get_speed, - GetLoad get_load, RoundFun round) { - typedef sched_traits - Traits; - typedef typename Traits::speed_t Speed; - typedef typename Traits::load_t Load; - - if (mfirst == mlast || jfirst == jlast) { - return; - } - - std::vector machines; - boost::copy(boost::counting_range(mfirst, mlast), - std::back_inserter(machines)); - auto get_speed_from_iterator = utils::make_lift_iterator_functor(get_speed); - boost::sort(machines, utils::make_functor_to_comparator( - get_speed_from_iterator, utils::greater{})); - - std::vector jobs; - boost::copy(boost::counting_range(jfirst, jlast), std::back_inserter(jobs)); - auto get_load_from_iterator = utils::make_lift_iterator_functor(get_load); - boost::sort(jobs, utils::make_functor_to_comparator(get_load_from_iterator, - utils::greater{})); - - auto bound = detail::calculate_bound( - machines.begin(), machines.end(), jobs.begin(), jobs.end(), - get_speed_from_iterator, get_load_from_iterator); - Load bound_load = bound.num; - Speed bound_speed = bound.den; - Load current_load{}; - auto emit = [&result](MachineIterator miter, JobIterator jiter) { - *result = std::make_pair(miter, jiter); - ++result; - }; - auto job_iter = jobs.begin(); - for (auto machine_iter = machines.begin(); machine_iter != machines.end(); - ++machine_iter) { - auto &&machine = *(*machine_iter); - Speed speed = get_speed(machine); - while (job_iter != jobs.end()) { - auto &&job = *(*job_iter); - Load job_load = get_load(job) * bound_speed, - new_load = current_load + job_load; - assert(new_load <= bound_load * (2 * 
speed)); - if (bound_load * speed < new_load) { - Load frac_load = bound_load * speed - current_load; - if (round(frac_load, job_load)) { - emit(*machine_iter, *job_iter); - } else { - auto next_machine_iter = std::next(machine_iter); - assert(next_machine_iter != machines.end()); - emit(*next_machine_iter, *job_iter); - } - ++job_iter; - current_load = job_load - frac_load; - break; - } - emit(*machine_iter, *job_iter); - ++job_iter; - current_load = new_load; - } - } - assert(job_iter == jobs.end()); -} -} //!detail - -/* - * @brief This is deterministic solve scheduling jobs on machines with different - * speeds problem and return schedule - * - * Example: - * \snippet scheduling_jobs_example.cpp Scheduling Jobs Example - * - * example file is scheduling_jobs_example.cpp - * - * @param mfirst - * @param mlast - * @param jfirst - * @param jlast - * @param result - * @param get_speed - * @param get_load - * @tparam MachineIterator - * @tparam JobIterator - * @tparam OutputIterator - * @tparam GetSpeed - * @tparam GetLoad - */ -template -void schedule_deterministic(const MachineIterator mfirst, - const MachineIterator mlast, - const JobIterator jfirst, const JobIterator jlast, - OutputIterator result, GetSpeed get_speed, - GetLoad get_load) { - detail::schedule(mfirst, mlast, jfirst, jlast, result, get_speed, get_load, - utils::always_true{}); -} - -/* - * @brief This is randomized solve scheduling jobs on machines with different - * speeds problem and return schedule. 
- * - * Example: - * \snippet scheduling_jobs_example.cpp Scheduling Jobs Example - * - * example file is scheduling_jobs_example.cpp - * - * @param mfirst - * @param mlast - * @param jfirst - * @param jlast - * @param result - * @param get_speed - * @param get_load - * @param gen - * @tparam MachineIterator - * @tparam JobIterator - * @tparam OutputIterator - * @tparam GetSpeed - * @tparam GetLoad - * @tparam RandomNumberGenerator - */ -template -void schedule_randomized(const MachineIterator mfirst, - const MachineIterator mlast, const JobIterator jfirst, - const JobIterator jlast, OutputIterator result, - GetSpeed get_speed, GetLoad get_load, - RandomNumberGenerator &&gen = - std::default_random_engine(97345631u)) { - typedef typename detail::sched_traits Traits; - double alpha = std::uniform_real_distribution()(gen); - auto round = [alpha](typename Traits::load_t fractional_load, - typename Traits::load_t total_load) { - return total_load * alpha < fractional_load; - }; - detail::schedule(mfirst, mlast, jfirst, jlast, result, get_speed, get_load, - round); -} - -} //!greedy -} //!paal - -#endif // PAAL_SCHEDULING_JOBS_HPP diff --git a/patrec/inc/WireCellPatRec/paal/greedy/scheduling_jobs_on_identical_parallel_machines/scheduling_jobs_on_identical_parallel_machines.hpp b/patrec/inc/WireCellPatRec/paal/greedy/scheduling_jobs_on_identical_parallel_machines/scheduling_jobs_on_identical_parallel_machines.hpp deleted file mode 100644 index ccbfc7028..000000000 --- a/patrec/inc/WireCellPatRec/paal/greedy/scheduling_jobs_on_identical_parallel_machines/scheduling_jobs_on_identical_parallel_machines.hpp +++ /dev/null @@ -1,88 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Smulewicz -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file scheduling_jobs_on_identical_parallel_machines.hpp - * @brief - * @author Piotr Smulewicz - * @version 1.0 - * @date 2013-09-06 - */ -#ifndef PAAL_SCHEDULING_JOBS_ON_IDENTICAL_PARALLEL_MACHINES_HPP -#define PAAL_SCHEDULING_JOBS_ON_IDENTICAL_PARALLEL_MACHINES_HPP - -#include "paal/utils/functors.hpp" -#include "paal/utils/type_functions.hpp" -#include "paal/utils/irange.hpp" - -#include -#include -#include -#include - - -namespace paal { -namespace greedy { -namespace detail { -class compare { - public: - compare(std::vector &load) : m_load(load) {} - bool operator()(int lhs, int rhs) const { - return m_load[lhs] < m_load[rhs]; - } - - private: - const std::vector &m_load; -}; -} //!detail - -/** - * @brief this is solve scheduling jobs on identical parallel machines problem - * and return schedule - * example: - * \snippet scheduling_jobs_on_identical_parallel_machines_example.cpp Scheduling Jobs On Identical Parallel Machines Example - * - * example file is - * scheduling_jobs_on_identical_parallel_machines_example.cpp - * @param n_machines - * @param first - * @param last - * @param result - * @param get_time - */ -template -void scheduling_jobs_on_identical_parallel_machines(int n_machines, - InputIterator first, - InputIterator last, - OutputIterator result, - GetTime get_time) { - using JobReference = - typename std::iterator_traits::reference; - using Time = pure_result_of_t; - - std::sort(first, last, utils::greater()); - std::vector load(n_machines); - - std::priority_queue, detail::compare> machines(load); - - for (auto machine_id : irange(n_machines)) { - machines.push(machine_id); - } - for (auto job_iter = first; job_iter < last; job_iter++) { - int least_loaded_machine = machines.top(); - machines.pop(); - load[least_loaded_machine] -= get_time(*job_iter); - 
machines.push(least_loaded_machine); - *result = std::make_pair(least_loaded_machine, job_iter); - ++result; - } -} - -} //!greedy -} //!paal - -#endif // PAAL_SCHEDULING_JOBS_ON_IDENTICAL_PARALLEL_MACHINES_HPP diff --git a/patrec/inc/WireCellPatRec/paal/greedy/scheduling_jobs_with_deadlines_on_a_single_machine/scheduling_jobs_with_deadlines_on_a_single_machine.hpp b/patrec/inc/WireCellPatRec/paal/greedy/scheduling_jobs_with_deadlines_on_a_single_machine/scheduling_jobs_with_deadlines_on_a_single_machine.hpp deleted file mode 100644 index 56d46528b..000000000 --- a/patrec/inc/WireCellPatRec/paal/greedy/scheduling_jobs_with_deadlines_on_a_single_machine/scheduling_jobs_with_deadlines_on_a_single_machine.hpp +++ /dev/null @@ -1,103 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Smulewicz -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file scheduling_jobs_with_deadlines_on_a_single_machine.hpp - * @brief - * @author Piotr Smulewicz - * @version 1.0 - * @date 2013-09-09 - */ -#ifndef PAAL_SCHEDULING_JOBS_WITH_DEADLINES_ON_A_SINGLE_MACHINE_HPP -#define PAAL_SCHEDULING_JOBS_WITH_DEADLINES_ON_A_SINGLE_MACHINE_HPP - -#include "paal/utils/functors.hpp" -#include "paal/utils/type_functions.hpp" -#include "paal/utils/assign_updates.hpp" - -#include -#include - -#include -#include -#include -#include - -namespace paal { -namespace greedy { - -/** - * @brief solve scheduling jobs on identical parallel machines problem - * and fill start time of all jobs - * example: - * \snippet scheduling_jobs_with_deadlines_on_a_single_machine_example.cpp Scheduling Jobs On Single Machine Example - * example file is - * scheduling_jobs_with_deadlines_on_a_single_machine_example.cpp - * @param first - jobs begin - * @param last - 
jobs end - * @param get_time - * @param get_release_date - * @param get_due_date - * @param result - * @tparam Time - * @tparam InputIterator - * @tparam OutputIterator - * @tparam GetTime - * @tparam GetDueDate - * @tparam GetReleaseDate - */ -template -auto scheduling_jobs_with_deadlines_on_a_single_machine( - const InputIterator first, const InputIterator last, GetTime get_time, - GetReleaseDate get_release_date, GetDueDate get_due_date, - OutputIterator result) { - using Time = puretype(get_time(*first)); - std::vector jobs; - std::copy(boost::make_counting_iterator(first), - boost::make_counting_iterator(last), std::back_inserter(jobs)); - - auto get_due_date_from_iterator = - utils::make_lift_iterator_functor(get_due_date); - auto due_date_compatator = utils::make_functor_to_comparator( - get_due_date_from_iterator, utils::greater{}); - using QueueType = std::priority_queue< - InputIterator, std::vector, decltype(due_date_compatator)>; - QueueType active_jobs_iters(due_date_compatator); - - auto get_release_date_from_iterator = - utils::make_lift_iterator_functor(get_release_date); - boost::sort(jobs, - utils::make_functor_to_comparator(get_release_date_from_iterator)); - Time start_idle = Time(); - Time longest_delay = Time(); - auto do_job = [&]() { - auto job_iter = active_jobs_iters.top(); - active_jobs_iters.pop(); - Time start_time = std::max(start_idle, get_release_date(*job_iter)); - start_idle = start_time + get_time(*job_iter); - assign_max(longest_delay, start_idle - get_due_date(*job_iter)); - *result = std::make_pair(job_iter, start_time); - ++result; - }; - for (auto job_iter : jobs) { - while (!active_jobs_iters.empty() && - get_release_date(*job_iter) > start_idle) - do_job(); - active_jobs_iters.push(job_iter); - } - while (!active_jobs_iters.empty()) { - do_job(); - } - - return longest_delay; -} - -} //!greedy -} //!paal - -#endif // PAAL_SCHEDULING_JOBS_WITH_DEADLINES_ON_A_SINGLE_MACHINE_HPP diff --git 
a/patrec/inc/WireCellPatRec/paal/greedy/set_cover/budgeted_maximum_coverage.hpp b/patrec/inc/WireCellPatRec/paal/greedy/set_cover/budgeted_maximum_coverage.hpp deleted file mode 100644 index 609827abc..000000000 --- a/patrec/inc/WireCellPatRec/paal/greedy/set_cover/budgeted_maximum_coverage.hpp +++ /dev/null @@ -1,434 +0,0 @@ -/** - * @file budgeted_maximum_coverage.hpp - * @brief - * @author Piotr Smulewicz - * @version 1.0 - * @date 2014-03-18 - */ -#ifndef PAAL_BUDGETED_MAXIMUM_COVERAGE_HPP -#define PAAL_BUDGETED_MAXIMUM_COVERAGE_HPP - -#include "paal/data_structures/fraction.hpp" -#include "paal/utils/accumulate_functors.hpp" -#include "paal/utils/algorithms/subset_backtrack.hpp" -#include "paal/utils/functors.hpp" -#include "paal/utils/type_functions.hpp" - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -namespace paal { -namespace greedy { -namespace detail { - -template struct set_data_type { - set_data_type() - : m_weight_of_uncovered_elements{}, m_cost{}, m_is_processed{ true } {} - ElementWeight m_weight_of_uncovered_elements; // sum of weight of uncovered - // elements in backtrack and - // greedy - SetCost m_cost; - bool m_is_processed; - // set is processed if is selected or is not selected and at least one of - // the following situations occurs - // -we have not enough budget to select set - // -sum of weight of uncovered elements belong to set equal 0 -}; - -template -using element_weight_t = pure_result_of_t>)>; - -const int UNCOVERED = -1; -template -class selector { - const Budget m_budget; - SetCost &m_cost_of_solution; - std::vector m_selected_sets; - SetIdToData &m_sets_data; - const ElementWeight &m_weight_of_bests_solution; - ElementWeight &m_weight_of_covered_elements; - SetIdToElements m_set_id_to_elements; - std::vector &m_covered_by; - std::vector> &m_sets_covering_element; - ElementIndex &m_get_el_index; - GetWeightOfElement &m_element_to_weight; - DecreseWeight m_decrese_weight; 
- - using SetData = range_to_elem_t; - - public: - selector(const Budget budget, SetCost &cost_of_solution, - SetIdToData &sets_data, - const ElementWeight &weight_of_bests_solution, - ElementWeight &weight_of_covered_elements, - SetIdToElements set_id_to_elements, std::vector &covered_by, - std::vector> &sets_covering_element, - ElementIndex &get_el_index, GetWeightOfElement &element_to_weight, - DecreseWeight decrese_weight) - : m_budget(budget), m_cost_of_solution(cost_of_solution), - m_sets_data(sets_data), - m_weight_of_bests_solution(weight_of_bests_solution), - m_weight_of_covered_elements(weight_of_covered_elements), - m_set_id_to_elements(set_id_to_elements), m_covered_by(covered_by), - m_sets_covering_element(sets_covering_element), - m_get_el_index(get_el_index), m_element_to_weight(element_to_weight), - m_decrese_weight(decrese_weight) {} - - // we return true if (we select set) or (set is already in solution) - bool select_set_backtrack(int selected_set_id, bool in_reset = false) { - if(!can_select(m_sets_data[selected_set_id])) return false; - - select_set(selected_set_id, true); - if(!in_reset) m_selected_sets.push_back(selected_set_id); - return true; - } - - // we return true if we violated budget - bool select_set_greedy(int selected_set_id) { - auto &select_set_data = m_sets_data[selected_set_id]; - - if(!can_select(select_set_data)) return false; - m_selected_sets.push_back(selected_set_id); - - if(greedy_prune(select_set_data)) return true; - select_set(selected_set_id, false); - return false; - } - - - void deselect_set(int selected_set_id, bool backtrack = true) { - // we deselect set - - m_cost_of_solution -= m_sets_data[selected_set_id].m_cost; - for (auto element : m_set_id_to_elements(selected_set_id)) { - if (covered_by(element) == selected_set_id) { - m_covered_by[m_get_el_index(element)] = UNCOVERED; - auto weight_of_element = m_element_to_weight(element); - cover_element(element, -weight_of_element, backtrack, false); - } - } - 
m_selected_sets.pop_back(); - } - - void set_unprocessed( - boost::iterator_range::const_iterator> const & set_ids) { - for (auto set_id : set_ids) { - m_sets_data[set_id].m_is_processed = false; - } - }; - - void reset() { - set_unprocessed(m_selected_sets); - if (m_selected_sets.size() > 0) { - boost::for_each(m_selected_sets, [&](int selected_set_id) { - select_set_backtrack(selected_set_id, true); - }); - } - } - - std::size_t size() { - return m_selected_sets.size(); - } - - void resize(std::size_t size) { - return m_selected_sets.resize(size); - } - - template - void copy_to(OutputIterator out){ - boost::copy(m_selected_sets, out); - } -private: - - void select_set(int selected_set_id, bool backtrack) { - auto &select_set_data = m_sets_data[selected_set_id]; - - m_cost_of_solution += select_set_data.m_cost; - for (auto element : m_set_id_to_elements(selected_set_id)) { - if (covered_by(element) == UNCOVERED) { /* we do not cover the - elements being covered*/ - - m_covered_by[m_get_el_index(element)] = selected_set_id; - auto weight_of_element = m_element_to_weight(element); - cover_element(element, weight_of_element, backtrack); - } - } - } - - /// optimization: - /// in greedy phase we get sets in decreasing density order, so we can cut - /// when spend rest of budget with current density product solution, worse - /// then best found solution. - bool greedy_prune(const SetData & set_data) { - return (m_budget - m_cost_of_solution) * set_data.m_weight_of_uncovered_elements <= - static_cast(set_data.m_cost * (m_weight_of_bests_solution - m_weight_of_covered_elements)); - } - - ///this function is ALWAYS called from select_set, thats why we set set_data.m_is_processed! 
- bool can_select(SetData & set_data) { - if (set_data.m_is_processed) return false; - set_data.m_is_processed = true; - return static_cast(m_cost_of_solution + set_data.m_cost) <= m_budget && - set_data.m_weight_of_uncovered_elements!= ElementWeight{}; - } - - template - void cover_element(Element && el, ElementWeight weight_diff, bool backtrack, bool select = true) { - m_weight_of_covered_elements += weight_diff; - for (auto set_id : - m_sets_covering_element[m_get_el_index(el)]) { - if (m_sets_data[set_id].m_weight_of_uncovered_elements > - weight_diff || backtrack) { - m_sets_data[set_id].m_weight_of_uncovered_elements -= weight_diff; - if (!backtrack && !m_sets_data[set_id].m_is_processed) { - m_decrese_weight(set_id); - } - } else { - if (select) { - m_sets_data[set_id].m_is_processed = true; - } - } - } - } - - template - auto covered_by(Element && el) const - -> decltype(m_covered_by[m_get_el_index(el)]) { - - return m_covered_by[m_get_el_index(el)]; - } -}; - -template -auto make_selector(const Budget budget, SetCost &cost_of_solution, - SetIdToData &sets_data, - const ElementWeight &weight_of_bests_solution, - ElementWeight &weight_of_covered_elements, - SetIdToElements set_id_to_elements, - std::vector &covered_by, - std::vector> &sets_covering_element, - ElementIndex &get_el_index, - GetWeightOfElement &element_to_weight, - DecreseWeight decrese_weight) { - return selector( - budget, cost_of_solution, sets_data, - weight_of_bests_solution, weight_of_covered_elements, - set_id_to_elements, covered_by, sets_covering_element, get_el_index, - element_to_weight, decrese_weight); -} -} //!detail - -/** - * @brief this is solve Set Cover problem - * and return set cover cost - * example: - * \snippet set_cover_example.cpp Set Cover Example - * - * complete example is set_cover_example.cpp - * @param sets - * @param set_to_cost - * @param set_to_elements - * @param result set iterators of chosen sets - * @param get_el_index - * @param budget - * @param 
element_to_weight - * @param initial_set_size - * @tparam SetRange - * @tparam GetCostOfSet - * @tparam GetElementsOfSet - * @tparam OutputIterator - * @tparam ElementIndex - * @tparam Budget - * @tparam GetWeightOfElement - */ -template -auto budgeted_maximum_coverage( - SetRange && sets, GetCostOfSet set_to_cost, - GetElementsOfSet set_to_elements, OutputIterator result, - ElementIndex get_el_index, Budget budget, - GetWeightOfElement element_to_weight = GetWeightOfElement(), - const unsigned int initial_set_size = 3) { - - using set_reference = typename boost::range_reference::type; - using element_weight = typename detail::element_weight_t< - set_reference, GetElementsOfSet, GetWeightOfElement>; - using set_cost = pure_result_of_t; - using set_id_to_data = std::vector>; - - auto nu_sets = boost::distance(sets); - set_id_to_data initial_sets_data(nu_sets), sets_data(nu_sets); - set_cost cost_of_solution{}, cost_of_best_solution{}; - int number_of_elements = 0; - - // we find max index of elements in all sets - for (auto set_and_data : boost::combine(sets, initial_sets_data)) { - auto const & set = boost::get<0>(set_and_data); - auto &cost = boost::get<1>(set_and_data).m_cost; - cost = set_to_cost(set); - assert(cost != set_cost{}); - auto const & elements = set_to_elements(set); - if (!boost::empty(elements)) { - number_of_elements = std::max(number_of_elements, - *max_element_functor(elements, get_el_index) + 1); - } - } - element_weight weight_of_covered_elements{}, weight_of_bests_solution{}; - std::vector best_solution(1); - std::vector covered_by( - number_of_elements, detail::UNCOVERED); // index of the first set that covers - // element or -1 if element is uncovered - std::vector sets_id(nu_sets); - boost::iota(sets_id, 0); - std::vector> sets_covering_element(number_of_elements); - auto decreasing_density_order = - utils::make_functor_to_comparator([&](int x) { - return data_structures::make_fraction( - sets_data[x].m_weight_of_uncovered_elements, 
sets_data[x].m_cost); - }); - using queue = boost::heap::d_ary_heap< - int, boost::heap::arity<3>, boost::heap::mutable_, - boost::heap::compare>; - - queue uncovered_set_queue{ decreasing_density_order }; - std::vector set_id_to_handle(nu_sets); - - // we fill sets_covering_element and setToWeightOfElements - for (auto set : sets | boost::adaptors::indexed()) { - auto set_id = set.index(); - auto &set_data = initial_sets_data[set_id]; - - for (auto &&element : set_to_elements(set.value())) { - sets_covering_element[get_el_index(element)].push_back(set_id); - set_data.m_weight_of_uncovered_elements += element_to_weight(element); - } - if (initial_set_size == - 0) { /* we check all one element set. if initial_set_size!= 0 then - we will do it anyway */ - set_cost cost_of_set = set_data.m_cost; - if (set_data.m_weight_of_uncovered_elements >= weight_of_bests_solution && - static_cast(cost_of_set) <= budget) { - weight_of_bests_solution = set_data.m_weight_of_uncovered_elements; - best_solution[0] = set_id; - cost_of_best_solution = cost_of_set; - } - } - }; - - auto sort_sets = [&](std::vector& sets_range) { - boost::sort(sets_range, utils::make_functor_to_comparator([&](int x) { - return sets_data[x].m_weight_of_uncovered_elements; - })); - return sets_range.end(); - }; - auto selector = detail::make_selector - (budget, cost_of_solution,sets_data, - weight_of_bests_solution,weight_of_covered_elements, - [&](int selected_set_id){return set_to_elements(sets[selected_set_id]);}, - covered_by,sets_covering_element,get_el_index, - element_to_weight, - [&](int set_id){uncovered_set_queue.decrease(set_id_to_handle[set_id]);}); - - boost::copy(initial_sets_data, sets_data.begin()); - sort_sets(sets_id); - auto solver = make_subset_backtrack(sets_id); - - auto reset = [&]() { - boost::copy(initial_sets_data, sets_data.begin()); - cost_of_solution = set_cost{}; - selector.set_unprocessed(solver.get_moves()); - boost::fill(covered_by, detail::UNCOVERED); - 
weight_of_covered_elements = element_weight{}; - selector.reset(); - }; - - auto on_pop = [&](int deselected_set_id) { - selector.deselect_set(deselected_set_id); - selector.set_unprocessed(solver.get_moves()); - }; - - auto save_best_solution = [&]() { - // we check that new solution is better than any previous - // if is we remember them - // tricky: either better weight, or equal weight and lower cost - if (std::make_pair(weight_of_covered_elements, cost_of_best_solution) > - std::make_pair(weight_of_bests_solution, cost_of_solution)) { - weight_of_bests_solution = weight_of_covered_elements; - cost_of_best_solution = cost_of_solution; - best_solution.resize(selector.size()); - selector.copy_to(best_solution.begin()); - } - }; - auto greedy_phase = [&]() { - uncovered_set_queue.clear(); - auto moves = solver.get_moves(); - if(boost::empty(moves)) return; - - for (auto set_id : moves) { - set_id_to_handle[set_id] = uncovered_set_queue.push(set_id); - } - /* we select set with best elements to cost ratio, and add it to the - * result until all elements are covered*/ - int uncovered_set_id; - do { - uncovered_set_id = uncovered_set_queue.top(); - uncovered_set_queue.pop(); - } while (!selector.select_set_greedy(uncovered_set_id) && - !uncovered_set_queue.empty()); - }; - - auto can_push = [&](int candidate) { - if (!selector.select_set_backtrack(candidate)) { - return false; - } - if (selector.size() == initial_set_size) { - greedy_phase(); - save_best_solution(); - selector.resize(initial_set_size-1); - reset(); - return false; - } else { - save_best_solution(); - return true; - } - }; - - reset(); - if (initial_set_size != 0) { /* if initial_set_size == 0 then we do greedy - algorithm once starts from empty initial Set. 
- Otherwise we starts greedy algorithm from all initial set of size equal - initial_set_size those for which we have enough budget*/ - solver.solve(can_push, on_pop, sort_sets); - } else { - greedy_phase(); - save_best_solution(); - } - - for (auto set_id : best_solution) { - *result = *(sets.begin() + set_id); - ++result; - } - return weight_of_bests_solution; -}; -} //!greedy -} //!paal - -#endif /* PAAL_BUDGETED_MAXIMUM_COVERAGE_HPP */ diff --git a/patrec/inc/WireCellPatRec/paal/greedy/set_cover/maximum_coverage.hpp b/patrec/inc/WireCellPatRec/paal/greedy/set_cover/maximum_coverage.hpp deleted file mode 100644 index 8319f0ed5..000000000 --- a/patrec/inc/WireCellPatRec/paal/greedy/set_cover/maximum_coverage.hpp +++ /dev/null @@ -1,61 +0,0 @@ -/** - * @file maximum_coverage.hpp - * @brief - * @author Piotr Smulewicz - * @version 1.0 - * @date 2014-04-16 - */ -#ifndef PAAL_MAXIMUM_COVERAGE_HPP -#define PAAL_MAXIMUM_COVERAGE_HPP - -#include "paal/greedy/set_cover/budgeted_maximum_coverage.hpp" -#include "paal/utils/functors.hpp" - -namespace paal{ -namespace greedy{ -/** - * @brief this is solve Set Cover problem - * and return set cover cost - * example: - * \snippet set_cover_example.cpp Set Cover Example - * - * complete example is set_cover_example.cpp - * @param sets - * @param set_to_elements - * @param result set iterators of chosen sets - * @param get_el_index - * @param number_of_sets_to_select - * @param get_weight_of_element - * @tparam SetRange - * @tparam GetElementsOfSet - * @tparam OutputIterator - * @tparam GetElementIndex - * @tparam GetWeightOfElement - */ - -template -auto maximum_coverage( - SetRange && sets, - GetElementsOfSet set_to_elements, - OutputIterator result, - GetElementIndex get_el_index, - unsigned int number_of_sets_to_select, - GetWeightOfElement get_weight_of_element = GetWeightOfElement{} - ) { - auto set_to_cost = paal::utils::return_one_functor{}; - return budgeted_maximum_coverage( - sets, - set_to_cost, - set_to_elements, - 
result, - get_el_index, - number_of_sets_to_select, - get_weight_of_element, - 0 - ); -}; -}//!greedy -}//!paal - -#endif /* PAAL_MAXIMUM_COVERAGE_HPP */ diff --git a/patrec/inc/WireCellPatRec/paal/greedy/set_cover/set_cover.hpp b/patrec/inc/WireCellPatRec/paal/greedy/set_cover/set_cover.hpp deleted file mode 100644 index f93429e41..000000000 --- a/patrec/inc/WireCellPatRec/paal/greedy/set_cover/set_cover.hpp +++ /dev/null @@ -1,76 +0,0 @@ -/** - * @file set_cover.hpp - * @brief - * @author Piotr Smulewicz - * @version 1.0 - * @date 2014-02-12 - */ -#ifndef PAAL_SET_COVER_HPP -#define PAAL_SET_COVER_HPP - -#include "paal/greedy/set_cover/budgeted_maximum_coverage.hpp" -#include "paal/utils/accumulate_functors.hpp" -#include "paal/utils/functors.hpp" -#include "paal/utils/type_functions.hpp" - -#include - -#include -#include - -namespace paal{ -namespace greedy{ -namespace detail{ -template -using set_range_cost_t = pure_result_of_t< - GetCostOfSet(typename boost::range_reference::type)>; - -}//!detail -/** - * @brief this is solve Set Cover problem - * and return set cover cost - * example: - * \snippet set_cover_example.cpp Set Cover Example - * - * complete example is set_cover_example.cpp - * @param sets - * @param set_to_cost - * @param set_to_elements - * @param result set iterators of chosen sets - * @param get_el_index - * @tparam SetRange - * @tparam GetCostOfSet - * @tparam GetElementsOfSet - * @tparam OutputIterator - * @tparam GetElementIndex - */ -template -auto set_cover(SetRange && sets, - GetCostOfSet set_to_cost, - GetElementsOfSet set_to_elements, - OutputIterator result, - GetElementIndex get_el_index - ) { - using set_cost=typename detail::set_range_cost_t; - //TODO use sum functor from r=Robert commit - auto cost_of_all_sets=accumulate_functor(sets, set_cost{}, set_to_cost); - set_cost cost_of_solution{}; - budgeted_maximum_coverage(sets, - set_to_cost, - set_to_elements, - boost::make_function_output_iterator([&](int set){ - cost_of_solution += 
set_to_cost(set); - *result = set; - ++result; - }), - get_el_index, - cost_of_all_sets, - paal::utils::return_one_functor(), - 0 - ); - return cost_of_solution; -}; -}//!greedy -}//!paal - -#endif /* PAAL_SET_COVER_HPP */ diff --git a/patrec/inc/WireCellPatRec/paal/greedy/shortest_superstring/prefix_tree.hpp b/patrec/inc/WireCellPatRec/paal/greedy/shortest_superstring/prefix_tree.hpp deleted file mode 100644 index 02917a596..000000000 --- a/patrec/inc/WireCellPatRec/paal/greedy/shortest_superstring/prefix_tree.hpp +++ /dev/null @@ -1,178 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Smulewicz -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file prefix_tree.hpp - * @brief - * @author Piotr Smulewicz - * @version 1.0 - * @date 2013-09-10 - */ -#ifndef PAAL_PREFIX_TREE_HPP -#define PAAL_PREFIX_TREE_HPP - -#include -namespace paal { -namespace greedy { -namespace detail { - -template class prefix_tree { - - struct Node { - Letter letter = DELIMITER; - Node *son = CHILDLESS; - std::vector prefixes; // ends of all prefixes of single words in - // concatenate words corresponding to Node - Node(int _letter) : letter(_letter) {}; - Node() {}; // root - }; - - public: - prefix_tree(int length, std::vector const &suffix_array, - std::vector const &sumWords, - std::vector const &lcp, - std::vector const &lengthSuffixWord) - : m_length(length), m_prefix_tree(m_length), m_which_son_am_i(m_length), - m_prefix_to_tree(m_length), m_suffix_to_tree(m_length), - m_suffix_array(suffix_array), m_sum_words(sumWords), m_lcp(lcp), - m_length_suffix_word(lengthSuffixWord) {} - - void build_prefix_tree() { - m_prefix_tree.push_back(Node()); // root - for (auto suffix : m_suffix_array) { - // memory protection and we add only 
whole words in lexographic - // order - if ((suffix != 0) && (m_sum_words[suffix - 1] == DELIMITER)) { - add_word_to_prefix_tree(suffix); - } - } - } - - void erase_word_form_prefix_tree(int wordBegin) { - for (int letterOfWord = 0; - m_sum_words[letterOfWord + wordBegin] != DELIMITER; - ++letterOfWord) { - auto letterIdx = wordBegin + letterOfWord; - auto whichSon = m_which_son_am_i[letterIdx]; - auto &nodePrefixes = m_prefix_to_tree[letterIdx]->prefixes; - assert(std::size_t(whichSon) < nodePrefixes.size()); - int lastPrefix = nodePrefixes.back(); - nodePrefixes[whichSon] = lastPrefix; - m_which_son_am_i[lastPrefix + letterOfWord] = whichSon; - nodePrefixes.pop_back(); - } - } - - // for all suffix of word: if suffix is equal to any prefix of word we - // remember position in prefix tree coresponding to suffix - void fill_suffix_to_tree() { - for (int suffix = m_length - 1, lastWord = 0, commonPrefix = 0; - suffix > 0; suffix--) { - auto beginOfSuffix = m_suffix_array[suffix]; - if (beginOfSuffix == 0 || - m_sum_words[beginOfSuffix - 1] == DELIMITER) { - lastWord = beginOfSuffix; - commonPrefix = m_lcp[suffix]; - } else { - if (commonPrefix == m_length_suffix_word[beginOfSuffix]) { - m_suffix_to_tree[suffix] = - m_prefix_to_tree[lastWord + commonPrefix - 1]; - } - if (m_lcp[suffix] < commonPrefix) { - commonPrefix = m_lcp[suffix]; - } - } - } - } - - int get_prefixequal_to_suffix(int suffix, int firstWordInBlock) { - Node *nodeCorrespondingToSuffix = m_suffix_to_tree[suffix]; - if (nodeCorrespondingToSuffix == NO_SUFFIX_IN_TREE) { - return NOT_PREFIX; - } - auto const &overlapPrefixes = nodeCorrespondingToSuffix->prefixes; - - if (overlapPrefixes.size()) { - int whichPrefix = ANY_PREFIX; // which prefix of prefixes equal to - // suffix, will be joined - // check if first prefix belong to same block as prefix (avoid - // loops) - if (overlapPrefixes[whichPrefix] == firstWordInBlock) { - if (overlapPrefixes.size() >= 2) { - whichPrefix = ANY_OTHER_PREFIX; - } else 
{ - return NOT_PREFIX; - } - } - return overlapPrefixes[whichPrefix]; - } else { - return NOT_PREFIX; - } - } - - private: - void add_word_to_prefix_tree(int word) { - Node *node = &m_prefix_tree[ROOT]; - int letter = word; - // we go by patch until Letter on patch all equal to letter in words - // we only check last son because we add words in lexographic order - while (node->son != CHILDLESS && - node->son->letter == m_sum_words[letter] && - m_sum_words[letter] != DELIMITER) { - node = node->son; - ++letter; - } - // we add new Node - while (m_sum_words[letter]) { - // if this asserts, you have very strange implementation of stl - assert(m_prefix_tree.capacity() > m_prefix_tree.size()); - m_prefix_tree.push_back(Node(m_sum_words[letter])); - node->son = &m_prefix_tree.back(); - node = &m_prefix_tree.back(); - ++letter; - } - node = &m_prefix_tree[ROOT]; - letter = word; - // we fill: - // m_prefix_to_tree - // m_which_son_am_i - // and add to Node.prefixes coresponding prefixes - while (m_sum_words[letter] != DELIMITER) { - node = node->son; - m_prefix_to_tree[letter] = node; - m_which_son_am_i[letter] = node->prefixes.size(); - node->prefixes.push_back(word); - ++letter; - } - } - int m_length; - - std::vector m_prefix_tree; - std::vector m_which_son_am_i; - - std::vector m_prefix_to_tree; - std::vector m_suffix_to_tree; - - const std::vector &m_suffix_array; - const std::vector &m_sum_words; - const std::vector &m_lcp; - const std::vector &m_length_suffix_word; - - const static int ROOT = 0; - const static int NOT_PREFIX = -1; - const static int ANY_PREFIX = 0; - const static int ANY_OTHER_PREFIX = 1; - const static std::nullptr_t NO_SUFFIX_IN_TREE; - const static std::nullptr_t CHILDLESS; - - public: - const static Letter DELIMITER = 0; -}; -} //!detail -} //!greedy -} //!paal -#endif // PAAL_PREFIX_TREE_HPP diff --git a/patrec/inc/WireCellPatRec/paal/greedy/shortest_superstring/shortest_superstring.hpp 
b/patrec/inc/WireCellPatRec/paal/greedy/shortest_superstring/shortest_superstring.hpp deleted file mode 100644 index 1786514a7..000000000 --- a/patrec/inc/WireCellPatRec/paal/greedy/shortest_superstring/shortest_superstring.hpp +++ /dev/null @@ -1,242 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Smulewicz -// -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file shortest_superstring.hpp - * @brief - * @author Piotr Smulewicz - * @version 1.0 - * @date 2013-08-29 - */ -#ifndef PAAL_SHORTEST_SUPERSTRING_HPP -#define PAAL_SHORTEST_SUPERSTRING_HPP - - -#include "paal/utils/algorithms/suffix_array/lcp.hpp" -#include "paal/utils/algorithms/suffix_array/suffix_array.hpp" -#include "paal/utils/type_functions.hpp" -#include "paal/greedy/shortest_superstring/prefix_tree.hpp" -#include "paal/data_structures/bimap.hpp" -#include "paal/utils/irange.hpp" - -#include - -#include -#include -#include -#include - -namespace paal { -namespace greedy { -namespace detail { - -/** - * @class shortest_superstring - * @brief class to solve shortest superstring 3.5 aproximation, - * using greedy algorithm: - * contract pair of words with largest overlap until one word stays - \snippet shortest_superstring_example.cpp Shortest Superstring Example - * - * example file is shortest_superstring_example.cpp - * - * @tparam Words - */ -template class shortest_superstring { - public: - typedef range_to_elem_t Word; - typedef range_to_elem_t Letter; - - shortest_superstring(const Words &words) - : m_length(count_sum_lenght(words)), - m_prefix_tree(m_length, m_suffix_array, m_sum_words, m_lcp, - m_length_suffix_word) { - - initialize(words); - - suffix_array(m_sum_words, m_suffix_array); - - data_structures::rank(m_suffix_array, m_rank); - - 
lcp(m_suffix_array, m_rank, m_lcp, m_sum_words); - - m_prefix_tree.build_prefix_tree(); - - m_prefix_tree.fill_suffix_to_tree(); - - join_all_words(); - } - - /** - * @brief return word contains all words as subwords, - * of lenght at most 3.5 larger than shortest superstring. - * - \snippet shortest_superstring_example.cpp Shortest Superstring Example - * - * example file is shortest_superstring_example.cpp - * - */ - Word get_solution() { - Word answer; - for (auto posInSumWords : irange(1, m_length)) { - if ((!m_is_joined_sufiix[m_pos_to_word[posInSumWords]]) && - (m_sum_words[posInSumWords - 1] == m_prefix_tree.DELIMITER)) { - for (int nextLetter = posInSumWords; - m_sum_words[nextLetter] != m_prefix_tree.DELIMITER;) { - answer.push_back(m_sum_words[nextLetter]); - if (m_res[nextLetter] == NO_OVERLAP_STARTS_HERE) { - ++nextLetter; - } else { - nextLetter = m_res[nextLetter]; - } - } - } - } - return answer; - } - - private: - int count_sum_lenght(const Words &words) { - int length = 1; - for (auto const &word : words) { - length += word.size() + 1; - } - return length; - } - - void initialize(const Words &words) { - m_nu_words = words.size(); - m_first_word_in_block_to_last_word_in_block.resize(m_length); - m_last_word_in_block_to_first_word_in_block.resize(m_length); - m_pos_to_word.resize(m_length); - m_length_suffix_word.resize(m_length); - - m_suffix_array.resize(m_length); - m_lcp.resize(m_length); - m_rank.resize(m_length); - m_res.resize(m_length); - m_sum_words.resize(m_length); - m_is_joined_prefix.resize(m_nu_words); - m_is_joined_sufiix.resize(m_nu_words); - m_length_to_pos.resize(m_length); - - m_length = 1; - int wordsId = 0; - for (auto const &word : words) { - auto wordSize = boost::distance(word); - m_length_words.push_back(wordSize); - m_length_to_pos[wordSize].push_back(m_length); - int noLetterInWord = 0; - for (auto letter : word) { - assert(letter != 0); - auto globalLetterId = m_length + noLetterInWord; - m_sum_words[globalLetterId] = 
letter; - m_pos_to_word[globalLetterId] = wordsId; - m_length_suffix_word[globalLetterId] = - wordSize - noLetterInWord; - ++noLetterInWord; - } - m_first_word_in_block_to_last_word_in_block[m_length] = m_length; - m_last_word_in_block_to_first_word_in_block[m_length] = m_length; - m_length += wordSize + 1; - ++wordsId; - } - } - - void erase_word_form_prefix_tree(int word) { - m_is_joined_sufiix[m_pos_to_word[word]] = JOINED; - m_prefix_tree.erase_word_form_prefix_tree(word); - } - - void join_all_words() { - auto ovelapSizeRange = irange(m_length) | boost::adaptors::reversed; - for (auto overlapSize : ovelapSizeRange) { - for (auto word : m_length_to_pos[overlapSize]) { - if (m_lcp[m_rank[word]] >= overlapSize) { // check if word is - // substring - erase_word_form_prefix_tree(word); - } - } - } - - // in each iteration we join all pair of words who have overlap size - // equal overlapSize - for (auto overlapSize : ovelapSizeRange) { - for (auto word : m_long_words) { - join_word(word, overlapSize); - } - for (auto word : m_length_to_pos[overlapSize]) { - if (m_lcp[m_rank[word]] < overlapSize) { // check if word is not - // substring - m_long_words.push_back(word); - } - } - } - } - - void join_word(int ps, int overlap) { - if (m_is_joined_prefix[m_pos_to_word[ps]] == JOINED) { - return; - }; - - int suffix = m_rank[ps + m_length_words[m_pos_to_word[ps]] - overlap]; - - int prefix = m_prefix_tree.get_prefixequal_to_suffix( - suffix, m_last_word_in_block_to_first_word_in_block[ps]); - - if (prefix == NOT_PREFIX) { - return; - } - m_res[ps + m_length_words[m_pos_to_word[ps]] - overlap - 1] = prefix; - m_is_joined_prefix[m_pos_to_word[ps]] = JOINED; - - m_last_word_in_block_to_first_word_in_block[ - m_first_word_in_block_to_last_word_in_block[prefix]] = - m_last_word_in_block_to_first_word_in_block[ps]; - m_first_word_in_block_to_last_word_in_block[ - m_last_word_in_block_to_first_word_in_block[prefix]] = prefix; - erase_word_form_prefix_tree(prefix); - } - - int 
m_length, m_nu_words; - std::vector m_sum_words; - std::vector m_first_word_in_block_to_last_word_in_block, - m_last_word_in_block_to_first_word_in_block, m_pos_to_word, - m_length_words, m_length_suffix_word, m_suffix_array, m_lcp, m_rank, - m_res, m_long_words; - std::vector m_is_joined_prefix, m_is_joined_sufiix; - std::vector> m_length_to_pos; - - prefix_tree m_prefix_tree; - - const static bool JOINED = true; - - const static int NO_OVERLAP_STARTS_HERE = 0; - const static int NOT_PREFIX = -1; -}; -} //!detail - -/** - * @param words - * @brief return word contains all words as subwords, - * of lenght at most 3.5 larger than shortest superstring. - * words canot contains letter 0 - \snippet shortest_superstring_example.cpp Shortest Superstring Example - * - * example file is shortest_superstring_example.cpp - * @tparam Words - */ -template -auto shortestSuperstring(const Words &words)->decltype( - std::declval>().get_solution()) { - detail::shortest_superstring solver(words); - return solver.get_solution(); -} -; - -} //!greedy -} //!paal -#endif // PAAL_SHORTEST_SUPERSTRING_HPP diff --git a/patrec/inc/WireCellPatRec/paal/greedy/steiner_tree_greedy.hpp b/patrec/inc/WireCellPatRec/paal/greedy/steiner_tree_greedy.hpp deleted file mode 100644 index 5ae6b093f..000000000 --- a/patrec/inc/WireCellPatRec/paal/greedy/steiner_tree_greedy.hpp +++ /dev/null @@ -1,237 +0,0 @@ -//======================================================================= -// Copyright (c) 2014 Piotr Smulewicz -// 2013 Piotr Wygocki -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file steiner_tree_greedy.hpp - * @brief - * @author Piotr Wygocki, Piotr Smulewicz - * @version 1.0 - * @date 2013-11-27 - */ -#ifndef PAAL_STEINER_TREE_GREEDY_HPP -#define PAAL_STEINER_TREE_GREEDY_HPP - -#include "paal/utils/accumulate_functors.hpp" -#include "paal/utils/functors.hpp" - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include -#include -#include -#include -#include -#include - -#include -#include - -/// enum for edge base property -enum edge_base_t { edge_base }; - -namespace boost { -/// macro create edge base property -BOOST_INSTALL_PROPERTY(edge, base); -} - -namespace paal { - -/** - * @brief enum indicates if given color represents terminal or NONTERMINAL. - */ -enum Terminals { NONTERMINAL, TERMINAL }; - -namespace detail { -template -class nearest_recorder - : boost::base_visitor> { - public: - using event_filter = Tag; - nearest_recorder(NearestMap &nearest_map, LastEdgeMap &vpred) - : m_nearest_map(nearest_map), m_vpred(vpred) {}; - template - void operator()(Edge const e, Graph const &g) { - m_nearest_map[target(e, g)] = m_nearest_map[source(e, g)]; - m_vpred[target(e, g)] = e; - } - - private: - NearestMap &m_nearest_map; - LastEdgeMap &m_vpred; -}; - -template -nearest_recorder -make_nearest_recorder(NearestMap &nearest_map, LastEdgeMap &vpred, Tag) { - return nearest_recorder{ nearest_map, vpred }; -} -} -/** - * @brief non-named version of steiner_tree_greedy - * - * @tparam Graph - * @tparam OutputIterator - * @tparam EdgeWeightMap - * @tparam ColorMap - * @param g - given graph - * @param out - edge output iterator - * @param edge_weight - * @param color_map - */ -template -auto steiner_tree_greedy(const Graph &g, OutputIterator out, - EdgeWeightMap edge_weight, ColorMap color_map) - -> typename std::pair< - 
typename boost::property_traits::value_type, - typename boost::property_traits::value_type> { - using Vertex = typename boost::graph_traits::vertex_descriptor; - using Edge = typename boost::graph_traits::edge_descriptor; - using Base = typename boost::property; - using Weight = typename boost::property_traits::value_type; - using WeightProperty = - typename boost::property; - using TerminalGraph = - boost::adjacency_list; - using EdgeTerminal = - typename boost::graph_traits::edge_descriptor; - - auto N = num_vertices(g); - - // distance array used in the dijkstra runs - std::vector distance(N); - - // computing terminals - std::vector terminals; - auto terminals_nr = accumulate_functor( - vertices(g), 0, [=](Vertex v) { return get(color_map, v); }); - terminals.reserve(terminals_nr); - for (auto v : boost::as_array(vertices(g))) { - if (get(color_map, v) == Terminals::TERMINAL) { - terminals.push_back(v); - } - } - if (terminals.empty()) { - return std::make_pair(Weight{}, Weight{}); - } - std::vector nearest_terminal(num_vertices(g)); - auto index = get(boost::vertex_index, g); - auto nearest_terminal_map = boost::make_iterator_property_map( - nearest_terminal.begin(), get(boost::vertex_index, g)); - for (auto terminal : terminals) { - nearest_terminal_map[terminal] = terminal; - } - - // compute voronoi diagram each vertex get nearest terminal and last edge on - // path to nearest terminal - auto distance_map = make_iterator_property_map(distance.begin(), index); - std::vector vpred(N); - auto last_edge = boost::make_iterator_property_map( - vpred.begin(), get(boost::vertex_index, g)); - boost::dijkstra_shortest_paths( - g, terminals.begin(), terminals.end(), boost::dummy_property_map(), - distance_map, edge_weight, index, utils::less(), - boost::closed_plus(), std::numeric_limits::max(), 0, - boost::make_dijkstra_visitor(detail::make_nearest_recorder( - nearest_terminal_map, last_edge, boost::on_edge_relaxed{}))); - - // computing distances between terminals - 
// creating terminal_graph - TerminalGraph terminal_graph(N); - for (auto w : boost::as_array(edges(g))) { - auto const &nearest_to_source = nearest_terminal_map[source(w, g)]; - auto const &nearest_to_target = nearest_terminal_map[target(w, g)]; - if (nearest_to_source != nearest_to_target) { - add_edge(nearest_to_source, nearest_to_target, - WeightProperty(distance[source(w, g)] + - distance[target(w, g)] + edge_weight[w], - Base(w)), - terminal_graph); - } - } - // computing spanning tree on terminal_graph - std::vector terminal_edge; - boost::kruskal_minimum_spanning_tree(terminal_graph, - std::back_inserter(terminal_edge)); - - // computing result - std::vector tree_edges; - tree_edges.reserve(terminals_nr); - for (auto edge : terminal_edge) { - auto base = get(edge_base, terminal_graph, edge); - tree_edges.push_back(base); - for (auto pom : { source(base, g), target(base, g) }) { - while (nearest_terminal_map[pom] != pom) { - tree_edges.push_back(vpred[pom]); - pom = source(vpred[pom], g); - } - } - } - - // because in each voronoi region we have unique patch to all vertex from - // terminal, result graph contain no cycle - // and all leaf are terminal - - boost::sort(tree_edges); - auto get_weight=[&](Edge edge){return edge_weight[edge];}; - auto lower_bound=accumulate_functor(tree_edges, Weight{}, get_weight); - auto unique_edges = boost::unique(tree_edges); - auto cost_solution=accumulate_functor(unique_edges, Weight{}, get_weight); - boost::copy(unique_edges, out); - return std::make_pair(cost_solution, lower_bound / 2.); -} - -/** - * @brief named version of steiner_tree_greedy - * - * @tparam Graph - * @tparam OutputIterator - * @tparam P - * @tparam T - * @tparam R - * @param g - given graph - * @param out - edge output iterator - * @param params - */ -template -auto steiner_tree_greedy(const Graph &g, OutputIterator out, - const boost::bgl_named_params ¶ms) { - return steiner_tree_greedy( - g, out, choose_const_pmap(get_param(params, 
boost::edge_weight), g, - boost::edge_weight), - choose_const_pmap(get_param(params, boost::vertex_color), g, - boost::vertex_color)); -} - -/** - * @brief version of steiner_tree_greedy with all default parameters - * - * @tparam Graph - * @tparam OutputIterator - * @param g - given graph - * @param out - edge output iterator - */ -template -auto steiner_tree_greedy(const Graph &g, OutputIterator out) { - return steiner_tree_greedy(g, out, boost::no_named_parameters()); -} - -} // paal - -#endif // PAAL_STEINER_TREE_GREEDY_HPP diff --git a/patrec/inc/WireCellPatRec/paal/iterative_rounding/bounded_degree_min_spanning_tree/bounded_degree_mst.hpp b/patrec/inc/WireCellPatRec/paal/iterative_rounding/bounded_degree_min_spanning_tree/bounded_degree_mst.hpp deleted file mode 100644 index e9d50a8a8..000000000 --- a/patrec/inc/WireCellPatRec/paal/iterative_rounding/bounded_degree_min_spanning_tree/bounded_degree_mst.hpp +++ /dev/null @@ -1,636 +0,0 @@ -//======================================================================= -// Copyright (c) -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file bounded_degree_mst.hpp - * @brief - * @author Piotr Godlewski - * @version 1.0 - * @date 2013-06-03 - */ -#ifndef PAAL_BOUNDED_DEGREE_MST_HPP -#define PAAL_BOUNDED_DEGREE_MST_HPP - - -#include "paal/iterative_rounding/bounded_degree_min_spanning_tree/bounded_degree_mst_oracle.hpp" -#include "paal/iterative_rounding/ir_components.hpp" -#include "paal/iterative_rounding/iterative_rounding.hpp" -#include "paal/lp/lp_row_generation.hpp" - -#include -#include -#include - -namespace paal { -namespace ir { - -namespace { -struct bounded_degree_mst_compare_traits { - static const double EPSILON; -}; - -const double bounded_degree_mst_compare_traits::EPSILON = 1e-10; -} - -/** - * @class bounded_degree_mst - * @brief The class for solving the Bounded Degree MST problem using Iterative -* Rounding. - * - * @tparam Graph input graph - * @tparam DegreeBounds map from Graph vertices to degree bounds - * @tparam CostMap map from Graph edges to costs - * @tparam VertexIndex map from Graph vertices to indices - * @tparam SpanningTreeOutputIterator - * @tparam Oracle separation oracle - */ -template -class bounded_degree_mst { - public: - /** - * Constructor. 
- */ - bounded_degree_mst(const Graph & g, const DegreeBounds & deg_bounds, - CostMap cost_map, VertexIndex index, - SpanningTreeOutputIterator result_spanning_tree, Oracle oracle = Oracle{}) : - m_g(g), m_cost_map(cost_map), m_index(index), m_deg_bounds(deg_bounds), - m_result_spanning_tree(result_spanning_tree), - m_compare(bounded_degree_mst_compare_traits::EPSILON), - m_oracle(oracle) - {} - - using Edge = typename boost::graph_traits::edge_descriptor; - using Vertex = typename boost::graph_traits::vertex_descriptor; - - using EdgeMap = boost::bimap; - using VertexMap = std::unordered_map; - - using EdgeMapOriginal = std::vector>; - - using ErrorMessage = boost::optional; - - /** - * Checks if the input graph is connected. - */ - ErrorMessage check_input_validity() { - // Is g connected? - std::vector components(num_vertices(m_g)); - int num = boost::connected_components(m_g, &components[0]); - - if (num > 1) { - return ErrorMessage{ "The graph is not connected." }; - } - - return ErrorMessage{}; - } - - /** - * @brief - * - * @tparam LP - * @param lp - * - * @return - */ - template - auto get_find_violation(LP & lp) { - using candidate = bdmst_violation_checker::Candidate; - return m_oracle([&](){return m_violation_checker.get_violation_candidates(*this, lp);}, - [&](candidate c){return m_violation_checker.check_violation(c, *this);}, - [&](candidate c){return m_violation_checker.add_violated_constraint(c, *this, lp);}); - } - - /** - * Returns the input graph. - */ - const Graph &get_graph() const { return m_g; } - - /** - * Returns the vertex index. - */ - const VertexIndex &get_index() const { return m_index; } - - /** - * Removes an LP column and the graph edge corresponding to it. - */ - void remove_column(lp::col_id col_id) { - auto ret = m_edge_map.right.erase(col_id); - assert(ret == 1); - } - - /** - * Binds a graph edge to a LP column. 
- */ - void bind_edge_to_col(Edge e, lp::col_id col) { - m_edge_map_original.push_back( - typename EdgeMapOriginal::value_type(col, e)); - m_edge_map.insert(typename EdgeMap::value_type(e, col)); - } - - /** - * Returns the cost of a given edge. - */ - decltype(get(std::declval(), std::declval())) - get_cost(Edge e) { - return get(m_cost_map, e); - } - - /** - * Returns the degree bound of a vertex. - */ - decltype(std::declval()(get(std::declval(), - std::declval()))) - get_degree_bound(Vertex v) { - return m_deg_bounds(get(m_index, v)); - } - - /** - * Returns the LP column corresponding to an edge, if it wasn't deleted from - * the LP. - */ - boost::optional edge_to_col(Edge e) const { - auto i = m_edge_map.left.find(e); - if (i != m_edge_map.left.end()) { - return i->second; - } else { - return boost::none; - } - } - - /** - * Returns a bimap between edges and LP column IDs. - */ - const EdgeMap &get_edge_map() const { return m_edge_map; } - - /** - * Returns a mapping between LP column IDs and edges in the original graph. - */ - const EdgeMapOriginal &get_original_edges_map() const { - return m_edge_map_original; - } - - /** - * Adds an edge to the result spanning tree. - */ - void add_to_result_spanning_tree(Edge e) { - *m_result_spanning_tree = e; - ++m_result_spanning_tree; - } - - /** - * Returns the double comparison object. - */ - utils::compare get_compare() const { - return m_compare; - } - - /** - * Binds a graph vertex to an LP row. - */ - void bind_vertex_to_row(Vertex v, lp::row_id row) { - m_vertex_map.insert(typename VertexMap::value_type(row, v)); - } - - /** - * Unbinds the graph vertex from its corresponding (deleted) LP row. - */ - void remove_row(lp::row_id row_id) { - auto ret = m_vertex_map.erase(row_id); - assert(ret == 1); - } - - /** - * Returns the graph vertex corresponding to a given LP row, - * unless the row doen't correspond to any vertex. 
- */ - boost::optional row_to_vertex(lp::row_id row) { - auto i = m_vertex_map.find(row); - if (i != m_vertex_map.end()) { - return i->second; - } else { - return boost::none; - } - } - - private: - Edge col_to_edge(lp::col_id col) { - auto i = m_edge_map.right.find(col); - assert(i != m_edge_map.right.end()); - return i->second; - } - - const Graph &m_g; - CostMap m_cost_map; - VertexIndex m_index; - const DegreeBounds &m_deg_bounds; - SpanningTreeOutputIterator m_result_spanning_tree; - bdmst_violation_checker m_violation_checker; - - EdgeMapOriginal m_edge_map_original; - EdgeMap m_edge_map; - VertexMap m_vertex_map; - - const utils::compare m_compare; - - Oracle m_oracle; -}; - -namespace detail { -/** - * @brief Creates a bounded_degree_mst object. Non-named version. - * - * @tparam Oracle - * @tparam Graph - * @tparam DegreeBounds - * @tparam CostMap - * @tparam VertexIndex - * @tparam SpanningTreeOutputIterator - * @param g - * @param degBoundMap - * @param cost_map - * @param vertex_index - * @param result_spanning_tree - * @param oracle - * - * @return bounded_degree_mst object - */ -template -bounded_degree_mst -make_bounded_degree_mst(const Graph & g, const DegreeBounds & deg_bounds, - CostMap cost_map, VertexIndex vertex_index, - SpanningTreeOutputIterator result_spanning_tree, - Oracle oracle = Oracle()) { - return bounded_degree_mst(g, deg_bounds, cost_map, vertex_index, - result_spanning_tree, oracle); -} -} // detail - -/** - * Creates a bounded_degree_mst object. Named version. - * The returned object can be used to check input validity or to get a lower -* bound on the - * optimal solution cost. 
- * - * @tparam Oracle - * @tparam Graph - * @tparam DegreeBounds - * @tparam SpanningTreeOutputIterator - * @tparam P - * @tparam T - * @tparam R - * @param g - * @param deg_bounds - * @param params - * @param result_spanning_tree - * @param oracle - * - * @return bounded_degree_mst object - */ -template -auto -make_bounded_degree_mst(const Graph & g, - const DegreeBounds & deg_bounds, - const boost::bgl_named_params & params, - SpanningTreeOutputIterator result_spanning_tree, - Oracle oracle = Oracle()) - -> bounded_degree_mst { - - return detail::make_bounded_degree_mst(g, deg_bounds, - choose_const_pmap(get_param(params, boost::edge_weight), g, boost::edge_weight), - choose_const_pmap(get_param(params, boost::vertex_index), g, boost::vertex_index), - result_spanning_tree, oracle); -} - -/** - * Creates a bounded_degree_mst object. All default parameters. - * The returned object can be used to check input validity or to get a lower -* bound on the - * optimal solution cost. - * - * @tparam Oracle - * @tparam Graph - * @tparam DegreeBounds - * @tparam SpanningTreeOutputIterator - * @param g - * @param deg_bounds - * @param result_spanning_tree - * @param oracle - * - * @return bounded_degree_mst object - */ -template -auto -make_bounded_degree_mst(const Graph & g, const DegreeBounds & deg_bounds, - SpanningTreeOutputIterator result_spanning_tree, - Oracle oracle = Oracle()) -> - decltype(make_bounded_degree_mst(g, deg_bounds, boost::no_named_parameters(), result_spanning_tree, oracle)) { - return make_bounded_degree_mst(g, deg_bounds, boost::no_named_parameters(), result_spanning_tree, oracle); -} - -/** - * Round Condition of the IR Bounded Degree MST algorithm. - */ -struct bdmst_round_condition { - /** - * Constructor. Takes epsilon used in double comparison. - */ - bdmst_round_condition(double epsilon = - bounded_degree_mst_compare_traits::EPSILON) - : m_round_zero(epsilon) {} - - /** - * Checks if a given column of the LP can be rounded to 0. 
- * If the column is rounded, the corresponding edge is removed from the - * graph. - */ - template - boost::optional operator()(Problem &problem, const LP &lp, - lp::col_id col) { - auto ret = m_round_zero(problem, lp, col); - if (ret) { - problem.remove_column(col); - } - return ret; - } - - private: - round_condition_equals<0> m_round_zero; -}; - -/** - * Relax Condition of the IR Bounded Degree MST algorithm. - */ -struct bdmst_relax_condition { - /** - * Checks if a given row of the LP corresponds to a degree bound and can be - * relaxed. - * If the row degree is not greater than the corresponding degree bound + 1, - * it is relaxed - * and the degree bound is deleted from the problem. - */ - template - bool operator()(Problem &problem, const LP &lp, lp::row_id row) { - auto vertex = problem.row_to_vertex(row); - if (vertex) { - auto ret = (lp.get_row_degree(row) <= - problem.get_degree_bound(*vertex) + 1); - if (ret) { - problem.remove_row(row); - } - return ret; - } else - return false; - } -}; - -/** - * Initialization of the IR Bounded Degree MST algorithm. - */ -struct bdmst_init { - /** - * Initializes the LP: variables for edges, degree bound constraints - * and constraint for all edges. - */ - template - void operator()(Problem &problem, LP &lp) { - lp.set_lp_name("bounded degree minimum spanning tree"); - lp.set_optimization_type(lp::MINIMIZE); - - add_variables(problem, lp); - add_degree_bound_constraints(problem, lp); - add_all_set_equality(problem, lp); - } - - private: - /** - * Adds a variable to the LP for each edge in the input graph. - * Binds the LP columns to edges. - */ - template - void add_variables(Problem & problem, LP & lp) { - for (auto e : boost::as_array(edges(problem.get_graph()))) { - auto col = lp.add_column(problem.get_cost(e), 0, 1); - problem.bind_edge_to_col(e, col); - } - } - - /** - * Adds a degree bound constraint to the LP for each vertex in the input - * graph - * and binds vertices to rows. 
- */ - template - void add_degree_bound_constraints(Problem &problem, LP &lp) { - auto const &g = problem.get_graph(); - - for (auto v : boost::as_array(vertices(g))) { - lp::linear_expression expr; - - for (auto e : boost::as_array(out_edges(v, g))) { - expr += *(problem.edge_to_col(e)); - } - - auto row = - lp.add_row(std::move(expr) <= problem.get_degree_bound(v)); - problem.bind_vertex_to_row(v, row); - } - } - - /** - * Adds an equality constraint to the LP for the set of all edges in the - * input graph. - */ - template - void add_all_set_equality(Problem &problem, LP &lp) { - lp::linear_expression expr; - for (auto col : lp.get_columns()) { - expr += col; - } - lp.add_row(std::move(expr) == num_vertices(problem.get_graph()) - 1); - } -}; - -/** - * Set Solution component of the IR Bounded Degree MST algorithm. - */ -struct bdmst_set_solution { - /** - * Constructor. Takes epsilon used in double comparison. - */ - bdmst_set_solution(double epsilon = - bounded_degree_mst_compare_traits::EPSILON) - : m_compare(epsilon) {} - - /** - * Creates the result spanning tree form the LP (all edges corresponding to - * columns with value 1). - */ - template - void operator()(Problem & problem, const GetSolution & solution) { - for (auto col_and_edge : problem.get_original_edges_map()) { - if (m_compare.e(solution(col_and_edge.first), 1)) { - problem.add_to_result_spanning_tree(col_and_edge.second); - } - } - } - -private: - const utils::compare m_compare; -}; - -template , - typename ResolveLpToExtremePoint = ir::row_generation_solve_lp<>> -using bdmst_ir_components = - IRcomponents; - -namespace detail { -/** - * @brief Solves the Bounded Degree MST problem using Iterative Rounding. -* Non-named version. 
- * - * @tparam Oracle - * @tparam Graph - * @tparam DegreeBounds - * @tparam CostMap - * @tparam VertexIndex - * @tparam SpanningTreeOutputIterator - * @tparam IRcomponents - * @tparam Visitor - * @param g - * @param degBoundMap - * @param cost_map - * @param vertex_index - * @param result_spanning_tree - * @param components - * @param oracle - * @param visitor - * - * @return solution status - */ -template , - typename Visitor = trivial_visitor> -IRResult bounded_degree_mst_iterative_rounding( - const Graph & g, - const DegreeBounds & deg_bounds, - CostMap cost_map, - VertexIndex vertex_index, - SpanningTreeOutputIterator result_spanning_tree, - IRcomponents components = IRcomponents(), - Oracle oracle = Oracle(), - Visitor visitor = Visitor()) { - - auto bdmst = make_bounded_degree_mst(g, deg_bounds, cost_map, vertex_index, result_spanning_tree, oracle); - return solve_iterative_rounding(bdmst, std::move(components), std::move(visitor)); -} -} // detail - -/** - * @brief Solves the Bounded Degree MST problem using Iterative Rounding. Named -* version. 
- * - * @tparam Oracle - * @tparam Graph - * @tparam DegreeBounds - * @tparam SpanningTreeOutputIterator - * @tparam IRcomponents - * @tparam Visitor - * @tparam P - * @tparam T - * @tparam R - * @param g - * @param deg_bounds - * @param result_spanning_tree - * @param params - * @param components - * @param oracle - * @param visitor - * - * @return solution status - */ -template , - typename Visitor = trivial_visitor, typename P, typename T, - typename R> -IRResult bounded_degree_mst_iterative_rounding( - const Graph & g, - const DegreeBounds & deg_bounds, - const boost::bgl_named_params & params, - SpanningTreeOutputIterator result_spanning_tree, - IRcomponents components = IRcomponents(), - Oracle oracle = Oracle(), - Visitor visitor = Visitor()) { - - return detail::bounded_degree_mst_iterative_rounding(g, deg_bounds, - choose_const_pmap(get_param(params, boost::edge_weight), g, boost::edge_weight), - choose_const_pmap(get_param(params, boost::vertex_index), g, boost::vertex_index), - std::move(result_spanning_tree), std::move(components), - std::move(oracle), std::move(visitor)); -} - -/** - * @brief Solves the Bounded Degree MST problem using Iterative Rounding. All -* default parameters. 
- * - * @tparam Oracle - * @tparam Graph - * @tparam DegreeBounds - * @tparam SpanningTreeOutputIterator - * @tparam IRcomponents - * @tparam Visitor - * @param g - * @param deg_bounds - * @param result_spanning_tree - * @param components - * @param oracle - * @param visitor - * - * @return solution status - */ -template , - typename Visitor = trivial_visitor> -IRResult bounded_degree_mst_iterative_rounding( - const Graph & g, - const DegreeBounds & deg_bounds, - SpanningTreeOutputIterator result_spanning_tree, - IRcomponents components = IRcomponents(), - Oracle oracle = Oracle(), - Visitor visitor = Visitor()) { - - return bounded_degree_mst_iterative_rounding(g, deg_bounds, - boost::no_named_parameters(), std::move(result_spanning_tree), - std::move(components), std::move(oracle), std::move(visitor)); -} - -} //! ir -} //! paal -#endif // PAAL_BOUNDED_DEGREE_MST_HPP diff --git a/patrec/inc/WireCellPatRec/paal/iterative_rounding/bounded_degree_min_spanning_tree/bounded_degree_mst_oracle.hpp b/patrec/inc/WireCellPatRec/paal/iterative_rounding/bounded_degree_min_spanning_tree/bounded_degree_mst_oracle.hpp deleted file mode 100644 index e88adf9e3..000000000 --- a/patrec/inc/WireCellPatRec/paal/iterative_rounding/bounded_degree_min_spanning_tree/bounded_degree_mst_oracle.hpp +++ /dev/null @@ -1,210 +0,0 @@ -//======================================================================= -// Copyright (c) -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file bounded_degree_mst_oracle.hpp - * @brief - * @author Piotr Godlewski - * @version 1.0 - * @date 2013-06-05 - */ -#ifndef PAAL_BOUNDED_DEGREE_MST_ORACLE_HPP -#define PAAL_BOUNDED_DEGREE_MST_ORACLE_HPP - -#include "paal/iterative_rounding/min_cut.hpp" -#include "paal/lp/lp_base.hpp" - -#include -#include - -#include - -namespace paal { -namespace ir { - -/** - * @class bdmst_violation_checker - * @brief Violations checker for the separation oracle - * in the bounded degree minimum spanning tree problem. - */ -class bdmst_violation_checker { - using AuxEdge = min_cut_finder::Edge; - using AuxVertex = min_cut_finder::Vertex; - using AuxEdgeList = std::vector; - using Violation = boost::optional; - - public: - using Candidate = std::pair; - using CandidateList = std::vector; - - /** - * Returns an iterator range of violated constraint candidates. - */ - template - const CandidateList &get_violation_candidates(const Problem &problem, - const LP &lp) { - fill_auxiliary_digraph(problem, lp); - initialize_candidates(problem); - return m_candidate_list; - } - - /** - * Checks if the given constraint candidate is violated an if it is, - * returns the violation value and violated constraint ID. - */ - template - Violation check_violation(Candidate candidate, const Problem &problem) { - double violation = find_violation(candidate.first, candidate.second); - if (problem.get_compare().g(violation, 0)) { - return violation; - } else { - return Violation{}; - } - } - - /** - * Adds a violated constraint to the LP. 
- */ - template - void add_violated_constraint(Candidate violating_pair, - const Problem &problem, LP &lp) { - if (violating_pair != m_min_cut.get_last_cut()) { - find_violation(violating_pair.first, violating_pair.second); - } - - auto const &g = problem.get_graph(); - auto const &index = problem.get_index(); - - lp::linear_expression expr; - for (auto const &e : problem.get_edge_map().right) { - auto u = get(index, source(e.second, g)); - auto v = get(index, target(e.second, g)); - if (m_min_cut.is_in_source_set(u) && - m_min_cut.is_in_source_set(v)) { - expr += e.first; - } - } - lp.add_row(std::move(expr) <= m_min_cut.source_set_size() - 2); - } - - private: - - /** - * Creates the auxiliary directed graph used for feasibility testing. - */ - template - void fill_auxiliary_digraph(const Problem &problem, const LP &lp) { - auto const &g = problem.get_graph(); - auto const &index = problem.get_index(); - m_vertices_num = num_vertices(g); - m_min_cut.init(m_vertices_num); - m_src_to_v.resize(m_vertices_num); - m_v_to_trg.resize(m_vertices_num); - - for (auto const &e : problem.get_edge_map().right) { - lp::col_id col_idx = e.first; - double col_val = lp.get_col_value(col_idx) / 2; - - if (!problem.get_compare().e(col_val, 0)) { - auto u = get(index, source(e.second, g)); - auto v = get(index, target(e.second, g)); - m_min_cut.add_edge_to_graph(u, v, col_val, col_val); - } - } - - m_src = m_min_cut.add_vertex_to_graph(); - m_trg = m_min_cut.add_vertex_to_graph(); - - for (auto v : boost::as_array(vertices(g))) { - auto aux_v = get(index, v); - m_src_to_v[aux_v] = m_min_cut - .add_edge_to_graph(m_src, aux_v, degree_of(problem, v, lp) / 2) - .first; - m_v_to_trg[aux_v] = - m_min_cut.add_edge_to_graph(aux_v, m_trg, 1).first; - } - } - - /** - * Initializes the list of cut candidates. 
- */ - template - void initialize_candidates(const Problem &problem) { - auto const &g = problem.get_graph(); - auto const &index = problem.get_index(); - auto src = *(std::next(vertices(g).first, rand() % m_vertices_num)); - auto aux_src = get(index, src); - m_candidate_list.clear(); - for (auto v : boost::as_array(vertices(g))) { - if (v != src) { - auto aux_v = get(index, v); - m_candidate_list.push_back(std::make_pair(aux_src, aux_v)); - m_candidate_list.push_back(std::make_pair(aux_v, aux_src)); - } - } - } - - /** - * Calculates the sum of the variables for edges incident with a given - * vertex. - */ - template - double degree_of(const Problem &problem, const Vertex &v, const LP &lp) { - double res = 0; - - for (auto e : boost::as_array(out_edges(v, problem.get_graph()))) { - auto col_id = problem.edge_to_col(e); - if (col_id) { - res += lp.get_col_value(*col_id); - } - } - return res; - } - - /** - * Finds the most violated set of vertices containing \c src and not - * containing \c trg and returns its violation value. 
- * @param src vertex to be contained in the violating set - * @param trg vertex not to be contained in the violating set - * @return violation of the found set - */ - double find_violation(AuxVertex src, AuxVertex trg) { - double orig_cap = m_min_cut.get_capacity(m_src_to_v[src]); - - m_min_cut.set_capacity(m_src_to_v[src], m_vertices_num); - // capacity of m_src_to_v[trg] does not change - m_min_cut.set_capacity(m_v_to_trg[src], 0); - m_min_cut.set_capacity(m_v_to_trg[trg], m_vertices_num); - - double min_cut_weight = m_min_cut.find_min_cut(m_src, m_trg); - double violation = m_vertices_num - 1 - min_cut_weight; - - // reset the original values for the capacities - m_min_cut.set_capacity(m_src_to_v[src], orig_cap); - // capacity of m_src_to_v[trg] does not change - m_min_cut.set_capacity(m_v_to_trg[src], 1); - m_min_cut.set_capacity(m_v_to_trg[trg], 1); - - return violation; - } - - int m_vertices_num; - - AuxVertex m_src; - AuxVertex m_trg; - - AuxEdgeList m_src_to_v; - AuxEdgeList m_v_to_trg; - - CandidateList m_candidate_list; - - min_cut_finder m_min_cut; -}; - -} //! ir -} //! paal -#endif // PAAL_BOUNDED_DEGREE_MST_ORACLE_HPP diff --git a/patrec/inc/WireCellPatRec/paal/iterative_rounding/generalised_assignment/generalised_assignment.hpp b/patrec/inc/WireCellPatRec/paal/iterative_rounding/generalised_assignment/generalised_assignment.hpp deleted file mode 100644 index 166d7e017..000000000 --- a/patrec/inc/WireCellPatRec/paal/iterative_rounding/generalised_assignment/generalised_assignment.hpp +++ /dev/null @@ -1,377 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// 2014 Piotr Godlewski -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file generalised_assignment.hpp - * @brief - * @author Piotr Wygocki, Piotr Godlewski - * @version 1.0 - * @date 2013-05-06 - */ -#ifndef PAAL_GENERALISED_ASSIGNMENT_HPP -#define PAAL_GENERALISED_ASSIGNMENT_HPP - - -#include "paal/iterative_rounding/ir_components.hpp" -#include "paal/iterative_rounding/iterative_rounding.hpp" - -#include - -namespace paal { -namespace ir { - -/** - * Relax Condition of the IR Generalised Assignment algorithm. - */ -struct ga_relax_condition { - /** - * Checks if a given row of the LP corresponds to a machine and can be - * relaxed. - */ - template - bool operator()(Problem &problem, const LP &lp, lp::row_id row) { - auto &&machine_rows = problem.get_machine_rows(); - if (machine_rows.find(row) == machine_rows.end()) { - return false; - } - auto row_deg = lp.get_row_degree(row); - return row_deg <= 1 || (row_deg == 2 && problem.get_compare().ge( - lp.get_row_sum(row), 1)); - } -}; - -/** - * Set Solution component of the IR Generalised Assignment algorithm. - */ -struct ga_set_solution { - /** - * Creates the result assignment form the LP (all edges with value 1). - */ - template - void operator()(Problem &problem, const GetSolution &solution) { - auto jbegin = std::begin(problem.get_jobs()); - auto mbegin = std::begin(problem.get_machines()); - auto &col_idx = problem.get_col_idx(); - auto job_to_machine = problem.get_job_to_machines(); - - for (auto idx : irange(col_idx.size())) { - if (problem.get_compare().e(solution(col_idx[idx]), 1)) { - *job_to_machine = - std::make_pair(*(jbegin + problem.get_j_idx(idx)), - *(mbegin + problem.get_m_idx(idx))); - ++job_to_machine; - } - } - } -}; - -/** - * Initialization of the IR Generalised Assignment algorithm. 
- */ -class ga_init { - public: - /** - * Initializes the LP: variables for edges, constraints for jobs and - * machines. - */ - template - void operator()(Problem &problem, LP &lp) { - lp.set_lp_name("generalized assignment problem"); - lp.set_optimization_type(lp::MINIMIZE); - - add_variables(problem, lp); - add_constraints_for_jobs(problem, lp); - add_constraints_for_machines(problem, lp); - } - - private: - /** - * Adds a variable to the LP for each (machine, job) edge, unless the - * job proceeding time is greater than machine available time. Binds the - * LP columns to the (machine, job) pairs. - */ - template - void add_variables(Problem &problem, LP &lp) { - auto &col_idx = problem.get_col_idx(); - col_idx.reserve(problem.get_machines_cnt() * problem.get_jobs_cnt()); - for (auto &&j : problem.get_jobs()) { - for (auto &&m : problem.get_machines()) { - if (problem.get_proceeding_time()(j, m) <= - problem.get_machine_available_time()(m)) { - col_idx.push_back(lp.add_column(problem.get_cost()(j, m))); - } else { - col_idx.push_back( - lp.add_column(problem.get_cost()(j, m), 0, 0)); - } - } - } - } - - // constraints for job - template - void add_constraints_for_jobs(Problem &problem, LP &lp) { - auto &col_idx = problem.get_col_idx(); - for (auto j_idx : irange(problem.get_jobs_cnt())) { - lp::linear_expression expr; - for (auto m_idx : irange(problem.get_machines_cnt())) { - expr += col_idx[problem.idx(j_idx, m_idx)]; - } - lp.add_row(std::move(expr) == 1.0); - } - } - - // constraints for machines - template - void add_constraints_for_machines(Problem &problem, LP &lp) { - auto &col_idx = problem.get_col_idx(); - for (auto m : problem.get_machines() | boost::adaptors::indexed()) { - auto T = problem.get_machine_available_time()(m.value()); - lp::linear_expression expr; - - for (auto j : problem.get_jobs() | boost::adaptors::indexed()) { - auto t = problem.get_proceeding_time()(j.value(), m.value()); - auto x = col_idx[problem.idx(j.index(), m.index())]; - 
expr += x * t; - } - auto row = lp.add_row(std::move(expr) <= T); - problem.get_machine_rows().insert(row); - } - } -}; - -template -using ga_ir_components = - IRcomponents; - - -/** - * @class generalised_assignment - * @brief The class for solving the Generalised Assignment problem using -* Iterative Rounding. - * - * @tparam MachineIter - * @tparam JobIter - * @tparam Cost - * @tparam ProceedingTime - * @tparam MachineAvailableTime - * @tparam JobsToMachinesOutputIterator - */ -template -class generalised_assignment { - public: - using Job = typename std::iterator_traits::value_type; - using Machine = typename std::iterator_traits::value_type; - - using Compare = utils::compare; - using MachineRows = std::unordered_set; - using ColIdx = std::vector; - - using ErrorMessage = boost::optional; - - /** - * Constructor. - */ - generalised_assignment(MachineIter mbegin, MachineIter mend, JobIter jbegin, - JobIter jend, const Cost &c, const ProceedingTime &t, - const MachineAvailableTime &T, - JobsToMachinesOutputIterator job_to_machines) - : m_m_cnt(std::distance(mbegin, mend)), - m_j_cnt(std::distance(jbegin, jend)), m_jbegin(jbegin), m_jend(jend), - m_mbegin(mbegin), m_mend(mend), m_c(c), m_t(t), m_T(T), - m_job_to_machine(job_to_machines) {} - - /** - * Checks if input is valid. - */ - ErrorMessage check_input_validity() { - return ErrorMessage{}; - } - - /** - * Returns the index of the edge between a given job and a given machine. - */ - std::size_t idx(std::size_t j_idx, std::size_t m_idx) { return j_idx * m_m_cnt + m_idx; } - - /** - * Returns the index of a job given the index of the edge between the job - * and a machine. - */ - std::size_t get_j_idx(std::size_t idx) { return idx / m_m_cnt; } - - /** - * Returns the index of a machine given the index of the edge between a job - * and the machine. - */ - std::size_t get_m_idx(std::size_t idx) { return idx % m_m_cnt; } - - /** - * Returns the LP rows corresponding to the machines. 
- */ - MachineRows &get_machine_rows() { return m_machine_rows; } - - /** - * Returns the double comparison object. - */ - Compare get_compare() { return m_compare; } - - /** - * Returns the number of machines in the problem. - */ - std::size_t get_machines_cnt() const { return m_m_cnt; } - - /** - * Returns the number of jobs in the problem. - */ - std::size_t get_jobs_cnt() const { return m_j_cnt; } - - /** - * Returns the machines iterator range. - */ - boost::iterator_range get_machines() { - return boost::make_iterator_range(m_mbegin, m_mend); - } - - /** - * Returns the jobs iterator range. - */ - boost::iterator_range get_jobs() { - return boost::make_iterator_range(m_jbegin, m_jend); - } - - /** - * Returns the vector of LP column IDs. - */ - ColIdx &get_col_idx() { return m_col_idx; } - - /** - * Returns the result output iterator. - */ - JobsToMachinesOutputIterator get_job_to_machines() { - return m_job_to_machine; - } - - /** - * Returns the proceeding time function (function from (job, machine) - * pairs into the proceeding time of the job on the machine). - */ - const ProceedingTime &get_proceeding_time() { return m_t; } - - /** - * Returns the machine available time function (function returning - * the time available on a given machine). - */ - const MachineAvailableTime &get_machine_available_time() { return m_T; } - - /** - * Returns the cost function (function from (job, machine) - * pairs into the cost of executing the job on the machine). - */ - const Cost &get_cost() const { return m_c; } - - private: - - const std::size_t m_m_cnt; - const std::size_t m_j_cnt; - JobIter m_jbegin; - JobIter m_jend; - MachineIter m_mbegin; - MachineIter m_mend; - const Cost &m_c; - const ProceedingTime &m_t; - const MachineAvailableTime &m_T; - JobsToMachinesOutputIterator m_job_to_machine; - const Compare m_compare; - ColIdx m_col_idx; - MachineRows m_machine_rows; -}; - -/** - * @brief Creates a generalised_assignment object. 
- * - * @tparam MachineIter - * @tparam JobIter - * @tparam Cost - * @tparam ProceedingTime - * @tparam MachineAvailableTime - * @tparam JobsToMachinesOutputIterator - * @param mbegin begin machines iterator - * @param mend end machines iterator - * @param jbegin begin jobs iterator - * @param jend end jobs iterator - * @param c costs of assignments - * @param t jobs proceeding times - * @param T times available for the machines - * @param jobs_to_machines found assignment - * - * @return generalised_assignment object - */ -template -generalised_assignment -make_generalised_assignment(MachineIter mbegin, MachineIter mend, - JobIter jbegin, JobIter jend, const Cost &c, - const ProceedingTime &t, - const MachineAvailableTime &T, - JobsToMachinesOutputIterator jobs_to_machines) { - return generalised_assignment< - MachineIter, JobIter, Cost, ProceedingTime, MachineAvailableTime, - JobsToMachinesOutputIterator>(mbegin, mend, jbegin, jend, c, t, T, - jobs_to_machines); -} - -/** - * @brief Solves the Generalised Assignment problem using Iterative Rounding. 
- * - * @tparam MachineIter - * @tparam JobIter - * @tparam Cost - * @tparam ProceedingTime - * @tparam MachineAvailableTime - * @tparam JobsToMachinesOutputIterator - * @tparam Components - * @tparam Visitor - * @param mbegin begin machines iterator - * @param mend end machines iterator - * @param jbegin begin jobs iterator - * @param jend end jobs iterator - * @param c costs of assignments - * @param t jobs proceeding times - * @param T times available for the machines - * @param jobs_to_machines found assignment - * @param components IR components - * @param visitor - * - * @return solution status - */ -template , - typename Visitor = trivial_visitor> -IRResult generalised_assignment_iterative_rounding( - MachineIter mbegin, MachineIter mend, JobIter jbegin, JobIter jend, - const Cost &c, const ProceedingTime &t, const MachineAvailableTime &T, - JobsToMachinesOutputIterator jobs_to_machines, - Components components = Components(), Visitor visitor = Visitor()) { - auto ga_solution = make_generalised_assignment(mbegin, mend, jbegin, jend, - c, t, T, jobs_to_machines); - return solve_iterative_rounding(ga_solution, std::move(components), - std::move(visitor)); -} - - -} // ir -} // paal -#endif // PAAL_GENERALISED_ASSIGNMENT_HPP diff --git a/patrec/inc/WireCellPatRec/paal/iterative_rounding/ir_components.hpp b/patrec/inc/WireCellPatRec/paal/iterative_rounding/ir_components.hpp deleted file mode 100644 index 796d7c8b2..000000000 --- a/patrec/inc/WireCellPatRec/paal/iterative_rounding/ir_components.hpp +++ /dev/null @@ -1,299 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// 2014 Piotr Godlewski -// -// Distributed under the Boost Software License, Version 1.0. 
(See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file ir_components.hpp - * @brief - * @author Piotr Wygocki, Piotr Godlewski - * @version 1.0 - * @date 2013-05-10 - */ -#ifndef PAAL_IR_COMPONENTS_HPP -#define PAAL_IR_COMPONENTS_HPP - - -#include "paal/data_structures/components/components.hpp" -#include "paal/lp/ids.hpp" -#include "paal/lp/lp_base.hpp" -#include "paal/lp/problem_type.hpp" -#include "paal/utils/floating.hpp" -#include "paal/utils/functors.hpp" - -#include -#include -#include - -#include - -namespace paal { -namespace ir { - -/** - * @brief Default column rounding condition component. - */ -class default_round_condition { - public: - /** - * @brief constructor takes epsilon used in double comparison. - */ - default_round_condition(double epsilon = utils::compare::default_epsilon()): m_compare(epsilon) { } - - /** - * @brief Rounds the column if its value is integral. - */ - template - boost::optional operator()(Problem &, const LP &lp, - lp::col_id col) { - double x = lp.get_col_value(col); - double r = std::round(x); - if (m_compare.e(x,r)) { - return r; - } - return boost::none; - } - - protected: - /// Double comparison object. - const utils::compare m_compare; -}; - -/** - * @brief Column rounding component. - * Rounds a column if its value is equal to one of the template parameter - * values. - */ -template class round_condition_equals { - round_condition_equals() = delete; -}; - -/** - * @brief Column rounding component. - * Rounds a column if its value is equal to one of the template parameter - * values. - */ -template -class round_condition_equals : public round_condition_equals { - public: - /** - * @brief constructor takes epsilon used in double comparison. 
- */ - round_condition_equals(double epsilon = utils::compare::default_epsilon()): round_condition_equals(epsilon) { } - - /// Rounds a column if its value is equal to one of the template parameter - /// values. - template - boost::optional operator()(Problem &, const LP &lp, - lp::col_id col) { - return get(lp, lp.get_col_value(col)); - } - - protected: - /// Checks if the value can be rounded to the first template parameter. - template - boost::optional get(const LP & lp, double x) { - if (this->m_compare.e(x, arg)) { - return double(arg); - } else { - return round_condition_equals::get(lp, x); - } - } -}; - -/** - * @brief Column rounding component. - * Rounds a column if its value is equal to one of the template parameter - * values. - * Edge case (no template parameter values). - */ -template <> class round_condition_equals<> { - public: - /** - * @brief constructor takes epsilon used in double comparison. - */ - round_condition_equals(double epsilon = utils::compare::default_epsilon()): m_compare(epsilon) { } - - protected: - /// Edge case: return false. - template boost::optional get(const LP &lp, double x) { - return boost::none; - } - - /// Double comparison object. - const utils::compare m_compare; -}; - -/** - * @brief Column rounding component. - * Rounds a column if its value satisfies a fixed condition. - * The column is rounded to a value defined by a fixed function. - */ -template class round_condition_to_fun { - public: - /** - * @brief Constructor. Takes the rounding condition and the rounding - * function. - */ - round_condition_to_fun(Cond c = Cond(), F f = F()) : m_cond(c), m_f(f) {} - - /// Rounds a column if its value satisfies a fixed condition. 
- template - boost::optional operator()(Problem &, const LP &lp, - lp::col_id col) { - double x = lp.get_col_value(col); - if (m_cond(x)) { - return m_f(x); - } - return boost::none; - } - - private: - Cond m_cond; - F m_f; -}; - -/** - * @brief Checks if a variable is greater or equal than a fixed bound. - */ -class cond_bigger_equal_than { - public: - /** - * @brief constructor takes epsilon used in double comparison. - */ - cond_bigger_equal_than(double b, double epsilon = utils::compare::default_epsilon()) - : m_bound(b), m_compare(epsilon) {} - - /// Checks if a variable is greater or equal than a fixed bound. - bool operator()(double x) { return m_compare.ge(x, m_bound); } - - private: - double m_bound; - const utils::compare m_compare; -}; - -/** - * @brief Column rounding component. - * A variable is rounded up to 1, if it has value at least half in the - * solution. - */ -struct round_condition_greater_than_half : - public round_condition_to_fun { - /** - * @brief constructor takes epsilon used in double comparison. - */ - round_condition_greater_than_half(double epsilon = utils::compare::default_epsilon()) : - round_condition_to_fun(cond_bigger_equal_than(0.5, epsilon)) {} -}; - -/** - * @brief Finds an extreme point solution to the LP. - */ -struct default_solve_lp_to_extreme_point { - /// Finds an extreme point solution to the LP. - template - lp::problem_type operator()(Problem &, LP &lp) { - return lp.solve_simplex(lp::PRIMAL); - } -}; - -/** - * @brief Finds an extreme point solution to the LP. - */ -struct default_resolve_lp_to_extreme_point { - /// Finds an extreme point solution to the LP. - template - lp::problem_type operator()(Problem &, LP &lp) { - return lp.resolve_simplex(lp::PRIMAL); - } -}; - -/** - * @brief Default stop condition component. - */ -class default_stop_condition { - public: - /** - * @brief Constructor. Takes epsilon used in double comparison. 
- */ - default_stop_condition(double epsilon = utils::compare::default_epsilon()): m_compare(epsilon) { } - - /** - * @brief Checks if the current LP solution has got only integer values. - */ - template - bool operator()(Problem &, const LP &lp) { - for (lp::col_id col : lp.get_columns()) { - double col_val = lp.get_col_value(col); - if (!m_compare.e(col_val, std::round(col_val))) { - return false; - } - } - - return true; - } - - protected: - /// Double comparison object. - const utils::compare m_compare; -}; - -/** - * @brief Checks if the relaxations limit was reached. - */ -class relaxations_limit_condition { - public: - /// Constructor. - relaxations_limit_condition(int limit = 1) : m_limit(limit) {} - - /** - * @brief Checks if the relaxations limit was reached. - */ - bool operator()(int relaxed) { return relaxed >= m_limit; } - - private: - int m_limit; -}; - -class Init; -class RoundCondition; -class RelaxCondition; -class SetSolution; -class SolveLP; -class ResolveLP; -class StopCondition; -class RelaxationsLimit; - -using components = data_structures::components< - Init, - data_structures::NameWithDefault, - data_structures::NameWithDefault, - data_structures::NameWithDefault, - data_structures::NameWithDefault, - data_structures::NameWithDefault, - data_structures::NameWithDefault, - data_structures::NameWithDefault>; - -/** - * @brief Iterative rounding components. - */ -template -using IRcomponents = typename components::type; - -/** - * @brief Returns iterative rounding components. - */ -template -auto make_IRcomponents(Args &&... 
args) - ->decltype(components::make_components(std::forward(args)...)) { - return components::make_components(std::forward(args)...); -} - -} // ir -} // paal - -#endif // PAAL_IR_COMPONENTS_HPP diff --git a/patrec/inc/WireCellPatRec/paal/iterative_rounding/iterative_rounding.hpp b/patrec/inc/WireCellPatRec/paal/iterative_rounding/iterative_rounding.hpp deleted file mode 100644 index f72a2f675..000000000 --- a/patrec/inc/WireCellPatRec/paal/iterative_rounding/iterative_rounding.hpp +++ /dev/null @@ -1,368 +0,0 @@ -//======================================================================= -// Copyright (c) 2013 Piotr Wygocki -// 2014 Piotr Godlewski -// Distributed under the Boost Software License, Version 1.0. (See -// accompanying file LICENSE_1_0.txt or copy at -// http://www.boost.org/LICENSE_1_0.txt) -//======================================================================= -/** - * @file iterative_rounding.hpp - * @brief - * @author Piotr Wygocki, Piotr Godlewski - * @version 1.0 - * @date 2013-05-06 - */ -#ifndef PAAL_ITERATIVE_ROUNDING_HPP -#define PAAL_ITERATIVE_ROUNDING_HPP - - -#include "paal/iterative_rounding/ir_components.hpp" -#include "paal/lp/glp.hpp" -#include "paal/utils/floating.hpp" -#include "paal/utils/type_functions.hpp" -#include "paal/utils/irange.hpp" - -#include - -#include -#include -#include // std::placeholders:: - - -namespace paal { -namespace ir { - -/** - * @brief Default Iterative Rounding visitor. - */ -struct trivial_visitor { - /** - * @brief Method called after (re)solving the LP. - */ - template - void solve_lp(Problem &problem, LP &lp) {} - - /** - * @brief Method called after rounding a column of the LP. - */ - template - void round_col(Problem &problem, LP &lp, lp::col_id col, double val) {} - - /** - * @brief Method called after relaxing a row of the LP. 
- */ - template - void relax_row(Problem &problem, LP &lp, lp::row_id row) {} -}; - -///default solve lp for row_generation, -///at first call PRIMAL, and DUAL on the next calls -template -class default_solve_lp_in_row_generation { - bool m_first; - LP & m_lp; -public: - ///constructor - default_solve_lp_in_row_generation(Problem &, LP & lp) : m_first(true), m_lp(lp) {} - - ///operator() - lp::problem_type operator()() - { - if (m_first) { - m_first = false; - return m_lp.solve_simplex(lp::PRIMAL); - } - return m_lp.resolve_simplex(lp::DUAL); - } -}; - -/// default row_generation for lp, -/// one can customize LP solving, by setting SolveLP -template