DM-43615: Add visit summary metric dispatching to CalibrateImageTask #945

Open · wants to merge 2 commits into base: main
46 changes: 45 additions & 1 deletion python/lsst/pipe/tasks/calibrateImage.py
@@ -41,6 +41,23 @@
from . import measurePsf, repair, photoCal, computeExposureSummaryStats, snapCombine


class _EmptyTargetTask(pipeBase.PipelineTask):
Contributor:

I'm not a fan of this approach. Could those metrics be moved into a different package, to fix the circular import problem? Alternatively, could we modify ConfigurableField to allow optional and then let target=None? That seems like a better approach, though it still doesn't solve the problem of not being able to test the new if branch.
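For concreteness, a rough sketch of what that alternative might look like. This assumes a hypothetical optional/target=None mode that pexConfig.ConfigurableField does not currently support, so it is illustrative only, not runnable against today's pex_config:

# Hypothetical: ConfigurableField extended to accept target=None.
create_summary_metrics = pexConfig.ConfigurableField(
    target=None,    # no default target (not allowed today)
    optional=True,  # hypothetical new argument
    doc="Subtask to create metrics from the summary stats.",
)

# CalibrateImageTask.__init__ could then raise a clear error itself,
# with no placeholder task needed:
if self.config.do_create_summary_metrics:
    if self.config.create_summary_metrics.target is None:
        raise RuntimeError(
            "do_create_summary_metrics is True, so create_summary_metrics "
            "must be retargeted (e.g. to an analysis_tools task)."
        )
    self.makeSubtask("create_summary_metrics")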

Contributor Author:

I'm also not a fan, and I like to think this is a temporary workaround to the circular import problem. The reason we didn't go down the target=None route was to ensure that a meaningful error message was raised if one tried to implement metric-writing without retargeting.

The circular import arises solely due to analysis_tools needing pipe_tasks to be set up to import the LoadReferenceCatalogTask; if that (and colorterms, which LoadReferenceCatalogTask uses) can be moved out of pipe_tasks, then the circular import is fixed. However, I don't know whether that is something that would be considered feasible.

Could those metrics be moved into a different package...?

My understanding is that we want to avoid/move away from having metrics created outside of analysis_tools (is that what you mean?), in order to ensure that the same tool run from within a task or as a standalone analysis tool will produce the same results (from this post by @natelust; apologies to Nate if there is any misunderstanding on my part on this).

I seem to recall @TallJimbo saying that he was "ok" with this (temporary??) workaround, but that's not to say that it shouldn't be reconsidered. It may come down to how critical it is to have metric creation in here for OR4; but that decision is above my pay grade :) .

Again, apologies if I've misrepresented either Nate or Jim.
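
For reference, the retargeting that the placeholder is meant to force is the same as the test setup in this PR; as a config-override sketch (the analysis_tools names are those used in the unit tests below; the override mechanism shown is illustrative, not a documented recipe):

# Illustrative pipetask config override enabling the new subtask.
from lsst.analysis.tools.tasks import CalexpSummaryAnalysisTask
from lsst.analysis.tools.atools import CalexpSummaryMetrics

config.do_create_summary_metrics = True
config.create_summary_metrics.retarget(CalexpSummaryAnalysisTask)
config.create_summary_metrics.atools.initial_pvi_metrics = CalexpSummaryMetrics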

Contributor:

What exactly is the circular import problem, anyway? analysis_tools does not depend on pipe_tasks.

Contributor Author:

I believe analysis_tools -> cp_pipe -> pipe_tasks is the dependency.
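
If that chain is accurate, giving create_summary_metrics a real analysis_tools default target from inside pipe_tasks would close the loop; schematically (package names as stated above, not a verified module-level trace):

# pipe_tasks/calibrateImage.py imports the default target
#   -> lsst.analysis.tools  (the metrics task)
#      -> cp_pipe           (analysis_tools dependency)
#         -> pipe_tasks     (cp_pipe dependency) <- back to the start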

"""
This is a placeholder target for create_summary_metrics and must be retargeted at
runtime. create_summary_metrics should target an analysis tool task, but that
would, at the time of writing, result in a circular import.

As a result, this class should not be used for anything else.
"""
ConfigClass = pipeBase.PipelineTaskConfig

def __init__(self, **kwargs) -> None:
raise NotImplementedError(
"do_create_summary_metrics is set to True, in which "
"case create_summary_metrics must be retargeted."
)


class CalibrateImageConnections(pipeBase.PipelineTaskConnections,
dimensions=("instrument", "visit", "detector")):

@@ -136,6 +153,11 @@ class CalibrateImageConnections(pipeBase.PipelineTaskConnections,
storageClass="Catalog",
dimensions=("instrument", "visit", "detector"),
)
summary_metrics = connectionTypes.Output(
name="initial_summary_metrics",
storageClass="MetricMeasurementBundle",
dimensions=("instrument", "visit", "detector"),
)

def __init__(self, *, config=None):
super().__init__(config=config)
@@ -147,6 +169,8 @@ def __init__(self, *, config=None):
del self.astrometry_matches
if config.optional_outputs is None or "photometry_matches" not in config.optional_outputs:
del self.photometry_matches
if not config.do_create_summary_metrics:
del self.summary_metrics


class CalibrateImageConfig(pipeBase.PipelineTaskConfig, pipelineConnections=CalibrateImageConnections):
@@ -259,6 +283,16 @@ class CalibrateImageConfig(pipeBase.PipelineTaskConfig, pipelineConnections=Cali
target=computeExposureSummaryStats.ComputeExposureSummaryStatsTask,
doc="Task to to compute summary statistics on the calibrated exposure."
)
do_create_summary_metrics = pexConfig.Field(
dtype=bool,
default=False,
doc="Run the subtask to create summary metrics, and then write those metrics."
)
create_summary_metrics = pexConfig.ConfigurableField(
target=_EmptyTargetTask,
doc="Subtask to create metrics from the summary stats. This must be retargeted, likely to an"
"analysis_tools task such as CalexpSummaryMetrics."
)

def setDefaults(self):
super().setDefaults()
@@ -411,6 +445,9 @@ def __init__(self, initial_stars_schema=None, **kwargs):

self.makeSubtask("compute_summary_stats")

if self.config.do_create_summary_metrics:
self.makeSubtask("create_summary_metrics")

# For the butler to persist it.
self.initial_stars_schema = afwTable.SourceCatalog(initial_stars_schema)

@@ -543,7 +580,9 @@ def run(self, *, exposures, id_generator=None, result=None):
result.photometry_matches = lsst.meas.astrom.denormalizeMatches(photometry_matches,
photometry_meta)

self._summarize(result.exposure, result.stars_footprints, result.background)
result.summary_metrics = self._summarize(result.exposure,
result.stars_footprints,
result.background)

return result

@@ -856,3 +895,8 @@ def _summarize(self, exposure, stars, background):
# applied calibration). This needs to be checked.
summary = self.compute_summary_stats.run(exposure, stars, background)
exposure.info.setSummaryStats(summary)

summaryMetrics = None
if self.config.do_create_summary_metrics:
summaryMetrics = self.create_summary_metrics.run(data=summary.__dict__).metrics
return summaryMetrics
37 changes: 37 additions & 0 deletions tests/test_calibrateImage.py
@@ -41,6 +41,9 @@
import lsst.meas.base.tests
import lsst.pipe.base.testUtils
from lsst.pipe.tasks.calibrateImage import CalibrateImageTask
from lsst.analysis.tools.tasks import CalexpSummaryAnalysisTask
from lsst.analysis.tools.atools import CalexpSummaryMetrics
from lsst.analysis.tools.interfaces import MetricMeasurementBundle
import lsst.utils.tests


@@ -138,6 +141,10 @@ def setUp(self):
# Something about this test dataset prefers a larger threshold here.
self.config.star_selector["science"].unresolved.maximum = 0.2

self.config.do_create_summary_metrics = True
self.config.create_summary_metrics.retarget(CalexpSummaryAnalysisTask)
self.config.create_summary_metrics.atools.initial_pvi_metrics = CalexpSummaryMetrics

def _check_run(self, calibrate, result):
"""Test the result of CalibrateImage.run().

@@ -162,6 +169,27 @@ def _check_run(self, calibrate, result):
self.assertFloatsAlmostEqual(summary.ra, self.sky_center.getRa().asDegrees(), rtol=1e-7)
self.assertFloatsAlmostEqual(summary.dec, self.sky_center.getDec().asDegrees(), rtol=1e-7)

# Check that the summary metrics are reasonable.
metrics = result.summary_metrics
if self.config.do_create_summary_metrics:
self.assertIsInstance(metrics, MetricMeasurementBundle)

for metric in metrics['initial_pvi_metrics']:
if metric.metric_name.metric == 'psfSigma':
self.assertFloatsAlmostEqual(metric.quantity.value, 2.0, rtol=1e-2)
if metric.metric_name.metric == 'ra':
self.assertFloatsAlmostEqual(
metric.quantity.value,
self.sky_center.getRa().asDegrees(),
rtol=1e-7)
if metric.metric_name.metric == 'dec':
self.assertFloatsAlmostEqual(
metric.quantity.value,
self.sky_center.getDec().asDegrees(),
rtol=1e-7)
else:
self.assertIsNone(metrics)

# Should have finite sky coordinates in the afw and astropy catalogs.
self.assertTrue(np.isfinite(result.stars_footprints["coord_ra"]).all())
self.assertTrue(np.isfinite(result.stars["coord_ra"]).all())
@@ -212,6 +240,7 @@ def test_run_no_optionals(self):
struct, as appropriate.
"""
self.config.optional_outputs = None
self.config.do_create_summary_metrics = False
calibrate = CalibrateImageTask(config=self.config)
calibrate.astrometry.setRefObjLoader(self.ref_loader)
calibrate.photometry.match.setRefObjLoader(self.ref_loader)
@@ -480,6 +509,10 @@ def setUp(self):
"initial_photometry_match_detector",
{"instrument", "visit", "detector"},
"Catalog")
butlerTests.addDatasetType(self.repo,
"initial_summary_metrics",
{"instrument", "visit", "detector"},
"MetricMeasurementBundle")

# dataIds
self.exposure0_id = self.repo.registry.expandDataId(
@@ -520,6 +553,7 @@ def test_runQuantum(self):
"initial_pvi_background": self.visit_id,
"astrometry_matches": self.visit_id,
"photometry_matches": self.visit_id,
"summary_metrics": self.visit_id,
})
mock_run = lsst.pipe.base.testUtils.runTestQuantum(task, self.butler, quantum)

@@ -549,6 +583,7 @@ def test_runQuantum_2_snaps(self):
"initial_pvi_background": self.visit_id,
"astrometry_matches": self.visit_id,
"photometry_matches": self.visit_id,
"summary_metrics": self.visit_id,
})
mock_run = lsst.pipe.base.testUtils.runTestQuantum(task, self.butler, quantum)

@@ -575,6 +610,7 @@ def test_runQuantum_no_optional_outputs(self):
"initial_pvi_background": self.visit_id,
"astrometry_matches": self.visit_id,
"photometry_matches": self.visit_id,
"summary_metrics": self.visit_id,
}

# Check that we can turn off one output at a time.
@@ -621,6 +657,7 @@ def test_runQuantum_exception(self):
"initial_pvi_background": self.visit_id,
"astrometry_matches": self.visit_id,
"photometry_matches": self.visit_id,
"summary_metrics": self.visit_id,
})

# A generic exception should raise directly.