Load and scale hitmaps and white noise covariance matrices
import logging as log
Simulations are executed at a lower sampling frequency than production (the resulting scaling factors are sketched after this list):
- $20~Hz$ for SAT (production is $100~Hz$)
- $200~Hz$ for LAT (production is $400~Hz$)
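Since production and simulated rates differ by a fixed ratio per telescope, the simulated hit counts have to be scaled up accordingly. Below is a minimal sketch of the `simulations_sampling_frequency_scaling` factor used by `load_hitmap_wcov`; the values are derived from the rates quoted above (the actual definition lives elsewhere in the package):
simulations_sampling_frequency_scaling = {
    "SAT": 5,  # 100 Hz production / 20 Hz simulated
    "LAT": 2,  # 400 Hz production / 200 Hz simulated
}
Integer factors keep the in-place multiplication of the integer hitmap exact.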
hitmaps_naming = {
    "SAT": "telescope_all_time_all_hits.fits.gz",
    "LAT": "filtered_telescope_all_time_all_hmap.fits",
}
wcov_naming = {
    "SAT": "telescope_all_time_all_wcov.fits.gz",
    "LAT": "filtered_telescope_all_time_all_wcov.fits",
}
def load_hitmap_wcov(config, site, channel, realization=0, raw_hitmap=False):
"""Load hitmaps and white noise covariance matrices for a channel
This loads the simulated hitmaps and white noise covariance matrices
and scales them properly to the experiment configuration and duration
as defined in the input config file.
Hitmaps assumes a sampling frequency of 100 Hz for SAT and 400 Hz for
LAT.
Parameters
----------
config : dict
CMB-S4 configuration,
generally loaded from a TOML file
site : str
'Pole' or 'Chile', case doesn't matter
channel : str
Channel tag, e.g. 'MFHS1'
realization : int
Choose one of the available 8 realizations
Returns
-------
hitmap : numpy array
Hitmap for all channels all tubes combined
wcov : numpy array
White noise covariance matrix, rows are:
"II", "IQ", "IU", "QQ", "QU", "UU", units are K^2
"""
    # the same scaling factor applies to the hitmap (multiplied) and to
    # the white noise covariance matrix (divided): the covariance scales
    # like the noise squared, i.e. inversely with the number of hits
    telescope = get_telescope(channel)
    tube = channel[:-1]
    channel_noP = channel.replace("P", "")
    map_filename = (
        Path(f"{realization:08d}")
        / f"{site.lower()}_noise_{telescope}_{channel_noP}_{hitmaps_naming[telescope]}"
    )
    wcov_filename = (
        Path(f"{realization:08d}")
        / f"{site.lower()}_noise_{telescope}_{channel_noP}_{wcov_naming[telescope]}"
    )
    log.info(f"Base folder: {base_folder}")
    log.info(f"Reading {map_filename}")
    hitmap = hp.read_map(Path(base_folder) / map_filename, 0, dtype=None, verbose=False)
    hitmap[hitmap == hp.UNSEEN] = 0
    if raw_hitmap:
        return hitmap
    wcov = hp.read_map(
        Path(base_folder) / wcov_filename, (0, 1, 2, 3, 4, 5), dtype=None, verbose=False
    )
    # scale the 10 simulated days at the simulated observing efficiency
    # up to 1 year at the configured observing efficiency and sensitivity factor
    scaling = (
        365.25
        * get_observing_efficiency(
            config["experiment"]["observing_efficiency"], site, telescope, channel
        )
        * get_observing_efficiency(
            config["experiment"]["sensitivity_factor"], site, telescope, channel
        )
    ) / (10 * simulations_observing_efficiency[site.lower()][channel])
    # focal plane thinning factor of TOD simulations
    scaling *= get_thinfp(channel)
    # scale the simulated tubes up to the tube-years of the configuration
    scaling *= get_tube_years(config, site, channel) / simulated_tubes[tube]
    hitmap = np.round(hitmap * scaling).astype(np.int64)
    # hits were simulated at a fraction of the production sampling frequency
    hitmap *= simulations_sampling_frequency_scaling[telescope]
    # more observing time means lower noise variance
    wcov /= scaling
    wcov[:, hitmap == 0] = hp.UNSEEN
    return hitmap, wcov
import toml
config = toml.load("s4_design.toml")
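With the configuration loaded, the function can be called for one channel at a time, for example the Pole HFS1 channel inspected further down. A usage sketch, assuming the module-level `base_folder` points at the simulation outputs:
hitmap, wcov = load_hitmap_wcov(config, "Pole", "HFS1", realization=0)
# wcov rows are II, IQ, IU, QQ, QU, UU, in K^2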
Estimate observation efficiency
In the Winter 2021 run, there is a significant efficiency loss during the turnarounds of the telescopes. This is already included in the overall observing efficiency, so we want to calibrate it out. The best approach is to use the hitmaps themselves and scale them from whatever number of hits they contain up to the rate expected for 10 days at 100% efficiency for each channel.
Still, we want to make sure they are within 10% of the scheduling efficiencies measured by TOAST:
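Concretely, for each channel the efficiency estimated from a hitmap is the ratio of the accumulated hits to the hits expected in $T = 10$ days at 100% efficiency; the focal plane thinning factor compensates for simulating only a subset of the detectors:

$$\eta = \frac{\sum_p \mathrm{hits}_p}{T \, f_{\mathrm{samp}} \, N_{\mathrm{det/tube}} / \mathrm{thinfp}}$$

This estimate is then divided by the number of simulated tubes, as done in the loop further down.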
scheduling_efficiency = dict(
    pole=dict(SAT=0.642, LAT=0.754),
    chile=dict(SAT=0.988, LAT=1.042),
)
s4 = read_instrument_model()
tot_seconds = 10 * 24 * 60 * 60  # 10 days of simulation
sampling_frequency = dict(SAT=20, LAT=200)
observing_efficiency = dict(Pole={}, Chile={})
for row in s4:
    channel = row["band"]
    sites = ["Pole"] if row["telescope"] == "SAT" or ("P" in channel) else ["Chile"]
    for site in sites:
        print(channel, site)
        raw_hitmap = load_hitmap_wcov(
            config, site, channel, realization=0, raw_hitmap=True
        )
        observing_efficiency[site][channel] = raw_hitmap.sum() / (
            tot_seconds
            * sampling_frequency[row["telescope"]]
            * row["detectors_per_tube"]
            / get_thinfp(channel)
        )
simulated_tubes
scheduling_efficiency
for site, channels in observing_efficiency.items():
    for channel in channels:
        observing_efficiency[site][channel] /= simulated_tubes[channel[:-1]]
for site, channels in observing_efficiency.items():
    for channel, efficiency in channels.items():
        telescope = get_telescope(channel)
        print(
            f"{site} {channel} Efficiency from hitmap {efficiency:.2f}, "
            f"scheduling efficiency {scheduling_efficiency[site.lower()][telescope]:.2f}, "
            f"ratio {efficiency/scheduling_efficiency[site.lower()][telescope]:.1%}"
        )
observing_efficiency
!ls /global/cscratch1/sd/keskital/s4sim/reference_tool_round_2/out/00000000/pole_noise_LAT*hmap.fits
filenames = !ls $base_folder/00000000/*noise*wcov*
import os.path
for f in map(os.path.basename, filenames):
    print(f)
channel = "HFS1"
site = "Pole"
telescope = get_telescope(channel)
input_map = hp.read_map(
    base_folder
    + "/00000000/"
    + f"{site.lower()}_noise_{telescope}_{channel}_telescope_all_time_all_hits.fits.gz",
    0,
)
from astropy.io import fits
fits.open(
    base_folder
    + "/00000000/"
    + f"{site.lower()}_noise_{telescope}_{channel}_telescope_all_time_all_wcov.fits.gz"
)[1].header
# field 3 of the white noise covariance file is the QQ element
# (rows are II, IQ, IU, QQ, QU, UU)
input_QQ_wcov = hp.read_map(
    base_folder
    + "/00000000/"
    + f"{site.lower()}_noise_{telescope}_{channel}_telescope_all_time_all_wcov.fits.gz",
    3,
)
# total number of tubes of this type across all telescopes in the configuration;
# the loop variable is renamed so it doesn't shadow `telescope`, which is
# used as the dictionary key in the same expression
num_tubes = sum(
    telescope_tubes[channel[:-1]]
    for telescope_tubes in config["telescopes"][telescope].values()
)
sampling_rate = input_map.sum()
# 10 days of simulation at the observing efficiency of the simulated schedule
sampling_rate /= 10 * 24 * 3600 * 0.5410630787037037
sampling_rate /= 8592  # HF detectors per tube
sampling_rate *= 8  # focal plane thinning factor
sampling_rate
Check that we are within 5% of 20 Hz (the sampling frequency used in the simulations for SAT; the LAT simulations used 200 Hz):
np.testing.assert_allclose(sampling_rate, 20, rtol=5/100)
import astropy.units as u
input_variance = input_QQ_wcov * u.K**2 * (input_map / (20 * u.Hz))
input_variance = input_variance[input_map > 0]
input_variance = input_variance[input_variance > 0]
import matplotlib.pyplot as plt
plt.hist(input_variance.value, bins=100, log=True);
input_NET = np.sqrt(np.median(input_variance))
input_NET.to(u.uK * u.s**(.5)) / np.sqrt(2)
from astropy.tests.helper import assert_quantity_allclose
channel_NET = 609.1 * u.uK * u.s**(.5)
The channel NET is defined for temperature; when we solve for the two polarization components we pick up a factor of $\sqrt{2}$.
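Explicitly, the relation used in these checks (and again for the full-mission check below): the per-pixel white noise variance times the per-pixel integration time gives the squared polarization NET, and the temperature NET follows by dividing by $\sqrt{2}$:

$$\mathrm{NET}_P = \sqrt{\mathrm{median}_p\left(\sigma^2_{QQ,p} \, \frac{n_{\mathrm{hits},p}}{f_{\mathrm{samp}}}\right)}\,, \qquad \mathrm{NET}_T = \frac{\mathrm{NET}_P}{\sqrt{2}}$$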
assert_quantity_allclose(input_NET/np.sqrt(2), channel_NET, rtol=5/100)
telescope
hitmap, wcov = load_hitmap_wcov(config, site, channel, realization=0)
sampling_rate = hitmap.sum() / (
    config["experiment"]["total_experiment_length_years"]
    * config["experiment"]["observing_efficiency"][site][telescope][channel]
    * config["experiment"]["sensitivity_factor"][site][telescope][channel]
)
sampling_rate /= 365.25 * 24 * 3600
sampling_rate /= 8592 # HF detectors per tube
sampling_rate /= num_tubes
sampling_rate
np.testing.assert_allclose(sampling_rate, 100, rtol=5/100)
# the scaled hitmap must cover exactly the same pixels as the input map
np.testing.assert_array_equal(input_map == 0, hitmap == 0)
%matplotlib inline
import matplotlib.pyplot as plt
log_hitmap = np.log10(hitmap)
log_hitmap[np.isinf(log_hitmap)] = hp.UNSEEN
hp.mollview(log_hitmap, unit="log10(samples)", min=3, max=8.5, title="Hitmap")
# rows 3, 4, 5 of wcov are QQ, QU, UU (see the docstring of load_hitmap_wcov)
for i, label in [(3, "QQ"), (4, "QU"), (5, "UU")]:
    hp.mollview(
        wcov[i],
        unit="$K^2_{CMB}$",
        min=0,
        max=1e-11,
        title=f"{label} White noise covariance",
    )
wcov[wcov == hp.UNSEEN] = 0
variance = wcov[3] * u.K**2 * (hitmap / (100 * u.Hz))
variance[variance != 0].min(), variance.max(), variance[variance != 0].mean()
full_mission_NET = np.sqrt(np.median(variance[hitmap != 0]))
full_mission_NET.to(u.uK * u.s**(.5))/np.sqrt(2)
assert_quantity_allclose(full_mission_NET/np.sqrt(2), channel_NET, rtol=5/100)