6. Average polarimeter per resonance
Import Python libraries
from __future__ import annotations
import logging
import os
from collections import defaultdict
from functools import cache
from math import ceil, sqrt
from textwrap import dedent, wrap
from typing import Literal
import matplotlib.pyplot as plt
import numpy as np
import plotly.graph_objects as go
import sympy as sp
import yaml
from ampform.io import aslatex
from ampform.sympy import PoolSum, perform_cached_doit
from ampform_dpd import AmplitudeModel
from ampform_dpd.decay import Particle
from ampform_dpd.io import perform_cached_lambdify, simplify_latex_rendering
from IPython.display import Latex, Math
from plotly.subplots import make_subplots
from tensorwaves.interface import ParametrizedFunction
from tqdm.auto import tqdm
from polarimetry import formulate_polarimetry
from polarimetry.data import create_data_transformer, generate_phasespace_sample
from polarimetry.function import compute_sub_function
from polarimetry.lhcb import (
ParameterBootstrap,
ParameterType,
flip_production_coupling_signs,
load_model_builder,
load_model_parameters,
)
from polarimetry.lhcb.particle import load_particles
SubSystemID = Literal[1, 2, 3]
simplify_latex_rendering()
FUNCTION_CACHE: dict[sp.Expr, ParametrizedFunction] = {}
MODEL_FILE = "../data/model-definitions.yaml"
PARTICLES = load_particles("../data/particle-definitions.yaml")
GITLAB_CI = "GIT_PAGER" in os.environ
BACKEND = "numpy" if GITLAB_CI else "jax"
NO_TQDM = "EXECUTE_NB" in os.environ
if NO_TQDM:
logging.getLogger().setLevel(logging.ERROR)
logging.getLogger("polarimetry.io").setLevel(logging.ERROR)
@cache
def doit(expr: PoolSum) -> sp.Expr:
return perform_cached_doit(expr)
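The functools.cache decorator memoizes on the expression argument, so repeated unfolds of the same expression within one session return immediately, on top of whatever caching perform_cached_doit itself performs. A toy sketch of the pattern with a plain sympy sum instead of the actual model:
from functools import cache
import sympy as sp

@cache
def toy_doit(expr: sp.Expr) -> sp.Expr:
    return expr.doit()

i = sp.Symbol("i", integer=True)
summation = sp.Sum(i, (i, 1, 10))
assert toy_doit(summation) == 55  # 1 + 2 + ... + 10
assert toy_doit(summation) is toy_doit(summation)  # second call is an in-memory cache hit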
6.1. Computations
Formulate models
def formulate_all_models() -> dict[str, dict[SubSystemID, AmplitudeModel]]:
with open(MODEL_FILE) as f:
data = yaml.load(f, Loader=yaml.SafeLoader)
allowed_model_titles = list(data)
models = defaultdict(dict)
for title in tqdm(
allowed_model_titles,
desc="Formulate models",
disable=NO_TQDM,
):
builder = load_model_builder(MODEL_FILE, PARTICLES, title)
imported_parameters = load_model_parameters(
MODEL_FILE, builder.decay, title, PARTICLES
)
for reference_subsystem in (1, 2, 3):
model = builder.formulate(reference_subsystem)
model.parameter_defaults.update(imported_parameters)
models[title][reference_subsystem] = model
models[title][2] = flip_production_coupling_signs(models[title][2], ["K", "L"])
models[title][3] = flip_production_coupling_signs(models[title][3], ["K", "D"])
return {i: dict(dct.items()) for i, dct in models.items()}
MODELS = formulate_all_models()
NOMINAL_MODEL_TITLE = "Default amplitude model"
NOMINAL_MODEL = MODELS[NOMINAL_MODEL_TITLE]
DECAY = NOMINAL_MODEL[1].decay
Unfold symbolic expressions
def unfold_expressions() -> tuple[
dict[str, sp.Expr],
dict[str, sp.Expr],
dict[str, dict[int, sp.Expr]],
]:
intensity_exprs = {}
alpha_x_exprs = {}
alpha_z_exprs = defaultdict(dict)
for title, ref_models in tqdm(
MODELS.items(),
desc="Unfolding expressions",
disable=NO_TQDM,
):
model = ref_models[1]
intensity_exprs[title] = unfold(model.intensity, model)
for ref, model in tqdm(ref_models.items(), disable=NO_TQDM, leave=False):
builder = load_model_builder(MODEL_FILE, PARTICLES, model_id=title)
alpha_x, _, alpha_z = formulate_polarimetry(builder, ref)
if ref == 1:
alpha_x_exprs[title] = unfold(alpha_x, model)
alpha_z_exprs[title][ref] = unfold(alpha_z, model)
return (
intensity_exprs,
alpha_x_exprs,
{i: dict(dct) for i, dct in alpha_z_exprs.items()},
)
def unfold(expr: sp.Expr, model: AmplitudeModel) -> sp.Expr:
return doit(doit(expr).xreplace(model.amplitudes))
INTENSITY_EXPRS, ALPHA_X_EXPRS, ALPHA_Z_EXPRS = unfold_expressions()
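The unfold helper applies doit twice: the first call unfolds the sums of the top-level expression, xreplace then substitutes the amplitude definitions from model.amplitudes, and the second call unfolds the sums contained in those definitions. A toy sketch of this two-step unfolding with hypothetical symbols (not the actual amplitude model):
import sympy as sp

i = sp.Symbol("i", integer=True)
x, A = sp.symbols("x A")
outer = sp.Sum(A, (i, 1, 2))               # stands in for model.intensity
amplitudes = {A: sp.Sum(x**i, (i, 1, 3))}  # stands in for model.amplitudes
unfolded = outer.doit().xreplace(amplitudes).doit()
assert unfolded == 2 * (x + x**2 + x**3)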
Convert to numerical functions
def lambdify_expressions() -> tuple[
dict[str, ParametrizedFunction],
dict[str, ParametrizedFunction],
dict[str, dict[int, ParametrizedFunction]],
dict[str, dict[int, float]],
]:
intensity_funcs = {}
alpha_x_funcs = {}
alpha_z_funcs = defaultdict(dict)
original_parameters = defaultdict(dict)
for title, ref_models in tqdm(
MODELS.items(),
desc="Lambdifying",
disable=NO_TQDM,
):
reference_subsystem = 1
model = ref_models[reference_subsystem]
intensity_funcs[title] = cached_lambdify(INTENSITY_EXPRS[title], model)
alpha_x_funcs[title] = cached_lambdify(ALPHA_X_EXPRS[title], model)
for ref, model in ref_models.items():
alpha_z_expr = ALPHA_Z_EXPRS[title][ref]
alpha_z_funcs[title][ref] = cached_lambdify(alpha_z_expr, model)
str_parameters = {str(k): v for k, v in model.parameter_defaults.items()}
original_parameters[title][ref] = str_parameters
return (
intensity_funcs,
alpha_x_funcs,
{i: dict(dct) for i, dct in alpha_z_funcs.items()},
{i: dict(dct) for i, dct in original_parameters.items()},
)
def cached_lambdify(expr: sp.Expr, model: AmplitudeModel) -> ParametrizedFunction:
    # Lambdify each unique expression only once; identical expressions reuse the
    # compiled function through FUNCTION_CACHE
    func = FUNCTION_CACHE.get(expr)
    if func is None:
        func = perform_cached_lambdify(
            expr,
            parameters=model.parameter_defaults,
            backend=BACKEND,
        )
        FUNCTION_CACHE[expr] = func
    # Load this model's parameter defaults into the (possibly shared) function
    str_parameters = {str(k): v for k, v in model.parameter_defaults.items()}
    func.update_parameters(str_parameters)
    return func
(
INTENSITY_FUNCS,
ALPHA_X_FUNCS,
ALPHA_Z_FUNCS,
ORIGINAL_PARAMETERS,
) = lambdify_expressions()
Generate phase space sample
N_EVENTS = 100_000
PHSP = generate_phasespace_sample(DECAY, N_EVENTS, seed=0)
for ref in tqdm((1, 2, 3), disable=NO_TQDM, leave=False):
transformer = create_data_transformer(NOMINAL_MODEL[ref], BACKEND)
PHSP.update(transformer(PHSP))
del transformer
Compute statistics with parameter bootstrap
def create_bootstraps() -> dict[SubSystemID, dict[str, ParameterType]]:
bootstraps = {
ref: ParameterBootstrap(MODEL_FILE, DECAY, NOMINAL_MODEL_TITLE)
for ref, model in NOMINAL_MODEL.items()
}
bootstraps[2] = flip_production_coupling_signs(bootstraps[2], ["K", "L"])
bootstraps[3] = flip_production_coupling_signs(bootstraps[3], ["K", "D"])
return {
i: bootstrap.create_distribution(N_BOOTSTRAPS, seed=0)
for i, bootstrap in bootstraps.items()
}
def compute_statistics_weighted_alpha() -> tuple[
dict[str, np.ndarray], dict[str, np.ndarray]
]:
weighted_αx = defaultdict(list)
weighted_αz = defaultdict(list)
resonances = get_resonance_to_reference()
intensity_func = INTENSITY_FUNCS[NOMINAL_MODEL_TITLE]
αx_ref1_func = ALPHA_X_FUNCS[NOMINAL_MODEL_TITLE]
αz_ref1_func = ALPHA_Z_FUNCS[NOMINAL_MODEL_TITLE][1]
original_parameters_ref1 = ORIGINAL_PARAMETERS[NOMINAL_MODEL_TITLE][1]
for resonance, ref in tqdm(
resonances.items(),
desc="Computing statistics",
disable=NO_TQDM,
):
_filter = [resonance.latex]
αz_func = ALPHA_Z_FUNCS[NOMINAL_MODEL_TITLE][ref]
original_parameters = ORIGINAL_PARAMETERS[NOMINAL_MODEL_TITLE][ref]
for i in tqdm(range(N_BOOTSTRAPS), disable=NO_TQDM, leave=False):
new_parameters = {k: v[i] for k, v in BOOTSTRAP_PARAMETERS[ref].items()}
new_parameters_ref1 = {k: v[i] for k, v in BOOTSTRAP_PARAMETERS[1].items()}
intensity_func.update_parameters(new_parameters_ref1)
αx_ref1_func.update_parameters(new_parameters_ref1)
αz_ref1_func.update_parameters(new_parameters_ref1)
αz_func.update_parameters(new_parameters)
intensities = compute_sub_function(intensity_func, PHSP, _filter)
αx_ref1_array = compute_sub_function(αx_ref1_func, PHSP, _filter).real
αz_ref1_array = compute_sub_function(αz_ref1_func, PHSP, _filter).real
αz_array = compute_sub_function(αz_func, PHSP, _filter).real
αx_ref1 = compute_weighted_average(αx_ref1_array, intensities)
αz_ref1 = compute_weighted_average(αz_ref1_array, intensities)
αz = compute_weighted_average(αz_array, intensities)
weighted_αx[resonance.name].append((αx_ref1, αz_ref1))
weighted_αz[resonance.name].append(αz)
intensity_func.update_parameters(original_parameters_ref1)
αx_ref1_func.update_parameters(original_parameters_ref1)
αz_ref1_func.update_parameters(original_parameters_ref1)
αz_func.update_parameters(original_parameters)
return (
{k: np.array(v) for k, v in weighted_αx.items()},
{k: np.array(v) for k, v in weighted_αz.items()},
)
def get_resonance_to_reference() -> dict[Particle, int]:
    # The first letter of the resonance name identifies its subsystem:
    # K → 1 (Kπ), L → 2 (Λ, pK), D → 3 (Δ, pπ)
    subsystem_ids: dict[str, SubSystemID] = dict(K=1, L=2, D=3)
    resonances = [
        c.resonance
        for c in sorted(
            DECAY.chains,
            key=lambda p: (subsystem_ids[p.resonance.name[0]], p.resonance.mass),
        )
    ]
    return {p: subsystem_ids[p.name[0]] for p in resonances}
def compute_weighted_average(v: np.ndarray, weights: np.ndarray) -> np.ndarray:
return np.nansum(v * weights, axis=-1) / np.nansum(weights, axis=-1)
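compute_weighted_average is the intensity-weighted mean \(\sum_i \alpha_i I_i \big/ \sum_i I_i\) over the phase-space sample; np.nansum skips NaN entries in both the numerator and the denominator. A small numerical check with made-up numbers:
import numpy as np

alpha = np.array([0.2, 0.4, np.nan, 0.8])
intensity = np.array([1.0, 3.0, 2.0, np.nan])
numerator = np.nansum(alpha * intensity, axis=-1)  # 0.2*1.0 + 0.4*3.0 = 1.4
denominator = np.nansum(intensity, axis=-1)        # 1.0 + 3.0 + 2.0 = 6.0
assert np.isclose(numerator / denominator, 1.4 / 6.0)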
N_BOOTSTRAPS = 100
BOOTSTRAP_PARAMETERS = create_bootstraps()
STAT_WEIGHTED_ALPHA_REF1, STAT_WEIGHTED_ALPHA_Z = compute_statistics_weighted_alpha()
Compute systematics from alternative models
def compute_systematic_weighted_alpha() -> tuple[
dict[str, np.ndarray], dict[str, np.ndarray]
]:
weighted_αx = defaultdict(list)
weighted_αz = defaultdict(list)
resonances = get_resonance_to_reference()
for title in tqdm(
MODELS,
disable=NO_TQDM,
desc="Computing systematics",
):
for resonance, ref in tqdm(resonances.items(), disable=NO_TQDM, leave=False):
_filter = [resonance.latex]
intensity_func = INTENSITY_FUNCS[title]
αx_func_ref1 = ALPHA_X_FUNCS[title]
αz_func_ref1 = ALPHA_Z_FUNCS[title][1]
αz_func = ALPHA_Z_FUNCS[title][ref]
intensity_func.update_parameters(ORIGINAL_PARAMETERS[title][1])
αx_func_ref1.update_parameters(ORIGINAL_PARAMETERS[title][1])
αz_func_ref1.update_parameters(ORIGINAL_PARAMETERS[title][1])
αz_func.update_parameters(ORIGINAL_PARAMETERS[title][ref])
αx_ref1_array = compute_sub_function(αx_func_ref1, PHSP, _filter)
αz_ref1_array = compute_sub_function(αz_func_ref1, PHSP, _filter)
αz_array = compute_sub_function(αz_func, PHSP, _filter)
intensities = compute_sub_function(intensity_func, PHSP, _filter)
αx_ref1 = compute_weighted_average(αx_ref1_array, intensities).real
αz_ref1 = compute_weighted_average(αz_ref1_array, intensities).real
αz = compute_weighted_average(αz_array, intensities).real
weighted_αx[resonance.name].append((αx_ref1, αz_ref1))
weighted_αz[resonance.name].append(αz)
return (
{k: np.array(v) for k, v in weighted_αx.items()},
{k: np.array(v) for k, v in weighted_αz.items()},
)
SYST_WEIGHTED_ALPHA_REF1, SYST_WEIGHTED_ALPHA_Z = compute_systematic_weighted_alpha()
6.2. Result and comparison
LHCb values are taken from the original study [1]. All numbers are given in units of \(10^{-3}\). The "this study" column lists \(\overline{\alpha}_z\) for the default amplitude model, with the standard deviation over the parameter bootstrap as uncertainty and the smallest and largest shifts among the alternative models as sub- and superscript. The numbered columns give the shift of each alternative model with respect to the default model, with the largest shift marked in red and the smallest in blue:
def tabulate_alpha_z():
with open("../data/observable-references.yaml") as f:
data = yaml.load(f, Loader=yaml.SafeLoader)
lhcb_values = {
k: tuple(float(v) for v in row.split(" ± "))
for k, row in data[NOMINAL_MODEL_TITLE]["alpha_z"].items()
if k != "K(892)"
}
model_ids = list(range(len(MODELS)))
alignment = "r".join("" for _ in model_ids)
header = " & ".join(Rf"\textbf{{{i}}}" for i in model_ids[1:])
src = Rf"\begin{{array}}{{l|c|c|{alignment}}}" "\n"
src += Rf" & \textbf{{this study}} & \textbf{{LHCb}} & {header} \\" "\n"
src += R"\hline" "\n"
src = dedent(src)
for resonance in get_resonance_to_reference():
src += f" {resonance.latex} & "
stat_array = 1e3 * STAT_WEIGHTED_ALPHA_Z[resonance.name]
syst_array = 1e3 * SYST_WEIGHTED_ALPHA_Z[resonance.name]
syst_diff = syst_array[1:] - syst_array[0]
value = syst_array[0]
std = stat_array.std()
min_ = syst_diff.min() # LS-model excluded
max_ = syst_diff.max() # LS-model excluded
src += Rf"{value:>+.0f} \pm {std:.0f}_{{{min_:+.0f}}}^{{{max_:+.0f}}}"
src += " & "
lhcb = lhcb_values.get(resonance.name)
if lhcb is not None:
val, stat, syst, _ = lhcb
src += Rf"{val:+.0f} \pm {stat:.0f} \pm {syst:.0f}"
for diff in syst_diff:
diff_str = f"{diff:>+.0f}"
if diff == syst_diff.max():
src += Rf" & \color{{red}}{{{diff_str}}}"
elif diff == syst_diff.min():
src += Rf" & \color{{blue}}{{{diff_str}}}"
else:
src += f" & {diff_str}"
src += R" \\" "\n"
src += R"\end{array}"
return Latex(src)
tabulate_alpha_z()
\[\begin{split}\begin{array}{l|c|c|rrrrrrrrrrrrrrrrr}
& \textbf{this study} & \textbf{LHCb} & \textbf{1} & \textbf{2} & \textbf{3} & \textbf{4} & \textbf{5} & \textbf{6} & \textbf{7} & \textbf{8} & \textbf{9} & \textbf{10} & \textbf{11} & \textbf{12} & \textbf{13} & \textbf{14} & \textbf{15} & \textbf{16} & \textbf{17} \\
\hline
K(700) & +63 \pm 78_{-235}^{+238} & +60 \pm 660 \pm 240 & -5 & -14 & -55 & -113 & -100 & +57 & -176 & \color{blue}{-235} & \color{red}{+238} & +96 & +51 & +211 & +52 & +11 & -221 & +12 & +1 \\
K(892) & +29 \pm 15_{-17}^{+31} & & +2 & -0 & +2 & -9 & \color{blue}{-17} & +2 & -5 & +23 & \color{red}{+31} & -8 & +5 & +8 & -3 & -2 & +13 & +1 & +10 \\
K(1430) & -339 \pm 28_{-102}^{+139} & -340 \pm 30 \pm 140 & +3 & +3 & -1 & -2 & +45 & +102 & +125 & -9 & -102 & \color{red}{+139} & -15 & \color{blue}{-102} & +7 & +4 & +6 & -1 & +1 \\
\Lambda(1405) & +580 \pm 31_{-122}^{+278} & -580 \pm 50 \pm 280 & +14 & -7 & +3 & +31 & -3 & -30 & \color{blue}{-122} & -22 & +124 & -64 & +31 & \color{red}{+278} & -17 & -8 & +51 & +0 & +7 \\
\Lambda(1520) & +925 \pm 8_{-84}^{+16} & -925 \pm 25 \pm 84 & +7 & +2 & +2 & \color{red}{+16} & -34 & +2 & +8 & +11 & +7 & -3 & +4 & \color{blue}{-84} & +2 & +1 & -6 & +2 & -10 \\
\Lambda(1600) & +199 \pm 51_{-428}^{+499} & -200 \pm 60 \pm 500 & +10 & -5 & +14 & -5 & +21 & +138 & +100 & \color{red}{+499} & \color{blue}{-428} & -140 & +12 & +72 & -21 & -12 & +13 & -19 & +11 \\
\Lambda(1670) & +817 \pm 15_{-46}^{+73} & -817 \pm 42 \pm 73 & +9 & -10 & +12 & +70 & -41 & -5 & \color{red}{+73} & +30 & +47 & \color{blue}{-46} & +12 & +29 & -5 & -3 & +17 & +3 & -18 \\
\Lambda(1690) & +958 \pm 8_{-35}^{+27} & -958 \pm 20 \pm 27 & -3 & +6 & -12 & \color{blue}{-35} & -14 & +22 & \color{red}{+27} & -20 & +3 & -4 & +5 & +18 & -4 & -1 & -1 & -0 & -9 \\
\Lambda(2000) & -573 \pm 9_{-191}^{+124} & +570 \pm 30 \pm 190 & +9 & -1 & +12 & +47 & -24 & -45 & \color{blue}{-191} & +58 & +85 & +78 & -19 & \color{red}{+124} & +6 & +3 & -9 & -2 & -23 \\
\Delta(1232) & +548 \pm 8_{-27}^{+36} & -548 \pm 14 \pm 36 & +9 & +0 & -9 & -14 & +17 & -1 & +10 & \color{red}{+36} & +5 & -11 & +2 & -8 & -2 & -1 & +12 & -0 & \color{blue}{-27} \\
\Delta(1600) & -502 \pm 9_{-112}^{+162} & +500 \pm 50 \pm 170 & +19 & +10 & +6 & +107 & \color{blue}{-112} & +115 & +88 & +49 & \color{red}{+162} & +5 & +51 & +97 & +16 & +9 & -27 & -3 & -53 \\
\Delta(1700) & +216 \pm 18_{-75}^{+42} & -216 \pm 36 \pm 75 & +40 & +4 & -0 & -19 & -2 & +23 & +16 & \color{red}{+42} & +23 & \color{blue}{-75} & -4 & -2 & +18 & +11 & -3 & +5 & -15 \\
\end{array}\end{split}\]
6.3. Distribution analysis
def plot_distributions():
layout_kwargs = dict(
font=dict(size=18),
height=800,
width=1000,
xaxis_title="Resonance",
yaxis_title="<i>ɑ̅<sub>z</sub></i>",
showlegend=False,
)
wrapped_titles = ["<br>".join(wrap(t, width=60)) for t in MODELS]
colors = dict( # https://stackoverflow.com/a/44727682
K="#d62728", # red
L="#1f77b4", # blue
D="#2ca02c", # green
)
align_left = {
"K(700)",
"K(1430)",
"L(1520)",
"L(2000)",
"L(1670)",
"D(1700)",
}
fig = go.Figure()
for i, (resonance, alpha_z) in enumerate(STAT_WEIGHTED_ALPHA_Z.items()):
subsystem_id = resonance[0]
fig.add_trace(
go.Violin(
y=alpha_z,
hovertemplate="<i>ɑ̅<sub>z</sub></i> = %{y:.3f}",
marker_color=colors[subsystem_id],
meanline_visible=True,
name=to_unicode(resonance),
points="all",
text=wrapped_titles,
)
)
fig.add_annotation(
x=i,
y=np.median(alpha_z),
xshift=-65 if resonance in align_left else +50,
font_color=colors[subsystem_id],
font_size=14,
showarrow=False,
text=format_average(resonance),
)
fig.update_layout(
title="<b>Statistical</b> distribution of weighted <i>ɑ̅<sub>z</sub></i>",
**layout_kwargs,
)
fig.update_xaxes(tickangle=45)
fig.show()
fig.update_layout(font=dict(size=24))
fig.write_image("_images/alpha-z-per-resonance-statistical.svg")
fig = go.Figure()
for i, (resonance, alpha_z) in enumerate(SYST_WEIGHTED_ALPHA_Z.items()):
subsystem_id = resonance[0]
fig.add_trace(
go.Box(
y=alpha_z,
boxpoints="all",
hovertemplate=(
"<b>%{text}</b><br>%{x}: <i>ɑ̅<sub>z</sub></i> = %{y:.3f}"
),
marker_color=colors[subsystem_id],
name=to_unicode(resonance),
text=wrapped_titles,
line=dict(color="rgba(0,0,0,0)"),
fillcolor="rgba(0,0,0,0)",
)
)
fig.add_annotation(
x=i,
y=np.median(alpha_z),
xshift=-65 if resonance in align_left else +15,
font_color=colors[subsystem_id],
font_size=14,
showarrow=False,
text=format_average(resonance),
)
fig.update_layout(
title="<b>Systematics</b> distribution of weighted <i>ɑ̅<sub>z</sub></i>",
**layout_kwargs,
)
fig.update_xaxes(tickangle=45)
fig.show()
fig.update_layout(font=dict(size=24))
fig.write_image("_images/alpha-z-per-resonance-systematics.svg")
def to_unicode(resonance: str) -> str:
return resonance.replace("L", "Λ").replace("D", "Δ")
def format_average(resonance: str) -> str:
stat_alpha = 1e3 * STAT_WEIGHTED_ALPHA_Z[resonance]
syst_alpha = 1e3 * SYST_WEIGHTED_ALPHA_Z[resonance]
diff = syst_alpha[1:] - syst_alpha[0]
mean = syst_alpha[0]
std = stat_alpha.std()
return f"{diff.max():+.0f}<br><b>{mean:+.0f}</b>±{std:.0f}<br>{diff.min():+.0f}"
%config InlineBackend.figure_formats = ['svg']
plot_distributions()
6.3.1. XZ-correlations
It follows from the definition of \(\vec\alpha\) for a single resonance that:
\[\begin{split}
\begin{array}{rcl}
\alpha_x &=& \left|\vec\alpha\right| \int I_0 \sin\left(\zeta^0\right) \,\mathrm{d}\tau \big/ \int I_0 \,\mathrm{d}\tau \\
\alpha_z &=& \left|\vec\alpha\right| \int I_0 \cos\left(\zeta^0\right) \,\mathrm{d}\tau \big/ \int I_0 \,\mathrm{d}\tau
\end{array}
\end{split}\]
Both averages are \(\left|\vec\alpha\right|\) times a factor that depends only on \(I_0\) and \(\zeta^0\). If \(I_0\) does not change in the bootstrap, the \(\left(\overline{\alpha}_x, \overline{\alpha}_z\right)\) points therefore lie on a straight line through the origin, so their correlation is 100%. This may explain the \(xz\)-correlation observed for \(\overline{\alpha}\) over the complete decay, as reported in Average polarimetry values.
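A minimal numerical sketch of this argument, with made-up numbers: if the \(I_0\) integrals stay fixed, only \(\left|\vec\alpha\right|\) fluctuates between bootstrap samples, so \(\overline{\alpha}_x\) and \(\overline{\alpha}_z\) are proportional to each other and therefore fully correlated.
import numpy as np

rng = np.random.default_rng(seed=0)
alpha_norm = rng.normal(0.7, 0.05, size=100)  # |α| varies over the bootstrap
sin_factor = 0.3  # plays the role of ∫ I0 sin(ζ^0) dτ / ∫ I0 dτ, fixed if I0 does not change
cos_factor = 0.9  # plays the role of ∫ I0 cos(ζ^0) dτ / ∫ I0 dτ, fixed if I0 does not change
alpha_x = sin_factor * alpha_norm
alpha_z = cos_factor * alpha_norm
assert np.isclose(np.corrcoef(alpha_x, alpha_z)[0, 1], 1.0)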
def round_nested(expression: sp.Expr, n_decimals: int) -> sp.Expr:
no_sqrt_expr = expression
for node in sp.preorder_traversal(expression):
if node.free_symbols:
continue
if isinstance(node, sp.Pow) and node.args[1] == 1 / 2:
no_sqrt_expr = no_sqrt_expr.xreplace({node: node.n()})
rounded_expr = no_sqrt_expr
for node in sp.preorder_traversal(no_sqrt_expr):
if isinstance(node, (float, sp.Float)):
rounded_expr = rounded_expr.xreplace({node: round(node, n_decimals)})
return rounded_expr
resonance_name = "L(2000)"
resonance_couplings = {
k: 0 if "production" in str(k) and resonance_name not in str(k) else v
for k, v in NOMINAL_MODEL[1].parameter_defaults.items()
}
intensity_symbol = sp.Symbol(f"I_{{{resonance_name}}}")
intensity_expr = round_nested(
INTENSITY_EXPRS[NOMINAL_MODEL_TITLE].xreplace(resonance_couplings).simplify(),
n_decimals=3,
)
Math(aslatex({intensity_symbol: intensity_expr}))
\[\begin{split}\displaystyle \begin{array}{rcl}
I_{L(2000)} &=& 0 \\
\end{array}\end{split}\]
alpha_symbol = sp.Symbol(Rf"\alpha_{{x,{resonance_name}}}")
alpha_expr = (
round_nested(
ALPHA_X_EXPRS[NOMINAL_MODEL_TITLE].xreplace(resonance_couplings).simplify(),
n_decimals=3,
)
.rewrite(sp.conjugate)
.simplify()
)
Math(aslatex({alpha_symbol: alpha_expr}))
\[\begin{split}\displaystyle \begin{array}{rcl}
\alpha_{x,L(2000)} &=& \text{NaN} \\
\end{array}\end{split}\]
alpha_symbol = sp.Symbol(Rf"\alpha_{{z,{resonance_name}}}")
alpha_expr = (
round_nested(
ALPHA_Z_EXPRS[NOMINAL_MODEL_TITLE][1].xreplace(resonance_couplings).simplify(),
n_decimals=3,
)
.rewrite(sp.conjugate)
.simplify()
)
Math(aslatex({alpha_symbol: alpha_expr}))
\[\begin{split}\displaystyle \begin{array}{rcl}
\alpha_{z,L(2000)} &=& \text{NaN} \\
\end{array}\end{split}\]
def plot_correlation_xz_mpl(stat_or_syst, typ: str) -> None:
resonances = get_resonance_to_reference()
n_resonances = len(resonances)
n_cols = ceil(sqrt(n_resonances))
n_rows = ceil(n_resonances / n_cols)
fig, axes = plt.subplots(
figsize=(12, 8),
ncols=n_cols,
nrows=n_rows,
)
fig.suptitle(typ + R" $\overline{\alpha}_{xz}$-distribution")
colors = dict( # https://stackoverflow.com/a/44727682
K="#d62728", # red
L="#1f77b4", # blue
D="#2ca02c", # green
)
for ax, resonance in zip(axes.flatten(), resonances):
subsystem = resonance.name[0]
color = colors[subsystem]
αx, αz = stat_or_syst[resonance.name].T
if αx.std() == 0:
correlation = 1
slope = 0
else:
correlation = np.corrcoef(αx, αz)[0, 1]
slope, _ = np.polyfit(αz, αx, deg=1)
ax.scatter(αz, αx, c=color, s=1)
ax.set_aspect("equal", adjustable="datalim")
ax.set_title(f"${resonance.latex}$", y=0.85)
kwargs = dict(c=color, size=8, transform=ax.transAxes)
ax.text(0.03, 0.11, f"slope: {slope:+.3g}", **kwargs)
ax.text(0.03, 0.03, f"correlation: {correlation:+.3g}", **kwargs)
if ax in axes[-1, :]:
ax.set_xlabel(R"$\overline{\alpha}_z$")
if ax in axes[:, 0]:
ax.set_ylabel(R"$\overline{\alpha}_x$")
fig.tight_layout()
fig.savefig(f"_images/alpha-xz-{typ.lower()}.svg")
plt.show()
plt.close()
%config InlineBackend.figure_formats = ['svg']
plot_correlation_xz_mpl(STAT_WEIGHTED_ALPHA_REF1, "Statistics")
plot_correlation_xz_mpl(SYST_WEIGHTED_ALPHA_REF1, "Systematics")
def plot_correlation_xz(stat_or_syst, typ: str) -> None:
resonances = get_resonance_to_reference()
n_resonances = len(resonances)
n_cols = ceil(sqrt(n_resonances))
n_rows = ceil(n_resonances / n_cols)
fig = make_subplots(
cols=n_cols,
rows=n_rows,
subplot_titles=[to_unicode(r.name) for r in resonances],
horizontal_spacing=0.02,
vertical_spacing=0.08,
)
    fig.update_layout(
        title_text=f"<b>{typ}</b> distribution of weighted <i>ɑ̅<sub>xz</sub></i>",
        height=800,
        width=1000,
        showlegend=False,
    )
colors = dict( # https://stackoverflow.com/a/44727682
K="#d62728", # red
L="#1f77b4", # blue
D="#2ca02c", # green
)
wrapped_titles = ["<br>".join(wrap(t, width=60)) for t in MODELS]
hovertemplate = "<i>ɑ̅<sub>x</sub></i> = %{y:.3f}, <i>ɑ̅<sub>z</sub></i> = %{x:.3f}"
if "syst" in typ.lower():
hovertemplate = "<b>%{text}</b><br>" + hovertemplate
for i, resonance in enumerate(resonances):
αx, αz = stat_or_syst[resonance.name].T
subsystem_name = resonance.name[0]
fig.add_trace(
go.Scatter(
x=αz,
y=αx,
hovertemplate=hovertemplate,
marker_color=colors[subsystem_name],
name=to_unicode(resonance.name),
text=wrapped_titles,
mode="markers",
),
col=i % n_cols + 1,
row=i // n_cols + 1,
)
fig.show()
fig.write_image(f"_images/alpha-xz-{typ.lower()}-plotly.svg")
%config InlineBackend.figure_formats = ['svg']
plot_correlation_xz(STAT_WEIGHTED_ALPHA_REF1, "Statistics")
plot_correlation_xz(SYST_WEIGHTED_ALPHA_REF1, "Systematics")