new
This commit is contained in:
49
rfg_adc_plotter/processing/__init__.py
Normal file
49
rfg_adc_plotter/processing/__init__.py
Normal file
@ -0,0 +1,49 @@
|
||||
"""Pure sweep-processing helpers."""
|
||||
|
||||
from rfg_adc_plotter.processing.calibration import (
|
||||
calibrate_freqs,
|
||||
get_calibration_base,
|
||||
get_calibration_coeffs,
|
||||
recalculate_calibration_c,
|
||||
set_calibration_base_value,
|
||||
)
|
||||
from rfg_adc_plotter.processing.fft import (
|
||||
compute_distance_axis,
|
||||
compute_fft_mag_row,
|
||||
compute_fft_row,
|
||||
fft_mag_to_db,
|
||||
)
|
||||
from rfg_adc_plotter.processing.formatting import (
|
||||
compute_auto_ylim,
|
||||
format_status_kv,
|
||||
parse_spec_clip,
|
||||
)
|
||||
from rfg_adc_plotter.processing.normalization import (
|
||||
build_calib_envelopes,
|
||||
normalize_by_calib,
|
||||
)
|
||||
from rfg_adc_plotter.processing.peaks import (
|
||||
find_peak_width_markers,
|
||||
find_top_peaks_over_ref,
|
||||
rolling_median_ref,
|
||||
)
|
||||
|
||||
# Public, alphabetically sorted API of the processing package.
__all__ = [
    "build_calib_envelopes",
    "calibrate_freqs",
    "compute_auto_ylim",
    "compute_distance_axis",
    "compute_fft_mag_row",
    "compute_fft_row",
    "fft_mag_to_db",
    "find_peak_width_markers",
    "find_top_peaks_over_ref",
    "format_status_kv",
    "get_calibration_base",
    "get_calibration_coeffs",
    "normalize_by_calib",
    "parse_spec_clip",
    "recalculate_calibration_c",
    "rolling_median_ref",
    "set_calibration_base_value",
]
|
||||
81
rfg_adc_plotter/processing/calibration.py
Normal file
81
rfg_adc_plotter/processing/calibration.py
Normal file
@ -0,0 +1,81 @@
|
||||
"""Frequency-axis calibration helpers."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Mapping
|
||||
|
||||
import numpy as np
|
||||
|
||||
from rfg_adc_plotter.constants import SWEEP_FREQ_MAX_GHZ, SWEEP_FREQ_MIN_GHZ
|
||||
from rfg_adc_plotter.types import SweepData
|
||||
|
||||
|
||||
def recalculate_calibration_c(
    base_coeffs: np.ndarray,
    f_min: float = SWEEP_FREQ_MIN_GHZ,
    f_max: float = SWEEP_FREQ_MAX_GHZ,
) -> np.ndarray:
    """Recalculate coefficients while preserving sweep edges.

    The quadratic ``c0 + c1*f + c2*f**2`` is affinely rescaled so that it
    maps ``f_min`` to ``f_min`` and ``f_max`` to ``f_max``.  If the mapping
    is degenerate (equal or non-finite edge values), the (zero-padded)
    input coefficients are returned unchanged.
    """
    flat = np.asarray(base_coeffs, dtype=np.float64).reshape(-1)
    if flat.size < 3:
        # Zero-pad missing higher-order terms.
        padded = np.zeros((3,), dtype=np.float64)
        padded[: flat.size] = flat
        flat = padded
    c0 = float(flat[0])
    c1 = float(flat[1])
    c2 = float(flat[2])
    lo = float(f_min)
    hi = float(f_max)
    y_lo = c0 + c1 * lo + c2 * (lo ** 2)
    y_hi = c0 + c1 * hi + c2 * (hi ** 2)
    # Degenerate mapping: fall back to the unscaled coefficients.
    if y_lo == y_hi or not (np.isfinite(y_lo) and np.isfinite(y_hi)):
        return np.asarray([c0, c1, c2], dtype=np.float64)
    scale = (hi - lo) / (y_hi - y_lo)
    shift = lo - scale * y_lo
    return np.asarray(
        [shift + scale * c0, scale * c1, scale * c2],
        dtype=np.float64,
    )
|
||||
|
||||
|
||||
# Base (user-editable) quadratic calibration coefficients [c0, c1, c2].
CALIBRATION_C_BASE = np.asarray([0.0, 1.0, 0.025], dtype=np.float64)
# Working coefficients, rescaled so the sweep edge frequencies map to themselves.
CALIBRATION_C = recalculate_calibration_c(CALIBRATION_C_BASE)
|
||||
|
||||
|
||||
def get_calibration_base() -> np.ndarray:
    """Return a defensive float64 copy of the base calibration coefficients."""
    return np.array(CALIBRATION_C_BASE, dtype=np.float64)
|
||||
|
||||
|
||||
def get_calibration_coeffs() -> np.ndarray:
    """Return a defensive float64 copy of the working calibration coefficients."""
    return np.array(CALIBRATION_C, dtype=np.float64)
|
||||
|
||||
|
||||
def set_calibration_base_value(index: int, value: float) -> np.ndarray:
    """Update one base coefficient and recalculate the working coefficients.

    Returns a copy of the refreshed working coefficients.
    """
    global CALIBRATION_C
    slot = int(index)
    CALIBRATION_C_BASE[slot] = float(value)
    # Rescale so the sweep edges stay fixed after the edit.
    CALIBRATION_C = recalculate_calibration_c(CALIBRATION_C_BASE)
    return get_calibration_coeffs()
|
||||
|
||||
|
||||
def calibrate_freqs(sweep: Mapping[str, Any]) -> SweepData:
    """Return a sweep copy with calibrated and resampled frequency axis.

    The frequency axis is passed through the working quadratic calibration,
    then (when at least two points exist) replaced by a uniform grid with
    the intensities linearly interpolated onto it.
    """
    axis = np.asarray(sweep["F"], dtype=np.float64).copy()
    intensity = np.asarray(sweep["I"], dtype=np.float64).copy()
    coeffs = np.asarray(CALIBRATION_C, dtype=np.float64)
    if axis.size > 0:
        # Quadratic correction: c0 + c1*f + c2*f^2.
        axis = coeffs[0] + coeffs[1] * axis + coeffs[2] * (axis * axis)

    if axis.size < 2:
        # Too few points to resample; return plain copies.
        return {"F": axis.copy(), "I": intensity.copy()}

    uniform_axis = np.linspace(float(axis[0]), float(axis[-1]), axis.size, dtype=np.float64)
    uniform_vals = np.interp(uniform_axis, axis, intensity).astype(np.float64)
    return {"F": uniform_axis, "I": uniform_vals}
|
||||
109
rfg_adc_plotter/processing/fft.py
Normal file
109
rfg_adc_plotter/processing/fft.py
Normal file
@ -0,0 +1,109 @@
|
||||
"""FFT helpers for line and waterfall views."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Optional, Tuple
|
||||
|
||||
import numpy as np
|
||||
|
||||
from rfg_adc_plotter.constants import C_M_S, FFT_LEN
|
||||
|
||||
|
||||
def prepare_fft_segment(
    sweep: np.ndarray,
    freqs: Optional[np.ndarray],
    fft_len: int = FFT_LEN,
) -> Optional[Tuple[np.ndarray, int]]:
    """Prepare a sweep segment for FFT on a uniform frequency grid.

    Returns ``(segment, n)`` where ``segment`` is a float32 array of length
    ``n = min(sweep.size, fft_len)``, or ``None`` when there is nothing to
    transform.  When the frequency axis is missing, too short, or unusable,
    the NaN-zeroed raw segment is returned instead of a resampled one.
    """
    n = min(int(sweep.size), int(fft_len))
    if n <= 0:
        return None

    segment = np.asarray(sweep[:n], dtype=np.float32)
    # Fallback result used whenever resampling is not possible.
    raw = np.nan_to_num(segment, nan=0.0).astype(np.float32, copy=False)
    if freqs is None:
        return raw, n

    axis = np.asarray(freqs)
    if axis.size < n:
        return raw, n

    axis_seg = np.asarray(axis[:n], dtype=np.float64)
    usable = np.isfinite(segment) & np.isfinite(axis_seg)
    if int(np.count_nonzero(usable)) < 2:
        return raw, n

    # Sort by frequency (stable) and drop duplicate abscissae so np.interp
    # receives a strictly usable monotonic grid.
    xs = axis_seg[usable]
    ys = segment[usable]
    order = np.argsort(xs, kind="mergesort")
    xs = xs[order]
    ys = ys[order]
    xs_uniq, first_idx = np.unique(xs, return_index=True)
    ys_uniq = ys[first_idx]
    if xs_uniq.size < 2 or xs_uniq[-1] <= xs_uniq[0]:
        return raw, n

    grid = np.linspace(float(xs_uniq[0]), float(xs_uniq[-1]), n, dtype=np.float64)
    resampled = np.interp(grid, xs_uniq, ys_uniq).astype(np.float32)
    return resampled, n
|
||||
|
||||
|
||||
def fft_mag_to_db(mag: np.ndarray) -> np.ndarray:
    """Convert linear magnitude to dB with safe zero handling.

    Negative magnitudes (numerical noise) are clamped to zero, and a small
    offset keeps ``log10`` finite for zero input (floor of -180 dB).
    """
    values = np.asarray(mag, dtype=np.float32)
    clamped = np.maximum(values, 0.0)
    db = 20.0 * np.log10(clamped + 1e-9)
    return db.astype(np.float32, copy=False)
|
||||
|
||||
|
||||
def compute_fft_mag_row(
    sweep: np.ndarray,
    freqs: Optional[np.ndarray],
    bins: int,
) -> np.ndarray:
    """Compute a linear FFT magnitude row of exactly ``bins`` samples.

    Parameters
    ----------
    sweep:
        Raw sweep values; at most ``FFT_LEN`` samples are used.
    freqs:
        Optional frequency axis used to resample onto a uniform grid.
    bins:
        Required output width.  Rows that cannot be computed are NaN-filled.
    """
    if bins <= 0:
        return np.zeros((0,), dtype=np.float32)

    prepared = prepare_fft_segment(sweep, freqs, fft_len=FFT_LEN)
    if prepared is None:
        return np.full((bins,), np.nan, dtype=np.float32)

    fft_seg, take_fft = prepared
    fft_in = np.zeros((FFT_LEN,), dtype=np.float32)
    # Hann window suppresses spectral leakage from the finite segment.
    window = np.hanning(take_fft).astype(np.float32)
    fft_in[:take_fft] = fft_seg * window
    spec = np.fft.ifft(fft_in)
    mag = np.abs(spec).astype(np.float32)
    if mag.shape[0] > bins:
        mag = mag[:bins]
    elif mag.shape[0] < bins:
        # BUG FIX: previously a request for more bins than FFT_LEN returned a
        # short row, so callers stacking rows into a fixed-width waterfall
        # would fail.  Pad with NaN so the output width is always ``bins``.
        pad = np.full((bins - mag.shape[0],), np.nan, dtype=np.float32)
        mag = np.concatenate([mag, pad])
    return mag
|
||||
|
||||
|
||||
def compute_fft_row(
    sweep: np.ndarray,
    freqs: Optional[np.ndarray],
    bins: int,
) -> np.ndarray:
    """Compute a dB FFT row (linear magnitude row converted via fft_mag_to_db)."""
    magnitude = compute_fft_mag_row(sweep, freqs, bins)
    return fft_mag_to_db(magnitude)
|
||||
|
||||
|
||||
def compute_distance_axis(freqs: Optional[np.ndarray], bins: int) -> np.ndarray:
    """Compute the one-way distance axis for IFFT output.

    Falls back to a plain bin index when no usable frequency step can be
    derived from *freqs* (missing axis, fewer than two finite samples, or a
    non-positive step).
    """
    if bins <= 0:
        return np.zeros((0,), dtype=np.float64)
    index_axis = np.arange(bins, dtype=np.float64)
    if freqs is None:
        return index_axis

    axis = np.asarray(freqs, dtype=np.float64)
    finite = axis[np.isfinite(axis)]
    if finite.size < 2:
        return index_axis

    # Mean frequency step across the finite samples, GHz -> Hz.
    step_ghz = float((finite[-1] - finite[0]) / max(1, finite.size - 1))
    step_hz = abs(step_ghz) * 1e9
    if not np.isfinite(step_hz) or step_hz <= 0.0:
        return index_axis

    # Range resolution of the IFFT: c / (2 * N * df).
    step_m = C_M_S / (2.0 * FFT_LEN * step_hz)
    return index_axis * step_m
|
||||
71
rfg_adc_plotter/processing/formatting.py
Normal file
71
rfg_adc_plotter/processing/formatting.py
Normal file
@ -0,0 +1,71 @@
|
||||
"""Formatting and display-range helpers."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Mapping, Optional, Tuple
|
||||
|
||||
import numpy as np
|
||||
|
||||
|
||||
def format_status_kv(data: Mapping[str, Any]) -> str:
    """Convert status metrics into a compact single-line representation.

    Each entry is rendered as ``key:value``; values are formatted as
    short numbers when possible, ``NA`` for None, ``nan`` for non-finite
    floats, and verbatim text otherwise.
    """

    def _render(value: Any) -> str:
        if value is None:
            return "NA"
        try:
            number = float(value)
        except Exception:
            # Non-numeric values pass through verbatim.
            return str(value)
        if not np.isfinite(number):
            return "nan"
        magnitude = abs(number)
        if magnitude >= 1000 or 0 < magnitude < 0.01:
            # Very large or very small: compact significant-figure form.
            return f"{number:.3g}"
        # Fixed precision with trailing zeros (and a bare dot) removed.
        return f"{number:.3f}".rstrip("0").rstrip(".")

    parts = [f"{key}:{_render(value)}" for key, value in data.items()]
    return " ".join(parts)
|
||||
|
||||
|
||||
def parse_spec_clip(spec: Optional[str]) -> Optional[Tuple[float, float]]:
    """Parse a waterfall percentile clip specification.

    Accepts ``"low,high"`` (or ``"low;high"``); returns ``None`` for empty,
    disabling ("off"/"none"/"no"), or malformed input, or when the bounds
    are not ordered percentiles within [0, 100].
    """
    if not spec:
        return None
    text = str(spec).strip().lower()
    if text in ("off", "none", "no"):
        return None
    try:
        lo_str, hi_str = text.replace(";", ",").split(",")
        lo_val = float(lo_str)
        hi_val = float(hi_str)
    except Exception:
        # Wrong field count or non-numeric bounds.
        return None
    if 0.0 <= lo_val < hi_val <= 100.0:
        return (lo_val, hi_val)
    return None
|
||||
|
||||
|
||||
def compute_auto_ylim(*series_list: Optional[np.ndarray]) -> Optional[Tuple[float, float]]:
    """Compute a common Y-range with a small padding.

    Ignores ``None`` entries, empty arrays, and non-finite samples.
    Returns ``None`` when no finite data is available.
    """
    mins: list = []
    maxs: list = []
    for series in series_list:
        if series is None:
            continue
        data = np.asarray(series)
        if data.size == 0:
            continue
        finite = data[np.isfinite(data)]
        if finite.size == 0:
            continue
        mins.append(float(np.min(finite)))
        maxs.append(float(np.max(finite)))

    if not mins:
        return None
    lo = min(mins)
    hi = max(maxs)
    # Flat data gets an absolute pad so the range never collapses.
    pad = max(1.0, abs(lo) * 0.05) if lo == hi else 0.05 * (hi - lo)
    return (lo - pad, hi + pad)
|
||||
116
rfg_adc_plotter/processing/normalization.py
Normal file
116
rfg_adc_plotter/processing/normalization.py
Normal file
@ -0,0 +1,116 @@
|
||||
"""Sweep normalization helpers."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Tuple
|
||||
|
||||
import numpy as np
|
||||
|
||||
|
||||
def normalize_sweep_simple(raw: np.ndarray, calib: np.ndarray) -> np.ndarray:
    """Simple element-wise raw/calib normalization.

    Samples past the shorter input, and any division producing a
    non-finite value (e.g. division by zero), come out as NaN.
    """
    width = min(raw.size, calib.size)
    if width <= 0:
        return raw
    result = np.full_like(raw, np.nan, dtype=np.float32)
    # Zero-division is allowed here; the infinities are mapped to NaN below.
    with np.errstate(divide="ignore", invalid="ignore"):
        result[:width] = raw[:width] / calib[:width]
    # Replace ±inf with NaN; NaN stays NaN.
    return np.nan_to_num(result, nan=np.nan, posinf=np.nan, neginf=np.nan)
|
||||
|
||||
|
||||
def build_calib_envelopes(calib: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """Estimate lower and upper envelopes of a calibration curve.

    Local maxima anchor the upper envelope, local minima the lower one;
    both are linearly interpolated between anchors (endpoints always
    included).  Returns ``(lower, upper)`` float32 arrays the same length
    as *calib*; all-NaN input yields zero envelopes.
    """
    n = int(calib.size)
    if n <= 0:
        empty = np.zeros((0,), dtype=np.float32)
        return empty, empty

    curve = np.asarray(calib, dtype=np.float32)
    finite_mask = np.isfinite(curve)
    if not finite_mask.any():
        flat = np.zeros_like(curve, dtype=np.float32)
        return flat, flat

    # Patch non-finite samples by linear interpolation over sample index.
    if not finite_mask.all():
        idx_axis = np.arange(n, dtype=np.float32)
        curve = curve.copy()
        curve[~finite_mask] = np.interp(
            idx_axis[~finite_mask], idx_axis[finite_mask], curve[finite_mask]
        ).astype(np.float32)

    if n < 3:
        # Too short for extremum detection: both envelopes are the curve.
        return curve.copy(), curve.copy()

    slopes = np.sign(np.diff(curve)).astype(np.int8, copy=False)

    # Flat runs (zero slope) inherit the nearest non-zero slope, scanning
    # forward first, then backward; a fully flat curve defaults to +1.
    if np.any(slopes == 0):
        for k in range(1, slopes.size):
            if slopes[k] == 0:
                slopes[k] = slopes[k - 1]
        for k in range(slopes.size - 2, -1, -1):
            if slopes[k] == 0:
                slopes[k] = slopes[k + 1]
        slopes[slopes == 0] = 1

    # Slope sign changes mark the local extrema used as envelope anchors.
    rising_to_falling = np.where((slopes[:-1] > 0) & (slopes[1:] < 0))[0] + 1
    falling_to_rising = np.where((slopes[:-1] < 0) & (slopes[1:] > 0))[0] + 1

    sample_axis = np.arange(n, dtype=np.float32)

    def _envelope(anchors: np.ndarray) -> np.ndarray:
        # Interpolate through the anchors, always including both endpoints.
        if anchors.size == 0:
            nodes = np.array([0, n - 1], dtype=np.int64)
        else:
            nodes = np.unique(np.concatenate(([0], anchors, [n - 1]))).astype(np.int64)
        return np.interp(sample_axis, nodes.astype(np.float32), curve[nodes]).astype(np.float32)

    upper = _envelope(rising_to_falling)
    lower = _envelope(falling_to_rising)

    # Guarantee lower <= upper everywhere.
    crossed = lower > upper
    if np.any(crossed):
        swapped = upper[crossed].copy()
        upper[crossed] = lower[crossed]
        lower[crossed] = swapped

    return lower, upper
|
||||
|
||||
|
||||
def normalize_sweep_projector(raw: np.ndarray, calib: np.ndarray) -> np.ndarray:
    """Project raw values between calibration envelopes into [-1000, 1000].

    Each sample is mapped linearly from ``[lower, upper]`` (the calibration
    envelopes) onto ``[-1000, 1000]`` and clamped.  Samples whose envelope
    span is non-positive, near-degenerate, or non-finite — and samples past
    the shorter input — come out as NaN.
    """
    width = min(raw.size, calib.size)
    if width <= 0:
        return raw

    result = np.full_like(raw, np.nan, dtype=np.float32)
    segment = np.asarray(raw[:width], dtype=np.float32)
    lower, upper = build_calib_envelopes(np.asarray(calib[:width], dtype=np.float32))
    span = upper - lower

    # Tolerance scaled from the typical positive span; guards against
    # near-degenerate envelope gaps blowing up the division.
    positive_span = span[np.isfinite(span) & (span > 0)]
    if positive_span.size > 0:
        eps = max(float(np.median(positive_span)) * 1e-6, 1e-9)
    else:
        eps = 1e-9

    usable = (
        np.isfinite(segment)
        & np.isfinite(lower)
        & np.isfinite(upper)
        & (span > eps)
    )
    if np.any(usable):
        projected = np.empty_like(segment, dtype=np.float32)
        # Linear map [lower, upper] -> [-1000, 1000], then clamp.
        projected[usable] = (
            (2.0 * (segment[usable] - lower[usable]) / span[usable]) - 1.0
        ) * 1000.0
        projected[usable] = np.clip(projected[usable], -1000.0, 1000.0)
        projected[~usable] = np.nan
        result[:width] = projected
    return result
|
||||
|
||||
|
||||
def normalize_by_calib(raw: np.ndarray, calib: np.ndarray, norm_type: str) -> np.ndarray:
    """Apply the selected normalization method.

    ``"simple"`` (case-insensitive) selects element-wise division; any
    other value selects envelope projection.
    """
    method = str(norm_type).strip().lower()
    if method == "simple":
        return normalize_sweep_simple(raw, calib)
    return normalize_sweep_projector(raw, calib)
|
||||
209
rfg_adc_plotter/processing/peaks.py
Normal file
209
rfg_adc_plotter/processing/peaks.py
Normal file
@ -0,0 +1,209 @@
|
||||
"""Peak-search helpers for FFT visualizations."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
import numpy as np
|
||||
|
||||
|
||||
def find_peak_width_markers(xs: np.ndarray, ys: np.ndarray) -> Optional[Dict[str, float]]:
    """Find the dominant non-zero peak and its half-height width.

    Only samples with finite values and strictly positive X are considered.
    The peak is preferred inside the central 25-75% of the X span; the
    background level is estimated from short shoulder windows on either
    side of the peak.  Returns a dict with keys ``background``, ``left``,
    ``right``, ``width`` and ``amplitude``, or ``None`` when no well-formed
    peak exists.

    NOTE(review): the crossing scans assume *xs* is sorted ascending —
    confirm at call sites.
    """
    x_arr = np.asarray(xs, dtype=np.float64)
    y_arr = np.asarray(ys, dtype=np.float64)
    valid = np.isfinite(x_arr) & np.isfinite(y_arr) & (x_arr > 0.0)
    if int(np.count_nonzero(valid)) < 3:
        return None

    x = x_arr[valid]
    y = y_arr[valid]
    x_min = float(x[0])
    x_max = float(x[-1])
    x_span = x_max - x_min
    # Prefer a peak inside the central half of the X range (skips edge
    # artifacts); fall back to the global maximum when that window is empty.
    central_mask = (x >= (x_min + 0.25 * x_span)) & (x <= (x_min + 0.75 * x_span))
    if int(np.count_nonzero(central_mask)) > 0:
        central_idx = np.flatnonzero(central_mask)
        peak_idx = int(central_idx[int(np.argmax(y[central_mask]))])
    else:
        peak_idx = int(np.argmax(y))
    peak_y = float(y[peak_idx])
    # Shoulder windows: a small gap away from the peak, sized from y.size.
    shoulder_gap = max(1, min(8, y.size // 64 if y.size > 0 else 1))
    shoulder_width = max(4, min(32, y.size // 16 if y.size > 0 else 4))
    left_lo = max(0, peak_idx - shoulder_gap - shoulder_width)
    left_hi = max(0, peak_idx - shoulder_gap)
    right_lo = min(y.size, peak_idx + shoulder_gap + 1)
    right_hi = min(y.size, right_lo + shoulder_width)
    background_parts = []
    if left_hi > left_lo:
        background_parts.append(float(np.nanmedian(y[left_lo:left_hi])))
    if right_hi > right_lo:
        background_parts.append(float(np.nanmedian(y[right_lo:right_hi])))
    if background_parts:
        background = float(np.mean(background_parts))
    else:
        # No shoulders available (peak at the very edge): fall back to a
        # low percentile of the whole curve.
        background = float(np.nanpercentile(y, 10))
    if not np.isfinite(peak_y) or not np.isfinite(background) or peak_y <= background:
        return None

    # Level at half the peak's height above the estimated background.
    half_level = background + 0.5 * (peak_y - background)

    def _interp_cross(x0: float, y0: float, x1: float, y1: float) -> float:
        # X where the segment (x0, y0)-(x1, y1) crosses half_level;
        # degenerate or non-finite segments fall back to x1.
        if not (np.isfinite(x0) and np.isfinite(y0) and np.isfinite(x1) and np.isfinite(y1)):
            return x1
        dy = y1 - y0
        if dy == 0.0:
            return x1
        t = (half_level - y0) / dy
        t = min(1.0, max(0.0, t))
        return x0 + t * (x1 - x0)

    # Walk left from the peak to the first half-level crossing.
    left_x = float(x[0])
    for i in range(peak_idx, 0, -1):
        if y[i - 1] <= half_level <= y[i]:
            left_x = _interp_cross(float(x[i - 1]), float(y[i - 1]), float(x[i]), float(y[i]))
            break

    # Walk right from the peak to the first half-level crossing.
    right_x = float(x[-1])
    for i in range(peak_idx, x.size - 1):
        if y[i] >= half_level >= y[i + 1]:
            right_x = _interp_cross(float(x[i]), float(y[i]), float(x[i + 1]), float(y[i + 1]))
            break

    width = right_x - left_x
    if not np.isfinite(width) or width <= 0.0:
        return None

    return {
        "background": background,
        "left": left_x,
        "right": right_x,
        "width": width,
        "amplitude": peak_y,
    }
|
||||
|
||||
|
||||
def rolling_median_ref(xs: np.ndarray, ys: np.ndarray, window_ghz: float) -> np.ndarray:
    """Compute a rolling median reference on a fixed-width X window.

    For each sample, the median of *ys* over the X interval
    ``[x - window/2, x + window/2]`` is returned; positions that cannot be
    evaluated (non-finite X, empty or all-NaN window, mismatched sizes,
    non-positive window) stay NaN.  Assumes *xs* is sorted ascending
    (binary search is used to bound the window).
    """
    x = np.asarray(xs, dtype=np.float64)
    y = np.asarray(ys, dtype=np.float64)
    result = np.full(y.shape, np.nan, dtype=np.float64)
    if x.size == 0 or y.size == 0 or x.size != y.size:
        return result
    span = float(window_ghz)
    if not np.isfinite(span) or span <= 0.0:
        return result

    half_span = 0.5 * span
    for i, xi in enumerate(x):
        if not np.isfinite(xi):
            continue
        lo = np.searchsorted(x, xi - half_span, side="left")
        hi = np.searchsorted(x, xi + half_span, side="right")
        if hi <= lo:
            continue
        window_vals = y[lo:hi]
        if np.any(np.isfinite(window_vals)):
            result[i] = float(np.nanmedian(window_vals))
    return result
|
||||
|
||||
|
||||
def find_top_peaks_over_ref(
    xs: np.ndarray,
    ys: np.ndarray,
    ref: np.ndarray,
    top_n: int = 3,
) -> List[Dict[str, float]]:
    """Find the top-N non-overlapping peaks above a reference curve.

    A candidate is a local maximum of ``ys - ref`` with positive excess.
    Candidates are ranked by excess height; each accepted peak's
    half-height interval must not overlap a previously accepted one.
    The result is sorted by X; each entry carries ``x``, ``peak_y``,
    ``ref``, ``height``, ``left``, ``right`` and ``width``.

    NOTE(review): the crossing scans assume *xs* is sorted ascending —
    confirm at call sites.
    """
    x = np.asarray(xs, dtype=np.float64)
    y = np.asarray(ys, dtype=np.float64)
    r = np.asarray(ref, dtype=np.float64)
    if x.size < 3 or y.size != x.size or r.size != x.size:
        return []

    valid = np.isfinite(x) & np.isfinite(y) & np.isfinite(r)
    if not np.any(valid):
        return []
    # Excess of the signal over the reference; NaN where not comparable.
    delta = np.full_like(y, np.nan, dtype=np.float64)
    delta[valid] = y[valid] - r[valid]

    # Local maxima of the excess.  The two predicate pairs accept a maximum
    # whose neighbor on one side is equal (plateau edges count once).
    candidates: List[int] = []
    for i in range(1, x.size - 1):
        if not (np.isfinite(delta[i - 1]) and np.isfinite(delta[i]) and np.isfinite(delta[i + 1])):
            continue
        if delta[i] <= 0.0:
            continue
        left_ok = delta[i] > delta[i - 1]
        right_ok = delta[i] >= delta[i + 1]
        alt_left_ok = delta[i] >= delta[i - 1]
        alt_right_ok = delta[i] > delta[i + 1]
        if (left_ok and right_ok) or (alt_left_ok and alt_right_ok):
            candidates.append(i)
    if not candidates:
        return []

    # Strongest excess first.
    candidates.sort(key=lambda i: float(delta[i]), reverse=True)

    def _interp_cross(x0: float, y0: float, x1: float, y1: float, y_cross: float) -> float:
        # X where the segment (x0, y0)-(x1, y1) crosses y_cross;
        # degenerate segments fall back to x1.
        dy = y1 - y0
        if not np.isfinite(dy) or dy == 0.0:
            return x1
        t = (y_cross - y0) / dy
        t = min(1.0, max(0.0, t))
        return x0 + t * (x1 - x0)

    picked: List[Dict[str, float]] = []
    for idx in candidates:
        peak_y = float(y[idx])
        peak_ref = float(r[idx])
        peak_h = float(delta[idx])
        if not (np.isfinite(peak_y) and np.isfinite(peak_ref) and np.isfinite(peak_h)) or peak_h <= 0.0:
            continue

        # Half-height level measured from the reference at the peak.
        half_level = peak_ref + 0.5 * peak_h

        # Scan left from the peak for the half-level crossing.
        left_x = float(x[0])
        for i in range(idx, 0, -1):
            y0 = float(y[i - 1])
            y1 = float(y[i])
            if np.isfinite(y0) and np.isfinite(y1) and (y0 <= half_level <= y1):
                left_x = _interp_cross(float(x[i - 1]), y0, float(x[i]), y1, half_level)
                break

        # Scan right from the peak for the half-level crossing.
        right_x = float(x[-1])
        for i in range(idx, x.size - 1):
            y0 = float(y[i])
            y1 = float(y[i + 1])
            if np.isfinite(y0) and np.isfinite(y1) and (y0 >= half_level >= y1):
                right_x = _interp_cross(float(x[i]), y0, float(x[i + 1]), y1, half_level)
                break

        width = float(right_x - left_x)
        if not np.isfinite(width) or width <= 0.0:
            continue

        # Reject peaks whose half-height interval overlaps an accepted one.
        overlap = False
        for peak in picked:
            if not (right_x <= peak["left"] or left_x >= peak["right"]):
                overlap = True
                break
        if overlap:
            continue

        picked.append(
            {
                "x": float(x[idx]),
                "peak_y": peak_y,
                "ref": peak_ref,
                "height": peak_h,
                "left": left_x,
                "right": right_x,
                "width": width,
            }
        )
        if len(picked) >= int(max(1, top_n)):
            break

    # Present results in ascending X order.
    picked.sort(key=lambda peak: peak["x"])
    return picked
|
||||
Reference in New Issue
Block a user