improved code style and added logging

This commit is contained in:
Ayzen
2025-09-26 17:22:03 +03:00
parent 926268733c
commit 451c0654f5
39 changed files with 27401 additions and 1949 deletions

View File

@@ -1,9 +0,0 @@
{
"server": {
"host": "0.0.0.0",
"port": 8000
},
"logging": {
"level": "INFO"
}
}

View File

@@ -1,88 +1,148 @@
from fastapi import APIRouter, HTTPException
from typing import Any
from fastapi import APIRouter, HTTPException, Query
import vna_system.core.singletons as singletons
from vna_system.core.logging.logger import get_component_logger
router = APIRouter(prefix="/api/v1", tags=["acquisition"])
logger = get_component_logger(__file__)
@router.get("/acquisition/status")
async def get_acquisition_status():
"""Get current acquisition status."""
async def get_acquisition_status() -> dict[str, Any]:
"""
Return current acquisition status.
Response
--------
{
"running": bool,
"paused": bool,
"continuous_mode": bool,
"sweep_count": int
}
"""
acquisition = singletons.vna_data_acquisition_instance
if acquisition is None:
logger.error("Acquisition singleton is not initialized")
raise HTTPException(status_code=500, detail="Acquisition not initialized")
return {
"running": acquisition.is_running,
"paused": acquisition.is_paused,
"continuous_mode": acquisition.is_continuous_mode,
"sweep_count": acquisition._sweep_buffer._sweep_counter if hasattr(acquisition._sweep_buffer, '_sweep_counter') else 0
"sweep_count": acquisition.sweep_buffer.current_sweep_number,
}
@router.post("/acquisition/start")
async def start_acquisition():
"""Start data acquisition."""
async def start_acquisition() -> dict[str, Any]:
"""
Start data acquisition in continuous mode (resumes if paused).
"""
try:
acquisition = singletons.vna_data_acquisition_instance
if acquisition is None:
logger.error("Acquisition singleton is not initialized")
raise HTTPException(status_code=500, detail="Acquisition not initialized")
if not acquisition.is_running:
# Start thread if not running
acquisition.start()
logger.info("Acquisition thread started via API")
# Set to continuous mode (also resumes if paused)
acquisition.set_continuous_mode(True)
return {"success": True, "message": "Acquisition started"}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
except HTTPException:
raise
except Exception as exc: # noqa: BLE001
logger.error("Failed to start acquisition")
raise HTTPException(status_code=500, detail=str(exc))
@router.post("/acquisition/stop")
async def stop_acquisition():
"""Stop/pause data acquisition."""
async def stop_acquisition() -> dict[str, Any]:
"""
Pause data acquisition (thread remains alive for fast resume).
"""
try:
acquisition = singletons.vna_data_acquisition_instance
if acquisition is None:
logger.error("Acquisition singleton is not initialized")
raise HTTPException(status_code=500, detail="Acquisition not initialized")
if not acquisition.is_running:
return {"success": True, "message": "Acquisition already stopped"}
# Just pause instead of full stop - keeps thread alive for restart
acquisition.pause()
return {"success": True, "message": "Acquisition stopped"}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
logger.info("Acquisition paused via API")
return {"success": True, "message": "Acquisition paused"}
except HTTPException:
raise
except Exception as exc: # noqa: BLE001
logger.error("Failed to stop acquisition")
raise HTTPException(status_code=500, detail=str(exc))
@router.post("/acquisition/single-sweep")
async def trigger_single_sweep():
"""Trigger a single sweep. Automatically starts acquisition if needed."""
async def trigger_single_sweep() -> dict[str, Any]:
"""
Trigger a single sweep.
Automatically starts acquisition if needed and switches to single-sweep mode.
"""
try:
acquisition = singletons.vna_data_acquisition_instance
if acquisition is None:
logger.error("Acquisition singleton is not initialized")
raise HTTPException(status_code=500, detail="Acquisition not initialized")
if not acquisition.is_running:
# Start acquisition if not running
acquisition.start()
logger.info("Acquisition thread started (single-sweep request)")
acquisition.trigger_single_sweep()
return {"success": True, "message": "Single sweep triggered"}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
except HTTPException:
raise
except Exception as exc: # noqa: BLE001
logger.error("Failed to trigger single sweep")
raise HTTPException(status_code=500, detail=str(exc))
@router.get("/acquisition/latest-sweep")
async def get_latest_sweep():
"""Get the latest sweep data."""
async def get_latest_sweep(
limit: int = Query(10, ge=1, le=1000, description="Max number of points to include in response"),
) -> dict[str, Any]:
"""
Return the latest sweep metadata and a limited subset of points.
Query Params
------------
limit : int
Number of points to include from the start of the sweep (default 10, max 1000).
"""
try:
acquisition = singletons.vna_data_acquisition_instance
latest_sweep = acquisition._sweep_buffer.get_latest_sweep()
if acquisition is None:
logger.error("Acquisition singleton is not initialized")
raise HTTPException(status_code=500, detail="Acquisition not initialized")
latest_sweep = acquisition.sweep_buffer.get_latest_sweep()
if not latest_sweep:
return {"sweep": None, "message": "No sweep data available"}
points = latest_sweep.points[:limit]
return {
"sweep": {
"sweep_number": latest_sweep.sweep_number,
"timestamp": latest_sweep.timestamp,
"total_points": latest_sweep.total_points,
"points": latest_sweep.points[:10] if len(latest_sweep.points) > 10 else latest_sweep.points # Limit for API response
"points": points,
},
"message": f"Latest sweep #{latest_sweep.sweep_number} with {latest_sweep.total_points} points"
"message": f"Latest sweep #{latest_sweep.sweep_number} with {latest_sweep.total_points} points",
}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
except HTTPException:
raise
except Exception as exc: # noqa: BLE001
logger.error("Failed to fetch latest sweep")
raise HTTPException(status_code=500, detail=str(exc))
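
For reference, a minimal client sketch against the acquisition endpoints above (an assumption for illustration: the server is reachable at http://localhost:8000 and the `requests` package is available; neither is specified by this commit):

import requests

BASE = "http://localhost:8000/api/v1"

# Start continuous acquisition, poll status, then fetch a trimmed sweep.
requests.post(f"{BASE}/acquisition/start").raise_for_status()
status = requests.get(f"{BASE}/acquisition/status").json()
print(status["running"], status["sweep_count"])
latest = requests.get(f"{BASE}/acquisition/latest-sweep", params={"limit": 5}).json()
print(latest["message"])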

View File

@@ -1,10 +1,13 @@
from typing import Any, List # pydantic response_model uses List
from fastapi import APIRouter, HTTPException
from typing import List
from pathlib import Path
import vna_system.core.singletons as singletons
from vna_system.core.logging.logger import get_component_logger
from vna_system.core.settings.calibration_manager import CalibrationStandard
from vna_system.core.visualization.magnitude_chart import generate_standards_magnitude_plots, generate_combined_standards_plot
from vna_system.core.visualization.magnitude_chart import (
generate_standards_magnitude_plots,
)
from vna_system.api.models.settings import (
PresetModel,
CalibrationModel,
@@ -15,74 +18,81 @@ from vna_system.api.models.settings import (
SaveCalibrationRequest,
SetCalibrationRequest,
RemoveStandardRequest,
WorkingCalibrationModel
WorkingCalibrationModel,
)
router = APIRouter(prefix="/api/v1/settings", tags=["settings"])
logger = get_component_logger(__file__)
@router.get("/status", response_model=SettingsStatusModel)
async def get_status():
"""Get current settings status"""
async def get_status() -> dict[str, Any]:
"""Get current settings status."""
try:
status = singletons.settings_manager.get_status_summary()
return status
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
return singletons.settings_manager.get_status_summary()
except Exception as exc: # noqa: BLE001
logger.error("Failed to get settings status")
raise HTTPException(status_code=500, detail=str(exc))
@router.get("/presets", response_model=List[PresetModel])
async def get_presets(mode: str | None = None):
"""Get all available configuration presets, optionally filtered by mode"""
async def get_presets(mode: str | None = None) -> list[PresetModel]:
"""Get all available configuration presets, optionally filtered by mode."""
try:
if mode:
from vna_system.core.settings.preset_manager import VNAMode
try:
vna_mode = VNAMode(mode.lower())
presets = singletons.settings_manager.get_presets_by_mode(vna_mode)
except ValueError:
raise HTTPException(status_code=400, detail=f"Invalid mode: {mode}")
presets = singletons.settings_manager.get_presets_by_mode(vna_mode)
else:
presets = singletons.settings_manager.get_available_presets()
return [
PresetModel(
filename=preset.filename,
mode=preset.mode.value,
start_freq=preset.start_freq,
stop_freq=preset.stop_freq,
points=preset.points,
bandwidth=preset.bandwidth
filename=p.filename,
mode=p.mode.value,
start_freq=p.start_freq,
stop_freq=p.stop_freq,
points=p.points,
bandwidth=p.bandwidth,
)
for preset in presets
for p in presets
]
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
except HTTPException:
raise
except Exception as exc: # noqa: BLE001
logger.error("Failed to list presets")
raise HTTPException(status_code=500, detail=str(exc))
@router.post("/preset/set")
async def set_preset(request: SetPresetRequest):
"""Set current configuration preset"""
async def set_preset(request: SetPresetRequest) -> dict[str, Any]:
"""Set current configuration preset."""
try:
# Find preset by filename
presets = singletons.settings_manager.get_available_presets()
preset = next((p for p in presets if p.filename == request.filename), None)
if not preset:
if preset is None:
raise HTTPException(status_code=404, detail=f"Preset not found: {request.filename}")
# Clear current calibration when changing preset
# Changing preset invalidates active calibration selection.
singletons.settings_manager.calibration_manager.clear_current_calibration()
singletons.settings_manager.set_current_preset(preset)
logger.info("Preset selected via API", filename=preset.filename, mode=preset.mode.value)
return {"success": True, "message": f"Preset set to {request.filename}"}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
except HTTPException:
raise
except Exception as exc: # noqa: BLE001
logger.error("Failed to set preset")
raise HTTPException(status_code=500, detail=str(exc))
@router.get("/preset/current", response_model=PresetModel | None)
async def get_current_preset():
"""Get currently selected configuration preset"""
"""Get currently selected configuration preset."""
try:
preset = singletons.settings_manager.get_current_preset()
if not preset:
@@ -94,171 +104,173 @@ async def get_current_preset():
start_freq=preset.start_freq,
stop_freq=preset.stop_freq,
points=preset.points,
bandwidth=preset.bandwidth
bandwidth=preset.bandwidth,
)
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
except Exception as exc: # noqa: BLE001
logger.error("Failed to get current preset")
raise HTTPException(status_code=500, detail=str(exc))
@router.get("/calibrations", response_model=List[CalibrationModel])
async def get_calibrations(preset_filename: str | None = None):
"""Get available calibrations for current or specified preset"""
async def get_calibrations(preset_filename: str | None = None) -> list[CalibrationModel]:
"""Get available calibrations for current or specified preset."""
try:
preset = None
if preset_filename:
presets = singletons.settings_manager.get_available_presets()
preset = next((p for p in presets if p.filename == preset_filename), None)
if not preset:
if preset is None:
raise HTTPException(status_code=404, detail=f"Preset not found: {preset_filename}")
calibrations = singletons.settings_manager.get_available_calibrations(preset)
# Get detailed info for each calibration
calibration_details = []
details: list[CalibrationModel] = []
current_preset = preset or singletons.settings_manager.get_current_preset()
if current_preset:
for calib_name in calibrations:
info = singletons.settings_manager.get_calibration_info(calib_name, current_preset)
for name in calibrations:
info = singletons.settings_manager.get_calibration_info(name, current_preset)
standards = info.get("standards", {})
# Convert standards format if needed
standards = info.get('standards', {})
# Normalize standards into {standard: bool}
if isinstance(standards, list):
# If standards is a list (from complete calibration), convert to dict
required_standards = singletons.settings_manager.get_required_standards(current_preset.mode)
standards = {std.value: std.value in standards for std in required_standards}
required = singletons.settings_manager.get_required_standards(current_preset.mode)
standards = {std.value: (std.value in standards) for std in required}
calibration_details.append(CalibrationModel(
name=calib_name,
is_complete=info.get('is_complete', False),
standards=standards
))
details.append(
CalibrationModel(
name=name,
is_complete=bool(info.get("is_complete", False)),
standards=standards,
)
)
return calibration_details
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
return details
except HTTPException:
raise
except Exception as exc: # noqa: BLE001
logger.error("Failed to list calibrations")
raise HTTPException(status_code=500, detail=str(exc))
@router.post("/calibration/start")
async def start_calibration(request: StartCalibrationRequest):
"""Start new calibration for current or specified preset"""
async def start_calibration(request: StartCalibrationRequest) -> dict[str, Any]:
"""Start new calibration for current or specified preset."""
try:
preset = None
if request.preset_filename:
presets = singletons.settings_manager.get_available_presets()
preset = next((p for p in presets if p.filename == request.preset_filename), None)
if not preset:
if preset is None:
raise HTTPException(status_code=404, detail=f"Preset not found: {request.preset_filename}")
calibration_set = singletons.settings_manager.start_new_calibration(preset)
required_standards = singletons.settings_manager.get_required_standards(calibration_set.preset.mode)
calib = singletons.settings_manager.start_new_calibration(preset)
required = singletons.settings_manager.get_required_standards(calib.preset.mode)
return {
"success": True,
"message": "Calibration started",
"preset": calibration_set.preset.filename,
"required_standards": [s.value for s in required_standards]
"preset": calib.preset.filename,
"required_standards": [s.value for s in required],
}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
except HTTPException:
raise
except Exception as exc: # noqa: BLE001
logger.error("Failed to start calibration")
raise HTTPException(status_code=500, detail=str(exc))
@router.post("/calibration/add-standard")
async def add_calibration_standard(request: CalibrateStandardRequest):
"""Add calibration standard from latest sweep"""
async def add_calibration_standard(request: CalibrateStandardRequest) -> dict[str, Any]:
"""Add calibration standard from the latest sweep."""
try:
# Validate standard
try:
standard = CalibrationStandard(request.standard)
except ValueError:
raise HTTPException(status_code=400, detail=f"Invalid calibration standard: {request.standard}")
# Capture from data acquisition
sweep_number = singletons.settings_manager.capture_calibration_standard_from_acquisition(
sweep_no = singletons.settings_manager.capture_calibration_standard_from_acquisition(
standard, singletons.vna_data_acquisition_instance
)
# Get current working calibration status
working_calib = singletons.settings_manager.get_current_working_calibration()
progress = working_calib.get_progress() if working_calib else (0, 0)
working = singletons.settings_manager.get_current_working_calibration()
progress = working.get_progress() if working else (0, 0)
return {
"success": True,
"message": f"Added {standard.value} standard from sweep {sweep_number}",
"sweep_number": sweep_number,
"message": f"Added {standard.value} standard from sweep {sweep_no}",
"sweep_number": sweep_no,
"progress": f"{progress[0]}/{progress[1]}",
"is_complete": working_calib.is_complete() if working_calib else False
"is_complete": working.is_complete() if working else False,
}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
except HTTPException:
raise
except Exception as exc: # noqa: BLE001
logger.error("Failed to add calibration standard")
raise HTTPException(status_code=500, detail=str(exc))
@router.post("/calibration/save")
async def save_calibration(request: SaveCalibrationRequest):
"""Save current working calibration set"""
async def save_calibration(request: SaveCalibrationRequest) -> dict[str, Any]:
"""Save current working calibration set."""
try:
calibration_set = singletons.settings_manager.save_calibration_set(request.name)
saved = singletons.settings_manager.save_calibration_set(request.name)
return {
"success": True,
"message": f"Calibration '{request.name}' saved successfully",
"preset": calibration_set.preset.filename,
"standards": list(calibration_set.standards.keys())
"preset": saved.preset.filename,
"standards": [s.value for s in saved.standards.keys()],
}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
except Exception as exc: # noqa: BLE001
logger.error("Failed to save calibration")
raise HTTPException(status_code=500, detail=str(exc))
@router.post("/calibration/set")
async def set_calibration(request: SetCalibrationRequest):
"""Set current active calibration"""
async def set_calibration(request: SetCalibrationRequest) -> dict[str, Any]:
"""Set current active calibration."""
try:
preset = None
if request.preset_filename:
presets = singletons.settings_manager.get_available_presets()
preset = next((p for p in presets if p.filename == request.preset_filename), None)
if not preset:
if preset is None:
raise HTTPException(status_code=404, detail=f"Preset not found: {request.preset_filename}")
singletons.settings_manager.set_current_calibration(request.name, preset)
return {
"success": True,
"message": f"Calibration set to '{request.name}'"
}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
return {"success": True, "message": f"Calibration set to '{request.name}'"}
except HTTPException:
raise
except Exception as exc: # noqa: BLE001
logger.error("Failed to set calibration")
raise HTTPException(status_code=500, detail=str(exc))
@router.get("/working-calibration", response_model=WorkingCalibrationModel)
async def get_working_calibration():
"""Get current working calibration status"""
async def get_working_calibration() -> WorkingCalibrationModel:
"""Get current working calibration status."""
try:
working_calib = singletons.settings_manager.get_current_working_calibration()
if not working_calib:
working = singletons.settings_manager.get_current_working_calibration()
if not working:
return WorkingCalibrationModel(active=False)
completed, total = working_calib.get_progress()
missing_standards = working_calib.get_missing_standards()
completed, total = working.get_progress()
return WorkingCalibrationModel(
active=True,
preset=working_calib.preset.filename,
preset=working.preset.filename,
progress=f"{completed}/{total}",
is_complete=working_calib.is_complete(),
completed_standards=[s.value for s in working_calib.standards.keys()],
missing_standards=[s.value for s in missing_standards]
is_complete=working.is_complete(),
completed_standards=[s.value for s in working.standards.keys()],
missing_standards=[s.value for s in working.get_missing_standards()],
)
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
except Exception as exc: # noqa: BLE001
logger.error("Failed to get working calibration")
raise HTTPException(status_code=500, detail=str(exc))
@router.delete("/calibration/remove-standard")
async def remove_calibration_standard(request: RemoveStandardRequest):
"""Remove calibration standard from current working set"""
async def remove_calibration_standard(request: RemoveStandardRequest) -> dict[str, Any]:
"""Remove calibration standard from current working set."""
try:
# Validate standard
try:
standard = CalibrationStandard(request.standard)
except ValueError:
@@ -266,153 +278,140 @@ async def remove_calibration_standard(request: RemoveStandardRequest):
singletons.settings_manager.remove_calibration_standard(standard)
# Get current working calibration status
working_calib = singletons.settings_manager.get_current_working_calibration()
progress = working_calib.get_progress() if working_calib else (0, 0)
working = singletons.settings_manager.get_current_working_calibration()
progress = working.get_progress() if working else (0, 0)
return {
"success": True,
"message": f"Removed {standard.value} standard",
"progress": f"{progress[0]}/{progress[1]}",
"is_complete": working_calib.is_complete() if working_calib else False
"is_complete": working.is_complete() if working else False,
}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
except HTTPException:
raise
except Exception as exc: # noqa: BLE001
logger.error("Failed to remove calibration standard")
raise HTTPException(status_code=500, detail=str(exc))
@router.get("/calibration/current")
async def get_current_calibration():
"""Get currently selected calibration details"""
async def get_current_calibration() -> dict[str, Any]:
"""Get currently selected calibration details."""
try:
current_calib = singletons.settings_manager.get_current_calibration()
if not current_calib:
current = singletons.settings_manager.get_current_calibration()
if not current:
return {"active": False}
return {
"active": True,
"preset": {
"filename": current_calib.preset.filename,
"mode": current_calib.preset.mode.value
},
"calibration_name": current_calib.name,
"standards": [s.value for s in current_calib.standards.keys()],
"is_complete": current_calib.is_complete()
"preset": {"filename": current.preset.filename, "mode": current.preset.mode.value},
"calibration_name": current.name,
"standards": [s.value for s in current.standards.keys()],
"is_complete": current.is_complete(),
}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
except Exception as exc: # noqa: BLE001
logger.error("Failed to get current calibration")
raise HTTPException(status_code=500, detail=str(exc))
@router.get("/calibration/{calibration_name}/standards-plots")
async def get_calibration_standards_plots(calibration_name: str, preset_filename: str = None):
"""Get magnitude plots for all standards in a calibration set"""
async def get_calibration_standards_plots(
calibration_name: str,
preset_filename: str | None = None,
) -> dict[str, Any]:
"""Get magnitude plots for all standards in a calibration set."""
try:
# Get preset
# Resolve preset (explicit or current)
preset = None
if preset_filename:
presets = singletons.settings_manager.get_available_presets()
preset = next((p for p in presets if p.filename == preset_filename), None)
if not preset:
if preset is None:
raise HTTPException(status_code=404, detail=f"Preset not found: {preset_filename}")
else:
preset = singletons.settings_manager.get_current_preset()
if not preset:
if preset is None:
raise HTTPException(status_code=400, detail="No current preset selected")
# Get calibration directory
# Resolve calibration directory (uses manager's internal layout)
calibration_manager = singletons.settings_manager.calibration_manager
calibration_dir = calibration_manager._get_preset_calibration_dir(preset) / calibration_name
calibration_dir = calibration_manager._get_preset_calibration_dir(preset) / calibration_name # noqa: SLF001
if not calibration_dir.exists():
raise HTTPException(status_code=404, detail=f"Calibration not found: {calibration_name}")
# Generate plots for each standard
individual_plots = generate_standards_magnitude_plots(calibration_dir, preset)
return {
"calibration_name": calibration_name,
"preset": {
"filename": preset.filename,
"mode": preset.mode.value
},
"individual_plots": individual_plots
"preset": {"filename": preset.filename, "mode": preset.mode.value},
"individual_plots": individual_plots,
}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
except HTTPException:
raise
except Exception as exc: # noqa: BLE001
logger.error("Failed to build calibration standards plots")
raise HTTPException(status_code=500, detail=str(exc))
@router.get("/working-calibration/standards-plots")
async def get_working_calibration_standards_plots():
"""Get magnitude plots for standards in current working calibration"""
async def get_working_calibration_standards_plots() -> dict[str, Any]:
"""Get magnitude plots for standards in the current working calibration."""
try:
working_calib = singletons.settings_manager.get_current_working_calibration()
if not working_calib:
working = singletons.settings_manager.get_current_working_calibration()
if not working:
raise HTTPException(status_code=404, detail="No working calibration active")
# Check if there are any standards captured
if not working_calib.standards:
if not working.standards:
raise HTTPException(status_code=404, detail="No standards captured in working calibration")
# Generate plots directly from in-memory sweep data
from vna_system.core.visualization.magnitude_chart import generate_magnitude_plot_from_sweep_data
individual_plots = {}
individual: dict[str, Any] = {}
standard_colors = {
'open': '#2ca02c', # Green
'short': '#d62728', # Red
'load': '#ff7f0e', # Orange
'through': '#1f77b4' # Blue
"open": "#2ca02c",
"short": "#d62728",
"load": "#ff7f0e",
"through": "#1f77b4",
}
for standard, sweep_data in working_calib.standards.items():
for standard, sweep in working.standards.items():
try:
# Generate plot for this standard
plot_config = generate_magnitude_plot_from_sweep_data(sweep_data, working_calib.preset)
fig = generate_magnitude_plot_from_sweep_data(sweep, working.preset)
if "error" not in fig and fig.get("data"):
fig["data"][0]["line"]["color"] = standard_colors.get(standard.value, "#1f77b4")
fig["data"][0]["name"] = f"{standard.value.upper()} Standard"
fig["layout"]["title"] = f"{standard.value.upper()} Standard Magnitude (Working)"
if 'error' not in plot_config:
# Customize color and title for this standard
if plot_config.get('data'):
plot_config['data'][0]['line']['color'] = standard_colors.get(standard.value, '#1f77b4')
plot_config['data'][0]['name'] = f'{standard.value.upper()} Standard'
plot_config['layout']['title'] = f'{standard.value.upper()} Standard Magnitude (Working)'
# Include raw sweep data for download
plot_config['raw_sweep_data'] = {
'sweep_number': sweep_data.sweep_number,
'timestamp': sweep_data.timestamp,
'total_points': sweep_data.total_points,
'points': sweep_data.points, # Raw complex data points
'file_path': None # No file path for working calibration
fig["raw_sweep_data"] = {
"sweep_number": sweep.sweep_number,
"timestamp": sweep.timestamp,
"total_points": sweep.total_points,
"points": sweep.points,
"file_path": None,
}
# Add frequency information
plot_config['frequency_info'] = {
'start_freq': working_calib.preset.start_freq,
'stop_freq': working_calib.preset.stop_freq,
'points': working_calib.preset.points,
'bandwidth': working_calib.preset.bandwidth
fig["frequency_info"] = {
"start_freq": working.preset.start_freq,
"stop_freq": working.preset.stop_freq,
"points": working.preset.points,
"bandwidth": working.preset.bandwidth,
}
individual[standard.value] = fig
except Exception as exc: # noqa: BLE001
individual[standard.value] = {"error": f"Failed to generate plot for {standard.value}: {exc}"}
individual_plots[standard.value] = plot_config
else:
individual_plots[standard.value] = plot_config
except Exception as e:
individual_plots[standard.value] = {'error': f'Failed to generate plot for {standard.value}: {str(e)}'}
if not individual_plots:
if not individual:
raise HTTPException(status_code=404, detail="No valid plots generated for working calibration")
return {
"calibration_name": "Working Calibration",
"preset": {
"filename": working_calib.preset.filename,
"mode": working_calib.preset.mode.value
},
"individual_plots": individual_plots,
"preset": {"filename": working.preset.filename, "mode": working.preset.mode.value},
"individual_plots": individual,
"is_working": True,
"is_complete": working_calib.is_complete()
"is_complete": working.is_complete(),
}
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
except HTTPException:
raise
except Exception as exc: # noqa: BLE001
logger.error("Failed to build working calibration standards plots")
raise HTTPException(status_code=500, detail=str(exc))
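
A hedged end-to-end sketch of the calibration workflow exposed above. Field names follow the request models used in this diff; the preset filename is taken from the calibration metadata files in this commit; acquisition must be running so each standard can be captured from the latest sweep:

import requests

BASE = "http://localhost:8000/api/v1/settings"

# Pick a preset, start a fresh calibration, capture standards, then save and activate it.
requests.post(f"{BASE}/preset/set", json={"filename": "s11_start100_stop8800_points1000_bw1khz.bin"}).raise_for_status()
requests.post(f"{BASE}/calibration/start", json={}).raise_for_status()
for standard in ("open", "short", "load"):
    requests.post(f"{BASE}/calibration/add-standard", json={"standard": standard}).raise_for_status()
requests.post(f"{BASE}/calibration/save", json={"name": "bench_cal"}).raise_for_status()
requests.post(f"{BASE}/calibration/set", json={"name": "bench_cal"}).raise_for_status()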

View File

@@ -1,143 +0,0 @@
from __future__ import annotations
import json
import logging
import sys
from contextlib import asynccontextmanager
from typing import Any, Dict
import uvicorn
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from pathlib import Path
import vna_system.core.singletons as singletons
from vna_system.api.endpoints import health, settings, web_ui, acquisition
from vna_system.api.websockets import processing as ws_processing
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
# Disable noisy third-party loggers
logging.getLogger('kaleido').setLevel(logging.ERROR)
logging.getLogger('choreographer').setLevel(logging.ERROR)
logging.getLogger('kaleido.kaleido').setLevel(logging.ERROR)
logging.getLogger('choreographer.browsers.chromium').setLevel(logging.ERROR)
logging.getLogger('choreographer.browser_async').setLevel(logging.ERROR)
logging.getLogger('choreographer.utils._tmpfile').setLevel(logging.ERROR)
logging.getLogger('kaleido._kaleido_tab').setLevel(logging.ERROR)
logger = logging.getLogger(__name__)
def load_config(config_path: str = "vna_system/api/api_config.json") -> Dict[str, Any]:
"""Load API configuration from file."""
try:
with open(config_path, 'r') as f:
config = json.load(f)
logger.info(f"Loaded API config from {config_path}")
return config
except Exception as e:
logger.error(f"Failed to load config: {e}")
sys.exit(1)
@asynccontextmanager
async def lifespan(app: FastAPI):
"""FastAPI lifespan events."""
# Startup
logger.info("Starting VNA API Server...")
try:
# Load config
config = load_config()
# Set log level
log_level = config.get("logging", {}).get("level", "INFO")
logging.getLogger().setLevel(getattr(logging, log_level))
# Start acquisition
logger.info("Starting data acquisition...")
singletons.vna_data_acquisition_instance.start()
# Initialize processor system
logger.info("Starting processor system...")
singletons.processor_manager.start_processing()
logger.info(f"Processor system started with processors: {singletons.processor_manager.list_processors()}")
logger.info("VNA API Server started successfully")
yield
except Exception as e:
logger.error(f"Error during startup: {e}")
raise
# Shutdown
logger.info("Shutting down VNA API Server...")
if singletons.processor_manager:
singletons.processor_manager.stop_processing()
logger.info("Processor system stopped")
if singletons.vna_data_acquisition_instance and singletons.vna_data_acquisition_instance._running:
singletons.vna_data_acquisition_instance.stop()
logger.info("Acquisition stopped")
logger.info("VNA API Server shutdown complete")
# Create FastAPI app
app = FastAPI(
title="VNA System API",
description="Real-time VNA data acquisition and processing API",
version="1.0.0",
lifespan=lifespan
)
# Mount static files for web UI
WEB_UI_DIR = Path(__file__).parent.parent / "web_ui"
STATIC_DIR = WEB_UI_DIR / "static"
if STATIC_DIR.exists():
app.mount("/static", StaticFiles(directory=str(STATIC_DIR)), name="static")
logger.info(f"Mounted static files from: {STATIC_DIR}")
else:
logger.warning(f"Static directory not found: {STATIC_DIR}")
# Include routers
app.include_router(web_ui.router) # Web UI should be first for root path
app.include_router(health.router)
# app.include_router(processing.router)
app.include_router(acquisition.router)
app.include_router(settings.router)
app.include_router(ws_processing.router)
def main():
"""Main entry point."""
config = load_config()
# Server configuration
server_config = config.get("server", {})
host = server_config.get("host", "0.0.0.0")
port = server_config.get("port", 8000)
# Start server
uvicorn.run(
"vna_system.api.main:app",
host=host,
port=port,
log_level="info",
reload=False
)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,18 @@
{
"preset": {
"filename": "s11_start100_stop8800_points1000_bw1khz.bin",
"mode": "s11",
"start_freq": 100000000.0,
"stop_freq": 8800000000.0,
"points": 1000,
"bandwidth": 1000.0
},
"calibration_name": "еуыеуые",
"standards": [
"open",
"load",
"short"
],
"created_timestamp": "2025-09-26T17:19:50.019248",
"is_complete": true
}

View File

@@ -0,0 +1,16 @@
{
"preset": {
"filename": "s11_start100_stop8800_points1000_bw1khz.bin",
"mode": "s11",
"start_freq": 100000000.0,
"stop_freq": 8800000000.0,
"points": 1000,
"bandwidth": 1000.0
},
"calibration_name": "еуыеуые",
"standard": "load",
"sweep_number": 12,
"sweep_timestamp": 1758896376.33808,
"created_timestamp": "2025-09-26T17:19:50.017201",
"total_points": 1000
}

View File

@@ -0,0 +1,16 @@
{
"preset": {
"filename": "s11_start100_stop8800_points1000_bw1khz.bin",
"mode": "s11",
"start_freq": 100000000.0,
"stop_freq": 8800000000.0,
"points": 1000,
"bandwidth": 1000.0
},
"calibration_name": "еуыеуые",
"standard": "open",
"sweep_number": 10,
"sweep_timestamp": 1758896372.20023,
"created_timestamp": "2025-09-26T17:19:50.015286",
"total_points": 1000
}

View File

@@ -0,0 +1,16 @@
{
"preset": {
"filename": "s11_start100_stop8800_points1000_bw1khz.bin",
"mode": "s11",
"start_freq": 100000000.0,
"stop_freq": 8800000000.0,
"points": 1000,
"bandwidth": 1000.0
},
"calibration_name": "еуыеуые",
"standard": "short",
"sweep_number": 13,
"sweep_timestamp": 1758896378.4093437,
"created_timestamp": "2025-09-26T17:19:50.019159",
"total_points": 1000
}

View File

@@ -0,0 +1,18 @@
{
"preset": {
"filename": "s11_start100_stop8800_points1000_bw1khz.bin",
"mode": "s11",
"start_freq": 100000000.0,
"stop_freq": 8800000000.0,
"points": 1000,
"bandwidth": 1000.0
},
"calibration_name": "яыф",
"standards": [
"open",
"load",
"short"
],
"created_timestamp": "2025-09-26T17:20:00.022650",
"is_complete": true
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,16 @@
{
"preset": {
"filename": "s11_start100_stop8800_points1000_bw1khz.bin",
"mode": "s11",
"start_freq": 100000000.0,
"stop_freq": 8800000000.0,
"points": 1000,
"bandwidth": 1000.0
},
"calibration_name": "яыф",
"standard": "load",
"sweep_number": 12,
"sweep_timestamp": 1758896376.33808,
"created_timestamp": "2025-09-26T17:20:00.020322",
"total_points": 1000
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,16 @@
{
"preset": {
"filename": "s11_start100_stop8800_points1000_bw1khz.bin",
"mode": "s11",
"start_freq": 100000000.0,
"stop_freq": 8800000000.0,
"points": 1000,
"bandwidth": 1000.0
},
"calibration_name": "яыф",
"standard": "open",
"sweep_number": 17,
"sweep_timestamp": 1758896395.4880857,
"created_timestamp": "2025-09-26T17:20:00.016886",
"total_points": 1000
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,16 @@
{
"preset": {
"filename": "s11_start100_stop8800_points1000_bw1khz.bin",
"mode": "s11",
"start_freq": 100000000.0,
"stop_freq": 8800000000.0,
"points": 1000,
"bandwidth": 1000.0
},
"calibration_name": "яыф",
"standard": "short",
"sweep_number": 13,
"sweep_timestamp": 1758896378.4093437,
"created_timestamp": "2025-09-26T17:20:00.022500",
"total_points": 1000
}

View File

@@ -1,36 +1,36 @@
from __future__ import annotations
import io
import logging
import os
import struct
import threading
import time
from typing import BinaryIO, List, Tuple
from typing import BinaryIO
import serial
from vna_system.core import config as cfg
from vna_system.core.acquisition.port_manager import VNAPortLocator
from vna_system.core.acquisition.sweep_buffer import SweepBuffer
from vna_system.core.logging.logger import get_component_logger
logger = logging.getLogger(__name__)
logger = get_component_logger(__file__)
class VNADataAcquisition:
"""Main data acquisition class with asynchronous sweep collection."""
def __init__(self) -> None:
# Configuration
self.bin_log_path: str = cfg.BIN_INPUT_FILE_PATH
self.baud: int = cfg.DEFAULT_BAUD_RATE
# Dependencies
self.vna_port_locator = VNAPortLocator()
self._sweep_buffer = SweepBuffer()
# Control flags
self._running: bool = False
self._thread: threading.Thread | None = None
self._stop_event: threading.Event = threading.Event()
self._stop_event = threading.Event()
self._paused: bool = False
# Acquisition modes
@@ -39,29 +39,30 @@ class VNADataAcquisition:
# Sweep collection state
self._collecting: bool = False
self._collected_rx_payloads: List[bytes] = []
self._collected_rx_payloads: list[bytes] = []
self._meas_cmds_in_sweep: int = 0
logger.debug("VNADataAcquisition initialized", baud=self.baud, bin_log_path=self.bin_log_path)
# --------------------------------------------------------------------- #
# Lifecycle
# --------------------------------------------------------------------- #
def start(self) -> None:
"""Start the data acquisition background thread."""
if self._running:
logger.debug("Acquisition already running; start() call ignored.")
logger.debug("start() ignored; acquisition already running")
return
self._running = True
self._stop_event.clear()
self._thread = threading.Thread(target=self._acquisition_loop, daemon=True)
self._thread = threading.Thread(target=self._acquisition_loop, daemon=True, name="VNA-Acq")
self._thread.start()
logger.info("Acquisition thread started.")
logger.info("Acquisition thread started")
def stop(self) -> None:
"""Stop the data acquisition background thread."""
if not self._running:
logger.debug("Acquisition not running; stop() call ignored.")
logger.debug("stop() ignored; acquisition not running")
return
self._running = False
@@ -69,8 +70,7 @@ class VNADataAcquisition:
if self._thread and self._thread.is_alive():
self._thread.join(timeout=5.0)
logger.info("Acquisition thread joined.")
logger.info("Acquisition thread joined")
@property
def is_running(self) -> bool:
@@ -95,7 +95,7 @@ class VNADataAcquisition:
def pause(self) -> None:
"""Pause the data acquisition."""
if not self._running:
logger.warning("Cannot pause: acquisition not running")
logger.warning("Cannot pause; acquisition not running")
return
self._paused = True
@@ -105,24 +105,21 @@
"""Set continuous or single sweep mode. Also resumes if paused."""
self._continuous_mode = continuous
# Resume acquisition if setting to continuous mode and currently paused
if continuous and self._paused:
self._paused = False
logger.info("Data acquisition resumed (continuous mode)")
logger.info("Data acquisition resumed (continuous mode=True)")
mode_str = "continuous" if continuous else "single sweep"
logger.info(f"Acquisition mode set to: {mode_str}")
logger.info("Acquisition mode updated", continuous=continuous)
def trigger_single_sweep(self) -> None:
"""Trigger a single sweep. Automatically switches to single sweep mode if needed."""
if not self._running:
logger.warning("Cannot trigger single sweep: acquisition not running")
logger.warning("Cannot trigger single sweep; acquisition not running")
return
# Switch to single sweep mode if currently in continuous mode
if self._continuous_mode:
self.set_continuous_mode(False)
logger.info("Switched from continuous to single sweep mode")
logger.info("Switched from continuous to single-sweep mode")
self._single_sweep_requested = True
if self._paused:
@@ -131,155 +128,158 @@
logger.info("Single sweep triggered")
# --------------------------------------------------------------------- #
# Serial management
# Serial helpers
# --------------------------------------------------------------------- #
def _drain_serial_input(self, ser: serial.Serial) -> None:
"""Drain any pending bytes from the serial input buffer."""
if not ser:
return
drained = 0
while True:
bytes_waiting = getattr(ser, "in_waiting", 0)
if bytes_waiting <= 0:
waiting = getattr(ser, "in_waiting", 0)
if waiting <= 0:
break
drained += len(ser.read(bytes_waiting))
drained += len(ser.read(waiting))
time.sleep(cfg.SERIAL_DRAIN_CHECK_DELAY)
if drained:
logger.warning("Drained %d pending byte(s) from serial input.", drained)
logger.warning("Drained pending bytes from serial input", bytes=drained)
# --------------------------------------------------------------------- #
# Acquisition loop
# --------------------------------------------------------------------- #
def _acquisition_loop(self) -> None:
"""Main acquisition loop executed by the background thread."""
while self._running and not self._stop_event.is_set():
try:
# Check if paused
# Honor pause
if self._paused:
time.sleep(0.1)
continue
# Auto-detect port
self.port: str = cfg.get_vna_port()
logger.info(f"Using auto-detected port: {self.port}")
# Auto-detect and validate port
port = self.vna_port_locator.find_vna_port()
if port is None:
logger.warning("VNA port not found; retrying shortly")
time.sleep(0.5)
continue
with serial.Serial(self.port, self.baud) as ser:
logger.debug("Using port", device=port, baud=self.baud)
# Open serial + process one sweep from the binary log
with serial.Serial(port, self.baud) as ser:
self._drain_serial_input(ser)
# Open the log file each iteration to read the next sweep from start
with open(self.bin_log_path, "rb") as raw:
buffered = io.BufferedReader(raw, buffer_size=cfg.SERIAL_BUFFER_SIZE)
self._process_sweep_data(buffered, ser)
# Handle single sweep mode
# Handle single-sweep mode transitions
if not self._continuous_mode:
if self._single_sweep_requested:
self._single_sweep_requested = False
logger.info("Single sweep completed, pausing acquisition")
logger.info("Single sweep completed; pausing acquisition")
self.pause()
else:
# In single sweep mode but no sweep requested, pause
self.pause()
except Exception as exc: # noqa: BLE001
logger.error("Acquisition error: %s", exc)
logger.error("Acquisition loop error", error=repr(exc))
time.sleep(1.0)
# --------------------------------------------------------------------- #
# Log processing
# --------------------------------------------------------------------- #
def _process_sweep_data(self, f: BinaryIO, ser: serial.Serial) -> None:
"""Process the binary log file and collect sweep data one sweep at a time."""
"""Process the binary log file and collect sweep data for a single sweep."""
try:
# Start from beginning of file for each sweep
f.seek(0)
# Validate header
header = self._read_exact(f, len(cfg.MAGIC))
if header != cfg.MAGIC:
raise ValueError("Invalid log format: MAGIC header mismatch.")
raise ValueError("Invalid log format: MAGIC header mismatch")
self._reset_sweep_state()
# Process one complete sweep
# Read until exactly one sweep is completed
sweep_completed = False
while not sweep_completed and self._running and not self._stop_event.is_set():
# Read record header
dir_byte = f.read(1)
if not dir_byte:
# EOF reached without completing sweep - wait and retry
logger.debug("EOF reached, waiting for more data...")
dir_b = f.read(1)
if not dir_b:
# EOF reached; wait for more data to arrive on disk
logger.debug("EOF reached; waiting for more data")
time.sleep(0.1)
break
direction = dir_byte[0]
direction = dir_b[0]
(length,) = struct.unpack(">I", self._read_exact(f, 4))
if direction == cfg.DIR_TO_DEV:
# TX path: stream to device and inspect for sweep start
# TX path: forward to device and inspect for sweep start
first = self._serial_write_from_file(f, length, ser)
if not self._collecting and self._is_sweep_start_command(length, first):
self._collecting = True
self._collected_rx_payloads = []
self._collected_rx_payloads.clear()
self._meas_cmds_in_sweep = 0
logger.info("Starting sweep data collection from device")
logger.info("Sweep collection started")
elif direction == cfg.DIR_FROM_DEV:
# RX path: read exact number of bytes from device
# RX path: capture bytes from device; keep file pointer in sync
rx_bytes = self._serial_read_exact(length, ser, capture=self._collecting)
self._skip_bytes(f, length) # Keep log file pointer in sync
self._skip_bytes(f, length)
if self._collecting:
self._collected_rx_payloads.append(rx_bytes)
self._meas_cmds_in_sweep += 1
# Check for sweep completion
if self._meas_cmds_in_sweep >= cfg.MEAS_CMDS_PER_SWEEP:
self._finalize_sweep()
sweep_completed = True
else:
# Unknown record type: skip bytes to keep in sync
logger.warning("Unknown record direction; skipping", direction=direction, length=length)
self._skip_bytes(f, length)
except Exception as exc: # noqa: BLE001
logger.error("Processing error: %s", exc)
logger.error("Processing error", error=repr(exc))
time.sleep(1.0)
def _finalize_sweep(self) -> None:
"""Parse collected payloads into points and push to the buffer."""
all_points: List[Tuple[float, float]] = []
all_points: list[tuple[float, float]] = []
for payload in self._collected_rx_payloads:
all_points.extend(self._parse_measurement_data(payload))
if payload:
all_points.extend(self._parse_measurement_data(payload))
if all_points:
sweep_number = self._sweep_buffer.add_sweep(all_points)
logger.info(f"Collected sweep #{sweep_number} with {len(all_points)} data points")
logger.info("Sweep collected", sweep_number=sweep_number, points=len(all_points))
if len(all_points) != cfg.EXPECTED_POINTS_PER_SWEEP:
logger.warning(
"Expected %d points, got %d.",
cfg.EXPECTED_POINTS_PER_SWEEP,
len(all_points),
"Unexpected number of points",
expected=cfg.EXPECTED_POINTS_PER_SWEEP,
actual=len(all_points),
)
else:
logger.warning("No points parsed for sweep")
self._reset_sweep_state()
def _reset_sweep_state(self) -> None:
"""Reset internal state for the next sweep collection."""
self._collecting = False
self._collected_rx_payloads = []
self._collected_rx_payloads.clear()
self._meas_cmds_in_sweep = 0
# --------------------------------------------------------------------- #
# I/O helpers
# --------------------------------------------------------------------- #
def _read_exact(self, f: BinaryIO, n: int) -> bytes:
"""Read exactly *n* bytes from a file-like object or raise EOFError."""
buf = bytearray()
while len(buf) < n:
chunk = f.read(n - len(buf))
if not chunk:
raise EOFError(f"Unexpected EOF while reading {n} bytes.")
raise EOFError(f"Unexpected EOF while reading {n} bytes")
buf += chunk
return bytes(buf)
@@ -290,14 +290,13 @@
f.seek(n, os.SEEK_CUR)
return
except (OSError, io.UnsupportedOperation):
# Fall back to manual skipping below.
pass
remaining = n
while remaining > 0:
chunk = f.read(min(cfg.FILE_CHUNK_SIZE, remaining))
if not chunk:
raise EOFError(f"Unexpected EOF while skipping {n} bytes.")
raise EOFError(f"Unexpected EOF while skipping {n} bytes")
remaining -= len(chunk)
def _serial_write_from_file(self, f: BinaryIO, nbytes: int, ser: serial.Serial) -> bytes:
@@ -313,12 +312,12 @@
to_read = min(cfg.TX_CHUNK_SIZE, remaining)
chunk = f.read(to_read)
if not chunk:
raise EOFError("Log truncated while sending.")
raise EOFError("Log truncated while sending")
# Capture a peek for command inspection
needed = max(0, cfg.SERIAL_PEEK_SIZE - len(first))
if needed:
first.extend(chunk[:needed])
need = max(0, cfg.SERIAL_PEEK_SIZE - len(first))
if need:
first.extend(chunk[:need])
# Write to serial
written = 0
@@ -334,17 +333,16 @@
def _serial_read_exact(self, nbytes: int, ser: serial.Serial, capture: bool = False) -> bytes:
"""Read exactly *nbytes* from the serial port; optionally capture and return them."""
deadline = time.monotonic() + cfg.RX_TIMEOUT
total = 0
out = bytearray() if capture else None
old_timeout = ser.timeout
ser.timeout = min(cfg.SERIAL_IDLE_TIMEOUT, cfg.RX_TIMEOUT)
try:
while total < nbytes:
if time.monotonic() >= deadline:
raise TimeoutError(f"Timeout while waiting for {nbytes} bytes.")
raise TimeoutError(f"Timeout while waiting for {nbytes} bytes")
chunk = ser.read(nbytes - total)
if chunk:
total += len(chunk)
@@ -357,25 +355,18 @@
# --------------------------------------------------------------------- #
# Parsing & detection
# --------------------------------------------------------------------- #
def _parse_measurement_data(self, payload: bytes) -> List[Tuple[float, float]]:
def _parse_measurement_data(self, payload: bytes) -> list[tuple[float, float]]:
"""Parse complex measurement samples (float32 pairs) from a payload."""
if len(payload) <= cfg.MEAS_HEADER_LEN:
return []
data = memoryview(payload)[cfg.MEAS_HEADER_LEN:]
out: List[Tuple[float, float]] = []
n_pairs = len(data) // 8 # 2 × float32 per point
for i in range(n_pairs):
off = i * 8
real = struct.unpack_from("<f", data, off)[0]
imag = struct.unpack_from("<f", data, off + 4)[0]
out.append((real, imag))
return out
# Use iter_unpack for speed and clarity
points: list[tuple[float, float]] = []
for real, imag in struct.iter_unpack("<ff", data[: (len(data) // 8) * 8]):
points.append((real, imag))
return points
def _is_sweep_start_command(self, tx_len: int, first_bytes: bytes) -> bool:
"""Return True if a TX command indicates the start of a sweep."""
return tx_len == cfg.SWEEP_CMD_LEN and first_bytes.startswith(cfg.SWEEP_CMD_PREFIX)
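
A standalone sketch of the float32-pair parsing done by _parse_measurement_data, using a synthetic payload (MEAS_HEADER_LEN hard-coded here to 21, matching the config constant in this commit):

import struct

MEAS_HEADER_LEN = 21  # assumed value of cfg.MEAS_HEADER_LEN
payload = bytes(MEAS_HEADER_LEN) + struct.pack("<ffff", 1.0, -0.5, 0.25, 0.75)

# Skip the header, then unpack little-endian (real, imag) float32 pairs.
data = memoryview(payload)[MEAS_HEADER_LEN:]
points = list(struct.iter_unpack("<ff", data[: (len(data) // 8) * 8]))
assert points == [(1.0, -0.5), (0.25, 0.75)]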

View File

@@ -0,0 +1,136 @@
import glob
import serial.tools.list_ports
from vna_system.core.logging.logger import get_component_logger
from vna_system.core.config import VNA_PID, VNA_VID
logger = get_component_logger(__file__)
class VNAPortLocator:
"""
Robust VNA serial port locator with in-memory cache.
Strategy
--------
1) Prefer the cached port if it exists *and* matches expected VID/PID.
2) Scan all ports and pick an exact VID/PID match.
3) As a last resort, pick the first /dev/ttyACM* (unverified).
"""
__slots__ = ("_cached_port")
def __init__(self) -> None:
self._cached_port: str | None = None
logger.debug("VNAPortLocator initialized")
# ---------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------
def _enumerate_ports(self) -> list:
"""Return a list of pyserial ListPortInfo entries."""
try:
ports = list(serial.tools.list_ports.comports())
logger.debug("Serial ports enumerated", count=len(ports))
return ports
except Exception as exc:
logger.warning("Failed to enumerate serial ports", error=repr(exc))
return []
def _get_port_info(self, device: str):
"""Return pyserial ListPortInfo for a device path, or None if absent."""
for p in self._enumerate_ports():
if p.device == device:
return p
return None
@staticmethod
def _is_exact_vna_port(p) -> bool:
"""True if VID/PID match exactly the expected VNA device."""
return getattr(p, "vid", None) == VNA_VID and getattr(p, "pid", None) == VNA_PID
def verify_port_identity(self, port: str) -> bool:
"""
Verify that a device path belongs to *our* VNA by VID/PID.
Returns True only if the device exists and matches VID/PID exactly.
"""
info = self._get_port_info(port)
if not info:
logger.debug("Port not present", device=port)
return False
if self._is_exact_vna_port(info):
logger.debug("Port verified by VID/PID", device=port, vid=info.vid, pid=info.pid)
return True
logger.warning(
"Port belongs to a different device",
device=port,
vid=getattr(info, "vid", None),
pid=getattr(info, "pid", None),
expected_vid=VNA_VID,
expected_pid=VNA_PID,
)
return False
# ---------------------------------------------------------------------
# Discovery
# ---------------------------------------------------------------------
def find_vna_port(self) -> str | None:
"""
Locate the VNA serial port following the strategy described in the class docstring.
Returns
-------
str | None
Device path if found; otherwise None.
"""
cached = self._cached_port
# 1) Try the cached port (must be present and VID/PID-correct)
if cached and self.verify_port_identity(cached):
logger.info("Using cached VNA port", device=cached)
return cached
elif cached:
logger.debug("Ignoring cached port due to VID/PID mismatch", device=cached)
# 2) Enumerate ports and pick exact VID/PID match (prefer stable identity)
exact_candidates: list[str] = []
for p in self._enumerate_ports():
logger.debug(
"Inspecting port",
device=p.device,
vid=getattr(p, "vid", None),
pid=getattr(p, "pid", None),
manufacturer=getattr(p, "manufacturer", None),
description=getattr(p, "description", None),
)
if self._is_exact_vna_port(p):
exact_candidates.append(p.device)
logger.debug("Exact candidates collected", count=len(exact_candidates), candidates=exact_candidates)
if exact_candidates:
# If the cached path is among exact matches, keep its priority
selected = cached if cached in exact_candidates else exact_candidates[0]
logger.info("VNA device found by VID/PID", device=selected)
self._cached_port = selected
logger.debug("Cached port updated", device=selected)
return selected
# 3) Last resort: first ACM device (best-effort on Linux; not cached)
try:
acm_ports = sorted(glob.glob("/dev/ttyACM*"))
logger.debug("ACM ports scanned", ports=acm_ports)
if acm_ports:
selected = acm_ports[0]
logger.info("Using first available ACM port (unverified)", device=selected)
return selected
except Exception as exc:
logger.warning("Error during ACM port detection", error=repr(exc))
logger.warning("VNA device not found by auto-detection")
return None
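
Usage sketch for the locator (illustrative only; device paths depend on the host):

from vna_system.core.acquisition.port_manager import VNAPortLocator

locator = VNAPortLocator()
port = locator.find_vna_port()  # exact VID/PID matches are cached for later calls
if port is None:
    raise RuntimeError("VNA not connected")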

View File

@@ -1,54 +1,108 @@
import math
import threading
import time
from collections import deque
from dataclasses import dataclass
from typing import List, Tuple
import threading
import time
from vna_system.core.config import SWEEP_BUFFER_MAX_SIZE
from vna_system.core.logging.logger import get_component_logger
logger = get_component_logger(__file__)
Point = tuple[float, float] # (real, imag)
@dataclass
@dataclass(slots=True, frozen=True)
class SweepData:
"""Container for a single sweep with metadata"""
"""
Immutable container for a single sweep with metadata.
Attributes
----------
sweep_number:
Monotonically increasing identifier for the sweep.
timestamp:
UNIX timestamp (seconds since epoch) when the sweep was stored.
points:
Sequence of complex-valued points represented as (real, imag) tuples.
total_points:
Cached number of points in `points` for quick access.
"""
sweep_number: int
timestamp: float
points: List[Tuple[float, float]] # Complex pairs (real, imag)
points: list[Point]
total_points: int
@property
def magnitude_phase_data(self) -> List[Tuple[float, float, float, float]]:
"""Convert to magnitude/phase representation"""
result = []
for real, imag in self.points:
magnitude = (real * real + imag * imag) ** 0.5
phase = math.atan2(imag, real) if (real != 0.0 or imag != 0.0) else 0.0
result.append((real, imag, magnitude, phase))
return result
class SweepBuffer:
"""Thread-safe circular buffer for sweep data"""
"""
Thread-safe circular buffer for sweep data.
def __init__(self, max_size: int = SWEEP_BUFFER_MAX_SIZE, initial_sweep_number: int = 0):
self._buffer = deque(maxlen=max_size)
Parameters
----------
max_size:
Maximum number of sweeps to retain. Old entries are discarded when the
buffer exceeds this size.
initial_sweep_number:
Starting value for the internal sweep counter.
"""
def __init__(self, max_size: int = SWEEP_BUFFER_MAX_SIZE, initial_sweep_number: int = 0) -> None:
self._buffer: deque[SweepData] = deque(maxlen=max_size)
self._lock = threading.RLock()
self._sweep_counter = initial_sweep_number
logger.debug("SweepBuffer initialized", max_size=max_size, initial_sweep_number=initial_sweep_number)
def add_sweep(self, points: List[Tuple[float, float]]) -> int:
"""Add a new sweep to the buffer and return its number"""
# ------------------------------
# Introspection utilities
# ------------------------------
@property
def current_sweep_number(self) -> int:
"""Return the last assigned sweep number (0 if none were added yet)."""
with self._lock:
logger.debug("Current sweep number retrieved", sweep_number=self._sweep_counter)
return self._sweep_counter
# ------------------------------
# Core API
# ------------------------------
def add_sweep(self, points: list[Point]) -> int:
"""
Add a new sweep to the buffer.
Parameters
----------
points:
Sequence of (real, imag) tuples representing a sweep.
Returns
-------
int
The assigned sweep number for the newly added sweep.
"""
timestamp = time.time()
with self._lock:
self._sweep_counter += 1
sweep = SweepData(
sweep_number=self._sweep_counter,
timestamp=time.time(),
points=points,
total_points=len(points)
timestamp=timestamp,
points=list(points), # ensure we store our own list
total_points=len(points),
)
self._buffer.append(sweep)
logger.debug(
"New sweep added",
sweep_number=sweep.sweep_number,
total_points=sweep.total_points,
buffer_size=len(self._buffer),
)
return self._sweep_counter
def get_latest_sweep(self) -> SweepData | None:
"""Get the most recent sweep"""
"""Return the most recent sweep, or None if the buffer is empty."""
with self._lock:
return self._buffer[-1] if self._buffer else None
sweep = self._buffer[-1] if self._buffer else None
# if sweep: # TOO NOISY
# logger.debug("Latest sweep retrieved", sweep_number=sweep.sweep_number)
# else:
# logger.debug("Latest sweep requested but buffer is empty")
return sweep
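
A small usage sketch for SweepBuffer with synthetic points:

from vna_system.core.acquisition.sweep_buffer import SweepBuffer

buf = SweepBuffer(max_size=10)
n = buf.add_sweep([(1.0, 0.0), (0.5, -0.5)])  # returns the assigned sweep number
latest = buf.get_latest_sweep()
assert latest is not None
assert latest.sweep_number == n == buf.current_sweep_number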

View File

@@ -1,124 +1,76 @@
#!/usr/bin/env python3
"""
Configuration file for VNA data acquisition system
"""
import glob
import logging
from pathlib import Path
import serial.tools.list_ports
# Base directory for VNA system
# -----------------------------------------------------------------------------
# Project paths
# -----------------------------------------------------------------------------
BASE_DIR = Path(__file__).parent.parent
# Serial communication settings
DEFAULT_BAUD_RATE = 115200
DEFAULT_PORT = "/dev/ttyACM0"
# -----------------------------------------------------------------------------
# API / Server settings
# -----------------------------------------------------------------------------
API_HOST = "0.0.0.0"
API_PORT = 8000
# VNA device identification
VNA_VID = 0x0483 # STMicroelectronics
VNA_PID = 0x5740 # STM32 Virtual ComPort
VNA_MANUFACTURER = "STMicroelectronics"
VNA_PRODUCT = "STM32 Virtual ComPort"
# -----------------------------------------------------------------------------
# Logging settings (used from main)
# -----------------------------------------------------------------------------
LOG_LEVEL = "INFO" # {"DEBUG","INFO","WARNING","ERROR","CRITICAL"}
LOG_DIR = BASE_DIR / "logs" # Directory for application logs
LOG_APP_FILE = LOG_DIR / "vna_system.log" # Main application log file
# -----------------------------------------------------------------------------
# Serial communication settings
# -----------------------------------------------------------------------------
DEFAULT_BAUD_RATE = 115200
RX_TIMEOUT = 5.0
TX_CHUNK_SIZE = 64 * 1024
# -----------------------------------------------------------------------------
# VNA device identification
# -----------------------------------------------------------------------------
VNA_VID = 0x0483 # STMicroelectronics
VNA_PID = 0x5740 # STM32 Virtual ComPort
# -----------------------------------------------------------------------------
# Sweep detection and parsing constants
# -----------------------------------------------------------------------------
SWEEP_CMD_LEN = 515
SWEEP_CMD_PREFIX = bytes([0xAA, 0x00, 0xDA])
MEAS_HEADER_LEN = 21
MEAS_CMDS_PER_SWEEP = 17
EXPECTED_POINTS_PER_SWEEP = 1000
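As a sketch of how these framing constants might be consumed when scanning a byte stream (hypothetical helper, not defined in this commit):
def looks_like_sweep_cmd(frame: bytes) -> bool:
    # A candidate frame is a sweep command when it has the fixed length
    # and starts with the 0xAA 0x00 0xDA prefix.
    return len(frame) == SWEEP_CMD_LEN and frame.startswith(SWEEP_CMD_PREFIX)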
# -----------------------------------------------------------------------------
# Buffer settings
# -----------------------------------------------------------------------------
SWEEP_BUFFER_MAX_SIZE = 100 # Maximum number of sweeps to store in circular buffer
SERIAL_BUFFER_SIZE = 512 * 1024
# Log file settings
BIN_INPUT_FILE_PATH = "./vna_system/binary_input/current_input.bin" # Symbolic link to the current log file
# -----------------------------------------------------------------------------
# Log file settings (binary input path, not to be confused with text logs)
# -----------------------------------------------------------------------------
BIN_INPUT_FILE_PATH = "./vna_system/binary_input/current_input.bin" # Symlink to current binary input
# -----------------------------------------------------------------------------
# Binary log format constants
# -----------------------------------------------------------------------------
MAGIC = b"VNALOG1\n"
DIR_TO_DEV = 0x01 # '>'
DIR_FROM_DEV = 0x00 # '<'
# -----------------------------------------------------------------------------
# File I/O settings
# -----------------------------------------------------------------------------
FILE_CHUNK_SIZE = 256 * 1024
SERIAL_PEEK_SIZE = 32
# -----------------------------------------------------------------------------
# Timeout settings
# -----------------------------------------------------------------------------
SERIAL_IDLE_TIMEOUT = 0.5
SERIAL_DRAIN_DELAY = 0.05
SERIAL_DRAIN_CHECK_DELAY = 0.01
SERIAL_CONNECT_DELAY = 0.01
def find_vna_port():
"""
Automatically find VNA device port.
Returns:
str: Port path (e.g., '/dev/ttyACM1'); falls back to DEFAULT_PORT if no device is found
"""
logger = logging.getLogger(__name__)
# Method 1: Use pyserial port detection by VID/PID
try:
ports = list(serial.tools.list_ports.comports())
logger.debug(f"Found {len(ports)} serial ports")
for port in ports:
logger.debug(f"Checking port {port.device}")
# Check by VID/PID
if port.vid == VNA_VID and port.pid == VNA_PID:
logger.debug(f"Found VNA device by VID/PID at {port.device}")
return port.device
# Fallback: Check by manufacturer/product strings
if (port.manufacturer and VNA_MANUFACTURER.lower() in port.manufacturer.lower() and
port.description and VNA_PRODUCT.lower() in port.description.lower()):
logger.debug(f"Found VNA device by description at {port.device}")
return port.device
except Exception as e:
logger.warning(f"Error during VID/PID port detection: {e}")
# Method 2: Search ttyACM devices (Linux-specific)
try:
acm_ports = glob.glob('/dev/ttyACM*')
logger.debug(f"Found ACM ports: {acm_ports}")
if acm_ports:
# Sort to get consistent ordering (ttyACM0, ttyACM1, etc.)
acm_ports.sort()
logger.info(f"Using first available ACM port: {acm_ports[0]}")
return acm_ports[0]
except Exception as e:
logger.warning(f"Error during ACM port detection: {e}")
# Method 3: Fallback to default
logger.warning(f"VNA device not found, using default port: {DEFAULT_PORT}")
return DEFAULT_PORT
def get_vna_port():
"""
Get VNA port, trying auto-detection first, then falling back to default.
Returns:
str: Port path to use for VNA connection
"""
logger = logging.getLogger(__name__)
try:
port = find_vna_port()
if port and port != DEFAULT_PORT:
logger.info(f"Auto-detected VNA port: {port}")
return port
except Exception as e:
logger.error(f"Port detection failed: {e}")
logger.info(f"Using default port: {DEFAULT_PORT}")
return DEFAULT_PORT
PROCESSORS_CONFIG_DIR_PATH = "vna_system/core/processors/configs"
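Typical call site for the helpers above (illustrative; the serial open is sketched as a comment):
port = get_vna_port()   # auto-detected port, or DEFAULT_PORT as fallback
# ser = serial.Serial(port, DEFAULT_BAUD_RATE, timeout=RX_TIMEOUT)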

View File

@ -0,0 +1,9 @@
"""
VNA System Logging Module
Provides centralized, consistent logging across all system components.
"""
from .logger import get_logger, get_component_logger, setup_logging, VNALogger
__all__ = ['get_logger', 'get_component_logger', 'setup_logging', 'VNALogger']

View File

@ -0,0 +1,257 @@
import logging
import sys
from enum import StrEnum
from pathlib import Path
from datetime import datetime
from typing import Any
class LogLevel(StrEnum):
"""Log level enumeration with associated prefixes and ANSI colors."""
DEBUG = "DEBUG"
INFO = "INFO"
WARNING = "WARNING"
ERROR = "ERROR"
CRITICAL = "CRITICAL"
@property
def prefix(self) -> str:
return {
LogLevel.DEBUG: "DEBUG",
LogLevel.INFO: "INFO",
LogLevel.WARNING: "WARN",
LogLevel.ERROR: "ERROR",
LogLevel.CRITICAL: "FATAL",
}[self]
@property
def color(self) -> str:
# ANSI colors; disabled if stdout is not a TTY
if not sys.stdout.isatty():
return ""
return {
LogLevel.DEBUG: "\033[36m", # Cyan
LogLevel.INFO: "\033[32m", # Green
LogLevel.WARNING: "\033[33m", # Yellow
LogLevel.ERROR: "\033[31m", # Red
LogLevel.CRITICAL: "\033[35m", # Magenta
}[self]
@staticmethod
def reset_color() -> str:
return "" if not sys.stdout.isatty() else "\033[0m"
class VNALogger:
"""
Enhanced logger for VNA system with consistent formatting and optional colors.
Features
--------
- Consistent color coding across all modules (TTY-aware).
- Component name namespacing (logger name: `vna.<component>`).
- Optional file logging per component.
- Lightweight performance timers.
- Structured metadata via keyword arguments.
"""
__slots__ = ("component_name", "log_file", "_logger", "_timers")
_loggers: dict[str, "VNALogger"] = {}
_base_config_set = False
def __init__(self, component_name: str, log_file: Path | None = None) -> None:
self.component_name = component_name
self.log_file = log_file
self._logger = logging.getLogger(f"vna.{component_name}")
self._logger.setLevel(logging.DEBUG)
self._logger.propagate = True # use root handlers configured once
self._timers: dict[str, float] = {}
if not VNALogger._base_config_set:
self._configure_base_logging()
VNALogger._base_config_set = True
if self.log_file:
self._add_file_handler(self.log_file)
# ---------------------------------------------------------------------
# Base configuration (root logger + console format)
# ---------------------------------------------------------------------
def _configure_base_logging(self) -> None:
root = logging.getLogger()
root.setLevel(logging.DEBUG)
# Remove existing handlers to avoid duplicates on reloads
for h in root.handlers[:]:
root.removeHandler(h)
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(self._create_console_formatter())
root.addHandler(console_handler)
def _create_console_formatter(self) -> logging.Formatter:
class ColoredFormatter(logging.Formatter):
def format(self, record: logging.LogRecord) -> str:
level = LogLevel(record.levelname) if record.levelname in LogLevel.__members__ else LogLevel.INFO
timestamp = datetime.fromtimestamp(record.created).strftime("%H:%M:%S.%f")[:-3]
component = f"[{record.name.replace('vna.', '')}]"
color = level.color
reset = LogLevel.reset_color()
# Use record.getMessage() to apply %-formatting already handled by logging
return f"{color}{level.prefix} {timestamp} {component:<20} {record.getMessage()}{reset}"
return ColoredFormatter()
# ---------------------------------------------------------------------
# File handler
# ---------------------------------------------------------------------
def _add_file_handler(self, path: Path) -> None:
path.parent.mkdir(parents=True, exist_ok=True)
# Avoid adding duplicate file handlers for the same path
for h in self._logger.handlers:
if isinstance(h, logging.FileHandler) and Path(h.baseFilename) == path:
return
file_handler = logging.FileHandler(path, encoding="utf-8")
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
self._logger.addHandler(file_handler)
# ---------------------------------------------------------------------
# Public logging API
# ---------------------------------------------------------------------
def debug(self, message: str, /, **metadata: Any) -> None:
self._log_with_metadata(logging.DEBUG, message, metadata)
def info(self, message: str, /, **metadata: Any) -> None:
self._log_with_metadata(logging.INFO, message, metadata)
def warning(self, message: str, /, **metadata: Any) -> None:
self._log_with_metadata(logging.WARNING, message, metadata)
def error(self, message: str, /, **metadata: Any) -> None:
self._log_with_metadata(logging.ERROR, message, metadata)
def critical(self, message: str, /, **metadata: Any) -> None:
self._log_with_metadata(logging.CRITICAL, message, metadata)
def _log_with_metadata(self, level: int, message: str, metadata: dict[str, Any]) -> None:
if metadata:
# Render key=value; repr() helps keep types unambiguous in logs
meta_str = " ".join(f"{k}={repr(v)}" for k, v in metadata.items())
self._logger.log(level, f"{message} | {meta_str}")
else:
self._logger.log(level, message)
# ---------------------------------------------------------------------
# Timers
# ---------------------------------------------------------------------
def timer_start(self, operation: str) -> str:
"""
Start a wall-clock timer for performance measurement.
Returns
-------
str
Timer identifier to be passed to `timer_end`.
"""
# Wall-clock timestamps via datetime.now(); adequate for coarse, millisecond-scale timing
timer_id = f"{self.component_name}:{operation}:{datetime.now().timestamp()}"
self._timers[timer_id] = datetime.now().timestamp()
self.debug("Timer started", operation=operation, timer_id=timer_id)
return timer_id
def timer_end(self, timer_id: str, operation: str | None = None) -> float:
"""
End a timer and log the elapsed time.
Returns
-------
float
Elapsed time in milliseconds. Returns 0.0 if timer_id is unknown.
"""
started = self._timers.pop(timer_id, None)
if started is None:
self.warning("Timer not found", timer_id=timer_id)
return 0.0
elapsed_ms = (datetime.now().timestamp() - started) * 1000.0
self.info("Timer completed", operation=operation or "operation", timer_id=timer_id, elapsed_ms=round(elapsed_ms, 2))
return elapsed_ms
def get_logger(component_name: str, log_file: Path | None = None) -> VNALogger:
"""
Get or create a logger instance for a component.
Examples
--------
>>> logger = get_logger("magnitude_processor")
>>> logger.info("Processor initialized")
"""
cache_key = f"{component_name}|{log_file}"
logger = VNALogger._loggers.get(cache_key)
if logger is None:
logger = VNALogger(component_name, log_file)
VNALogger._loggers[cache_key] = logger
return logger
def get_component_logger(component_path: str) -> VNALogger:
"""
Create a logger with a component name derived from a file path.
The base name of the file (without extension) is used, with a few
opinionated adjustments for readability.
"""
path = Path(component_path)
component = path.stem
if "processor" in component and not component.endswith("_processor"):
component = f"{component}_processor"
elif path.parent.name in {"websocket", "acquisition", "settings"}:
component = f"{path.parent.name}_{component}"
return get_logger(component)
def setup_logging(log_level: str = "INFO", log_dir: Path | None = None) -> None:
"""
Configure application-wide logging defaults.
Parameters
----------
log_level:
One of {"DEBUG","INFO","WARNING","ERROR","CRITICAL"} (case-insensitive).
log_dir:
If provided, a single shared log file (vna_all.log) is created in this directory.
"""
level_name = log_level.upper()
numeric_level = getattr(logging, level_name, None)
if not isinstance(numeric_level, int):
raise ValueError(f"Invalid log level: {log_level}")
logging.getLogger("vna").setLevel(numeric_level)
# Add global file handler for all logs if log_dir provided
if log_dir:
log_dir.mkdir(parents=True, exist_ok=True)
global_log_file = log_dir / "vna_all.log"
root = logging.getLogger()
# Check if file handler already exists
has_file_handler = any(isinstance(h, logging.FileHandler) for h in root.handlers)
if not has_file_handler:
file_handler = logging.FileHandler(global_log_file, encoding="utf-8")
file_handler.setLevel(numeric_level)
file_handler.setFormatter(logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
))
root.addHandler(file_handler)
# Touch the app logger (and its file) early to confirm configuration.
log_path = (log_dir / "vna_system.log") if log_dir else None
app_logger = get_logger("vna_system", log_path)
app_logger.info("VNA System logging initialized", log_level=level_name, log_dir=str(log_dir) if log_dir else None)
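End-to-end usage of this module (illustrative; assumes `from pathlib import Path` at the call site):
setup_logging("DEBUG", log_dir=Path("logs"))
log = get_component_logger(__file__)
log.info("Sweep processed", sweep_number=42, points=1000)
timer_id = log.timer_start("calibration")
# ... expensive work ...
log.timer_end(timer_id, "calibration")   # logs elapsed_ms at INFO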

View File

@ -1,190 +1,562 @@
from abc import ABC, abstractmethod
from typing import Dict, Any, List, Optional
from dataclasses import dataclass
from pathlib import Path
import json
import threading
from dataclasses import dataclass, asdict
from datetime import datetime
from pathlib import Path
from typing import Any
import json
import math
import threading
from vna_system.core.logging.logger import get_component_logger
from vna_system.core.settings.preset_manager import ConfigPreset
logger = get_component_logger(__file__)
@dataclass
# =============================================================================
# Data models
# =============================================================================
@dataclass(slots=True)
class UIParameter:
"""
Descriptor of a single UI control that also serves as a *schema* for config validation.
Fields
------
name:
Stable key used in configs and payloads.
label:
Human-readable label for the UI.
type:
One of: "slider", "toggle", "select", "input", "button".
value:
Default value (also used to seed a missing config key).
options:
Extra, type-specific metadata used for validation and UI behavior.
Supported `options` by type
---------------------------
slider:
{"min": <number>, "max": <number>, "step": <number>, "dtype": "int"|"float"}
Notes:
- dtype defaults to "float".
- step alignment is checked from `min` when provided, otherwise from 0/0.0.
toggle:
{}
select:
{"choices": [<allowed_value>, ...]}
Notes:
- `choices` MUST be present and must be a list.
input:
{"type": "int"|"float", "min": <number>, "max": <number>}
Notes:
- Strings are NOT allowed; only numeric input is accepted.
- `type` is required and controls casting/validation.
button:
{"action": "<human readable action>"}
Notes:
- The button value is ignored by validation (buttons are commands, not state).
"""
name: str
label: str
type: str # 'slider', 'toggle', 'select', 'input', 'button'
type: str
value: Any
options: Optional[Dict[str, Any]] = None # min/max for slider, choices for select, etc.
options: dict[str, Any] | None = None
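Concrete instances satisfying the per-type `options` contracts above (values and the `trace_color` control are illustrative, not part of any processor in this commit):
slider = UIParameter(
    name="smoothing_window", label="Smoothing Window", type="slider",
    value=5, options={"min": 3, "max": 21, "step": 2, "dtype": "int"},
)
select = UIParameter(
    name="trace_color", label="Trace Color", type="select",
    value="blue", options={"choices": ["blue", "red", "green"]},
)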
@dataclass
@dataclass(slots=True)
class ProcessedResult:
"""
Result payload emitted by processors.
Fields
------
processor_id:
Logical processor identifier.
timestamp:
UNIX timestamp (float) when the result was produced.
data:
Arbitrary computed data (domain-specific).
plotly_config:
Prebuilt Plotly figure/config to render on the client.
ui_parameters:
The UI schema (possibly dynamic) that the client can render.
metadata:
Additional context useful to the UI or debugging (e.g., config snapshot).
"""
processor_id: str
timestamp: float
data: Dict[str, Any]
plotly_config: Dict[str, Any]
ui_parameters: List[UIParameter]
metadata: Dict[str, Any]
data: dict[str, Any]
plotly_config: dict[str, Any]
ui_parameters: list[UIParameter]
metadata: dict[str, Any]
class BaseProcessor(ABC):
def __init__(self, processor_id: str, config_dir: Path):
# =============================================================================
# Base processor
# =============================================================================
class BaseProcessor:
"""
Base class for sweep processors.
Responsibilities
----------------
• Manage a JSON config file (load → validate → save).
• Keep a bounded, thread-safe history of recent sweeps.
• Provide a uniform API for (re)calculation and result packaging.
• Validate config against the UI schema provided by `get_ui_parameters()`.
Integration contract (to be implemented by subclasses)
------------------------------------------------------
- `process_sweep(sweep_data, calibrated_data, vna_config) -> dict[str, Any]`
Perform the actual computation and return a pure-data dict.
- `generate_plotly_config(processed_data, vna_config) -> dict[str, Any]`
Convert computed data to a Plotly config the client can render.
- `get_ui_parameters() -> list[UIParameter]`
Provide the UI schema (and validation rules via `options`) for this processor.
- `_get_default_config() -> dict[str, Any]`
Provide defaults for config keys. Keys should match UIParameter names.
"""
# --------------------------------------------------------------------- #
# Lifecycle
# --------------------------------------------------------------------- #
def __init__(self, processor_id: str, config_dir: Path) -> None:
self.processor_id = processor_id
self.config_dir = config_dir
self.config_file = config_dir / f"{processor_id}_config.json"
self._lock = threading.RLock()
self._sweep_history: List[Any] = []
self._max_history = 1
self._config = {}
self._load_config()
# Concurrency: all shared state guarded by this lock
self._lock = threading.RLock()
# Bounded history of recent inputs/results (data-only dicts)
self._sweep_history: list[dict[str, Any]] = []
self._max_history = 1
# Current configuration snapshot
self._config: dict[str, Any] = {}
self._load_config()
logger.debug(
"Processor initialized",
processor_id=self.processor_id,
config_file=str(self.config_file),
)
# --------------------------------------------------------------------- #
# History management
# --------------------------------------------------------------------- #
@property
def max_history(self) -> int:
"""Maximum number of history entries retained in memory."""
return self._max_history
@max_history.setter
def max_history(self, value: int):
def max_history(self, value: int) -> None:
"""Change max history size (min 1) and trim existing history if needed."""
with self._lock:
self._max_history = max(1, value)
self._trim_history()
new_size = max(1, int(value))
if new_size != self._max_history:
self._max_history = new_size
self._trim_history()
logger.debug(
"Max history updated",
max_history=new_size,
current=len(self._sweep_history),
)
def _trim_history(self):
def clear_history(self) -> None:
"""Drop all stored history entries."""
with self._lock:
self._sweep_history.clear()
logger.debug("History cleared")
def _trim_history(self) -> None:
"""Internal: keep only the newest `_max_history` items."""
if len(self._sweep_history) > self._max_history:
dropped = len(self._sweep_history) - self._max_history
self._sweep_history = self._sweep_history[-self._max_history:]
logger.debug("History trimmed", dropped=dropped, kept=self._max_history)
def _load_config(self):
# --------------------------------------------------------------------- #
# Config I/O and updates
# --------------------------------------------------------------------- #
def _load_config(self) -> None:
"""
Load the JSON config from disk; on failure or first run, use defaults.
Strategy
--------
- Read file if present; ensure the root is a dict.
- Shallow-merge with defaults (unknown keys are preserved).
- Validate using UI schema (`_validate_config`).
- On any error, fall back to defaults and save them.
"""
defaults = self._get_default_config()
if self.config_file.exists():
try:
with open(self.config_file, 'r') as f:
self._config = json.load(f)
cfg = json.loads(self.config_file.read_text(encoding="utf-8"))
if not isinstance(cfg, dict):
raise ValueError("Config root must be an object")
merged = {**defaults, **cfg}
self._config = merged
self._validate_config()
except (json.JSONDecodeError, FileNotFoundError):
self._config = self._get_default_config()
self.save_config()
else:
self._config = self._get_default_config()
self.save_config()
logger.debug("Config loaded", file=str(self.config_file))
return
except Exception as exc: # noqa: BLE001
logger.warning(
"Config load failed; using defaults",
error=repr(exc),
file=str(self.config_file),
)
def save_config(self):
self._config = defaults
self.save_config()
def save_config(self) -> None:
"""
Save current config to disk atomically.
Implementation detail
---------------------
Write to a temporary sidecar file and then replace the target to avoid
partial writes in case of crashes.
"""
self.config_dir.mkdir(parents=True, exist_ok=True)
with open(self.config_file, 'w') as f:
json.dump(self._config, f, indent=2)
tmp = self.config_file.with_suffix(".json.tmp")
payload = json.dumps(self._config, indent=2, ensure_ascii=False)
tmp.write_text(payload, encoding="utf-8")
tmp.replace(self.config_file)
logger.debug("Config saved", file=str(self.config_file))
def update_config(self, updates: Dict[str, Any]):
def update_config(self, updates: dict[str, Any]) -> None:
"""
Update config with user-provided values.
- Performs type conversion based on current schema (`_convert_config_types`).
- Validates against UI schema; on failure rolls back to the previous state.
- Saves the resulting config when validation passes.
"""
with self._lock:
old_config = self._config.copy()
# Convert types based on existing config values
converted_updates = self._convert_config_types(updates)
self._config.update(converted_updates)
before = self._config.copy()
converted = self._convert_config_types(updates)
self._config.update(converted)
try:
self._validate_config()
self.save_config()
except Exception as e:
self._config = old_config
raise ValueError(f"Invalid configuration: {e}")
logger.info("Config updated", updates=converted)
except Exception as exc: # noqa: BLE001
self._config = before
logger.error("Invalid configuration update; rolled back", error=repr(exc))
raise ValueError(f"Invalid configuration: {exc}") from exc
def _convert_config_types(self, updates: Dict[str, Any]) -> Dict[str, Any]:
"""Convert string values to appropriate types based on existing config"""
converted = {}
for key, value in updates.items():
# If the key is not in the current config, keep the value as-is
if key not in self._config:
converted[key] = value
continue
existing_value = self._config[key]
# Convert booleans from string
if isinstance(existing_value, bool) and isinstance(value, str):
converted[key] = value.lower() in ('true', '1', 'on', 'yes')
continue
# Convert numbers from string
if isinstance(existing_value, (int, float)) and isinstance(value, str):
try:
if isinstance(existing_value, int):
# Handle cases like "50.0" → 50
converted[key] = int(float(value))
else:
converted[key] = float(value)
except ValueError:
# Keep the original string if conversion fails
converted[key] = value
continue
# For all other cases, keep the value as-is
converted[key] = value
return converted
def get_config(self) -> Dict[str, Any]:
return self._config.copy()
def add_sweep_data(self, sweep_data: Any, calibrated_data: Any, vna_config: ConfigPreset | None):
def get_config(self) -> dict[str, Any]:
"""Return a shallow copy of the current config snapshot."""
with self._lock:
self._sweep_history.append({
'sweep_data': sweep_data,
'calibrated_data': calibrated_data,
'vna_config': vna_config.__dict__ if vna_config is not None else {},
'timestamp': datetime.now().timestamp()
})
return self._config.copy()
def _convert_config_types(self, updates: dict[str, Any]) -> dict[str, Any]:
"""
Convert string inputs into the target types inferred from the current config.
Rules
-----
• Booleans: accept case-insensitive {"true","1","on","yes"}.
• Int/float: accept numeric strings; for ints, tolerate "50.0" → 50.
• Unknown keys: kept as-is (subclass validators may use them).
"""
out: dict[str, Any] = {}
for key, value in updates.items():
if key not in self._config:
out[key] = value
continue
current = self._config[key]
# bool
if isinstance(current, bool) and isinstance(value, str):
out[key] = value.strip().lower() in {"true", "1", "on", "yes"}
continue
# numbers
if isinstance(current, int) and isinstance(value, str):
try:
out[key] = int(float(value))
continue
except ValueError:
pass
if isinstance(current, float) and isinstance(value, str):
try:
out[key] = float(value)
continue
except ValueError:
pass
# fallback: unchanged
out[key] = value
return out
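Given a current config of {"grid_enabled": True, "smoothing_window": 5, "y_min": -80.0}, the rules above behave as follows (illustrative; assumes `proc` is an already-initialized processor instance):
assert proc._convert_config_types({"grid_enabled": "on"}) == {"grid_enabled": True}
assert proc._convert_config_types({"smoothing_window": "7.0"}) == {"smoothing_window": 7}
assert proc._convert_config_types({"y_min": "-60"}) == {"y_min": -60.0}
assert proc._convert_config_types({"unknown_key": "x"}) == {"unknown_key": "x"}  # passed through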
# --------------------------------------------------------------------- #
# Data path: accept new sweep, recompute, produce result
# --------------------------------------------------------------------- #
def add_sweep_data(self, sweep_data: Any, calibrated_data: Any, vna_config: ConfigPreset | None):
"""
Add the latest sweep to the in-memory history and trigger recalculation.
Parameters
----------
sweep_data:
Raw/parsed sweep data as produced by acquisition.
calibrated_data:
Data post-calibration (structure is processor-specific).
vna_config:
Snapshot of VNA settings (must be a dataclass; serialized via `asdict`).
Returns
-------
ProcessedResult | None
The newly computed result or None when history is empty.
"""
with self._lock:
self._sweep_history.append(
{
"sweep_data": sweep_data,
"calibrated_data": calibrated_data,
"vna_config": asdict(vna_config) if vna_config is not None else {},
"timestamp": datetime.now().timestamp(),
}
)
self._trim_history()
return self.recalculate()
def recalculate(self) -> Optional[ProcessedResult]:
def recalculate(self) -> ProcessedResult | None:
"""
Recompute the processor output using the most recent history entry.
Notes
-----
Subclasses must ensure `process_sweep` and `generate_plotly_config`
are pure (no global side effects) and thread-safe w.r.t. the provided inputs.
"""
with self._lock:
if not self._sweep_history:
logger.debug("Recalculate skipped; history empty")
return None
latest = self._sweep_history[-1]
return self._process_data(
latest['sweep_data'],
latest['calibrated_data'],
latest['vna_config']
latest["sweep_data"],
latest["calibrated_data"],
latest["vna_config"],
)
def _process_data(self, sweep_data: Any, calibrated_data: Any, vna_config: Dict[str, Any]) -> ProcessedResult:
processed_data = self.process_sweep(sweep_data, calibrated_data, vna_config)
plotly_config = self.generate_plotly_config(processed_data, vna_config)
ui_parameters = self.get_ui_parameters()
def _process_data(self, sweep_data: Any, calibrated_data: Any, vna_config: dict[str, Any]) -> ProcessedResult:
"""
Internal: compute processed data, build a Plotly config, and wrap into `ProcessedResult`.
"""
processed = self.process_sweep(sweep_data, calibrated_data, vna_config)
plotly_conf = self.generate_plotly_config(processed, vna_config)
ui_params = self.get_ui_parameters()
return ProcessedResult(
result = ProcessedResult(
processor_id=self.processor_id,
timestamp=datetime.now().timestamp(),
data=processed_data,
plotly_config=plotly_config,
ui_parameters=ui_parameters,
metadata=self._get_metadata()
data=processed,
plotly_config=plotly_conf,
ui_parameters=ui_params,
metadata=self._get_metadata(),
)
logger.debug("Processed result produced", processor_id=self.processor_id)
return result
@abstractmethod
def process_sweep(self, sweep_data: Any, calibrated_data: Any, vna_config: Dict[str, Any]) -> Dict[str, Any]:
pass
# --------------------------------------------------------------------- #
# Abstracts to implement in concrete processors
# --------------------------------------------------------------------- #
def process_sweep(self, sweep_data: Any, calibrated_data: Any, vna_config: dict[str, Any]) -> dict[str, Any]:
"""Compute the processors domain result from input sweep data."""
raise NotImplementedError
@abstractmethod
def generate_plotly_config(self, processed_data: Dict[str, Any], vna_config: Dict[str, Any]) -> Dict[str, Any]:
pass
def generate_plotly_config(self, processed_data: dict[str, Any], vna_config: dict[str, Any]) -> dict[str, Any]:
"""Create a ready-to-render Plotly configuration from processed data."""
raise NotImplementedError
@abstractmethod
def get_ui_parameters(self) -> List[UIParameter]:
pass
def get_ui_parameters(self) -> list[UIParameter]:
"""Return the UI schema (used both for UI rendering and config validation)."""
raise NotImplementedError
@abstractmethod
def _get_default_config(self) -> Dict[str, Any]:
pass
def _get_default_config(self) -> dict[str, Any]:
"""Provide default config values; keys should match `UIParameter.name`."""
raise NotImplementedError
@abstractmethod
def _validate_config(self):
pass
# --------------------------------------------------------------------- #
# Validation using UI schema
# --------------------------------------------------------------------- #
def _validate_config(self) -> None:
"""
Validate `self._config` using the schema in `get_ui_parameters()`.
def _get_metadata(self) -> Dict[str, Any]:
return {
'processor_id': self.processor_id,
'config': self._config,
'history_count': len(self._sweep_history),
'max_history': self._max_history
}
Validation rules
----------------
slider:
- dtype: "int"|"float" (default "float")
- min/max: inclusive numeric bounds
- step: if present, enforce alignment from `min` (or 0/0.0)
toggle:
- must be bool
select:
- options must be {"choices": [ ... ]}
- value must be one of `choices`
input:
- options must be {"type": "int"|"float", "min"?, "max"?}
- value must be numeric (no strings)
button:
- options must be {"action": "<text>"}
- value is ignored by validation
Unknown control types emit a warning but do not block execution.
"""
params = {p.name: p for p in self.get_ui_parameters()}
for name, schema in params.items():
if name not in self._config:
# Seed missing keys from the UI default to maintain a consistent shape.
self._config[name] = schema.value
logger.debug("Config key missing; seeded from UI default", key=name, value=schema.value)
value = self._config[name]
ptype = (schema.type or "").lower()
opts = schema.options or {}
try:
if ptype == "slider":
self._validate_slider(name, value, opts)
elif ptype == "toggle":
self._validate_toggle(name, value)
elif ptype == "select":
self._validate_select_strict(name, value, opts)
elif ptype == "input":
self._validate_input_numeric(name, value, opts)
elif ptype == "button":
self._validate_button_opts(name, opts)
else:
logger.warning("Unknown UI control type; skipping validation", key=name, type=ptype)
except ValueError as exc:
# Prefix the processor id for easier debugging in multi-processor UIs.
raise ValueError(f"[{self.processor_id}] Invalid `{name}`: {exc}") from exc
# ---- Validators (per control type) ----------------------------------- #
def _validate_slider(self, key: str, value: Any, opts: dict[str, Any]) -> None:
"""
Validate a slider value; normalize to int/float based on `dtype`.
"""
dtype = str(opts.get("dtype", "float")).lower()
min_v = opts.get("min")
max_v = opts.get("max")
step = opts.get("step")
# Type / casting
if dtype == "int":
if not isinstance(value, int):
if isinstance(value, float) and value.is_integer():
value = int(value)
else:
raise ValueError(f"expected int, got {type(value).__name__}")
else:
if not isinstance(value, (int, float)):
raise ValueError(f"expected number, got {type(value).__name__}")
value = float(value)
# Bounds
if min_v is not None and value < min_v:
raise ValueError(f"{value} < min {min_v}")
if max_v is not None and value > max_v:
raise ValueError(f"{value} > max {max_v}")
# Step alignment
if step is not None:
base = min_v if min_v is not None else (0 if dtype == "int" else 0.0)
if dtype == "int":
if (value - int(base)) % int(step) != 0:
raise ValueError(f"value {value} not aligned to step {step} from base {base}")
else:
steps = (value - float(base)) / float(step)
if not math.isclose(steps, round(steps), rel_tol=1e-9, abs_tol=1e-9):
raise ValueError(f"value {value} not aligned to step {step} from base {base}")
# Normalize the stored value
self._config[key] = int(value) if dtype == "int" else float(value)
def _validate_toggle(self, key: str, value: Any) -> None:
"""Validate a boolean toggle."""
if not isinstance(value, bool):
raise ValueError(f"expected bool, got {type(value).__name__}")
def _validate_select_strict(self, key: str, value: Any, opts: dict[str, Any]) -> None:
"""
Validate a 'select' value against a required list of choices.
"""
if not isinstance(opts, dict) or "choices" not in opts or not isinstance(opts["choices"], list):
raise ValueError("select.options must be a dict with key 'choices' as a list")
choices = opts["choices"]
if value not in choices:
raise ValueError(f"value {value!r} not in choices {choices!r}")
def _validate_input_numeric(self, key: str, value: Any, opts: dict[str, Any]) -> None:
"""
Validate a numeric input.
options:
- type: "int" | "float" (required)
- min/max: optional numeric bounds
"""
t = str(opts.get("type", "")).lower()
if t not in {"int", "float"}:
raise ValueError("input.options.type must be 'int' or 'float'")
if t == "int":
# bool is a subclass of int in Python; explicitly reject it
if isinstance(value, bool) or not isinstance(value, (int, float)):
raise ValueError(f"expected int, got {type(value).__name__}")
if isinstance(value, float) and not value.is_integer():
raise ValueError(f"expected int, got non-integer float {value}")
iv = int(value)
self._numeric_bounds_check(key, iv, opts)
self._config[key] = iv
else:
if isinstance(value, bool) or not isinstance(value, (int, float)):
raise ValueError(f"expected float, got {type(value).__name__}")
fv = float(value)
self._numeric_bounds_check(key, fv, opts)
self._config[key] = fv
def _validate_button_opts(self, key: str, opts: dict[str, Any]) -> None:
"""
Validate a button descriptor; buttons are imperative actions, not state.
"""
if not isinstance(opts, dict) or "action" not in opts or not isinstance(opts["action"], str):
raise ValueError("button.options must be a dict with key 'action' (str)")
@staticmethod
def _numeric_bounds_check(key: str, value: float, opts: dict[str, Any]) -> None:
"""Shared numeric bounds check for input/slider."""
min_v = opts.get("min")
max_v = opts.get("max")
if min_v is not None and value < min_v:
raise ValueError(f"{key} {value} < min {min_v}")
if max_v is not None and value > max_v:
raise ValueError(f"{key} {value} > max {max_v}")
# --------------------------------------------------------------------- #
# Utilities
# --------------------------------------------------------------------- #
def _get_metadata(self) -> dict[str, Any]:
"""
Return diagnostic metadata bundled with each `ProcessedResult`.
"""
with self._lock:
return {
"processor_id": self.processor_id,
"config": self._config.copy(),
"history_count": len(self._sweep_history),
"max_history": self._max_history,
}
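A minimal concrete processor satisfying the integration contract above (sketch; `MeanMagnitudeProcessor` and its computation are placeholders, not part of this commit):
class MeanMagnitudeProcessor(BaseProcessor):
    """Toy processor: averages |z| over the calibrated sweep."""

    def process_sweep(self, sweep_data, calibrated_data, vna_config):
        mags = [abs(complex(re, im)) for re, im in calibrated_data.points]
        return {"mean_magnitude": sum(mags) / len(mags) if mags else 0.0}

    def generate_plotly_config(self, processed_data, vna_config):
        title = f"Mean |S| = {processed_data['mean_magnitude']:.3f}"
        return {"data": [], "layout": {"title": title}}

    def get_ui_parameters(self):
        return [UIParameter(name="grid_enabled", label="Show Grid",
                            type="toggle", value=True, options={})]

    def _get_default_config(self):
        return {"grid_enabled": True}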

View File

@ -1,134 +1,136 @@
"""
Calibration Processor Module
Applies VNA calibrations to sweep data using stored calibration standards.
Supports both S11 and S21 measurement modes with appropriate correction algorithms.
"""
import numpy as np
from typing import List, Tuple
from ..acquisition.sweep_buffer import SweepData
from ..settings.preset_manager import VNAMode
from ..settings.calibration_manager import CalibrationSet, CalibrationStandard
from vna_system.core.logging.logger import get_component_logger
from vna_system.core.acquisition.sweep_buffer import SweepData
from vna_system.core.settings.preset_manager import VNAMode
from vna_system.core.settings.calibration_manager import CalibrationSet, CalibrationStandard
logger = get_component_logger(__file__)
class CalibrationProcessor:
"""
Processes sweep data by applying VNA calibrations.
Apply VNA calibration to raw sweeps.
For S11 mode: Uses OSL (Open-Short-Load) calibration
For S21 mode: Uses Through calibration
Supports:
- S11 (reflection) using OSL (OpenShortLoad) error model
- S21 (transmission) using THRU reference
All operations are vectorized with NumPy and return data as a list of (real, imag) tuples.
"""
def __init__(self):
pass
def apply_calibration(self, sweep_data: SweepData, calibration_set: CalibrationSet) -> List[Tuple[float, float]]:
def apply_calibration(self, sweep_data: SweepData, calibration_set: CalibrationSet) -> list[tuple[float, float]]:
"""
Apply calibration to sweep data and return corrected complex data as list of (real, imag) tuples.
Calibrate a sweep and return corrected complex points.
Args:
sweep_data: Raw sweep data from VNA
calibration_set: Calibration standards data
Parameters
----------
sweep_data
Raw sweep as (real, imag) tuples with metadata.
calibration_set
A complete set of standards for the current VNA mode.
Returns:
List of (real, imag) tuples with calibration applied
Returns
-------
list[tuple[float, float]]
Calibrated complex points as (real, imag) pairs.
Raises:
ValueError: If calibration is incomplete or mode not supported
Raises
------
ValueError
If the calibration set is incomplete or the VNA mode is unsupported.
"""
if not calibration_set.is_complete():
raise ValueError("Calibration set is incomplete")
# Convert sweep data to complex array
raw_signal = self._sweep_to_complex_array(sweep_data)
raw = self._to_complex_array(sweep_data)
# Apply calibration based on measurement mode
if calibration_set.preset.mode == VNAMode.S21:
calibrated_array = self._apply_s21_calibration(raw_signal, calibration_set)
logger.debug("Applying S21 calibration", sweep_number=sweep_data.sweep_number, points=sweep_data.total_points)
calibrated = self._apply_s21(raw, calibration_set)
elif calibration_set.preset.mode == VNAMode.S11:
calibrated_array = self._apply_s11_calibration(raw_signal, calibration_set)
logger.debug("Applying S11 calibration (OSL)", sweep_number=sweep_data.sweep_number, points=sweep_data.total_points)
calibrated = self._apply_s11_osl(raw, calibration_set)
else:
raise ValueError(f"Unsupported measurement mode: {calibration_set.preset.mode}")
# Convert back to list of (real, imag) tuples
return [(complex_val.real, complex_val.imag) for complex_val in calibrated_array]
return [(z.real, z.imag) for z in calibrated]
def _sweep_to_complex_array(self, sweep_data: SweepData) -> np.ndarray:
"""Convert SweepData to complex numpy array."""
complex_data = []
for real, imag in sweep_data.points:
complex_data.append(complex(real, imag))
return np.array(complex_data)
# --------------------------------------------------------------------- #
# Helpers
# --------------------------------------------------------------------- #
@staticmethod
def _to_complex_array(sweep: SweepData) -> np.ndarray:
"""Convert `SweepData.points` to a 1-D complex NumPy array."""
# Using vectorized construction for speed and clarity
if not sweep.points:
return np.empty(0, dtype=np.complex64)
arr = np.asarray(sweep.points, dtype=np.float32)
return arr[:, 0].astype(np.float32) + 1j * arr[:, 1].astype(np.float32)
def _apply_s21_calibration(self, raw_signal: np.ndarray, calibration_set: CalibrationSet) -> np.ndarray:
@staticmethod
def _safe_divide(num: np.ndarray, den: np.ndarray, eps: float = 1e-12) -> np.ndarray:
"""
Apply S21 (transmission) calibration using through standard.
Elementwise complex-safe division with small epsilon guard.
Calibrated_S21 = Raw_Signal / Through_Reference
Avoids division by zero by clamping |den|<eps to eps (preserves phase).
"""
mask = np.abs(den) < eps
if np.any(mask):
den = den.copy()
# Scale only magnitude; keep angle of denominator
den[mask] = eps * np.exp(1j * np.angle(den[mask]))
return num / den
# Get through calibration data
through_sweep = calibration_set.standards[CalibrationStandard.THROUGH]
through_reference = self._sweep_to_complex_array(through_sweep)
# Validate array sizes
if len(raw_signal) != len(through_reference):
raise ValueError("Signal and calibration data have different lengths")
# Avoid division by zero
through_reference = np.where(through_reference == 0, 1e-12, through_reference)
# Apply through calibration
calibrated_signal = raw_signal / through_reference
return calibrated_signal
def _apply_s11_calibration(self, raw_signal: np.ndarray, calibration_set: CalibrationSet) -> np.ndarray:
# --------------------------------------------------------------------- #
# S21 calibration (THRU)
# --------------------------------------------------------------------- #
def _apply_s21(self, raw: np.ndarray, calib: CalibrationSet) -> np.ndarray:
"""
Apply S11 (reflection) calibration using OSL (Open-Short-Load) method.
S21: normalize the DUT response by the THRU reference.
This implements the standard OSL error correction:
- Ed (Directivity): Load standard
- Es (Source Match): Calculated from Open, Short, Load
- Er (Reflection Tracking): Calculated from Open, Short, Load
Final correction: S11 = (Raw - Ed) / (Er + Es * (Raw - Ed))
Calibrated = Raw / Through
"""
through = self._to_complex_array(calib.standards[CalibrationStandard.THROUGH])
if raw.size != through.size:
raise ValueError("Signal and THRU reference have different lengths")
return self._safe_divide(raw, through)
# Get calibration standards
open_sweep = calibration_set.standards[CalibrationStandard.OPEN]
short_sweep = calibration_set.standards[CalibrationStandard.SHORT]
load_sweep = calibration_set.standards[CalibrationStandard.LOAD]
# --------------------------------------------------------------------- #
# S11 calibration (OSL)
# --------------------------------------------------------------------- #
def _apply_s11_osl(self, raw: np.ndarray, calib: CalibrationSet) -> np.ndarray:
"""
S11 OSL correction using a 3-term error model.
# Convert to complex arrays
open_cal = self._sweep_to_complex_array(open_sweep)
short_cal = self._sweep_to_complex_array(short_sweep)
load_cal = self._sweep_to_complex_array(load_sweep)
Error terms
-----------
Ed (directivity) := Load
Es (source match) := (Open + Short - 2*Load) / (Open - Short)
Er (reflection tracking) := -2*(Open - Load)*(Short - Load) / (Open - Short)
# Validate array sizes
if not (len(raw_signal) == len(open_cal) == len(short_cal) == len(load_cal)):
raise ValueError("Signal and calibration data have different lengths")
Final correction
----------------
S11 = (Raw - Ed) / (Er + Es * (Raw - Ed))
"""
open_ref = self._to_complex_array(calib.standards[CalibrationStandard.OPEN])
short_ref = self._to_complex_array(calib.standards[CalibrationStandard.SHORT])
load_ref = self._to_complex_array(calib.standards[CalibrationStandard.LOAD])
# Calculate error terms
directivity = load_cal.copy() # Ed = Load
n = raw.size
if not (open_ref.size == short_ref.size == load_ref.size == n):
raise ValueError("Signal and OSL standards have different lengths")
# Source match: Es = (Open + Short - 2*Load) / (Open - Short)
denominator = open_cal - short_cal
denominator = np.where(np.abs(denominator) < 1e-12, 1e-12, denominator)
source_match = (open_cal + short_cal - 2 * load_cal) / denominator
# Ed
ed = load_ref
# Reflection tracking: Er = -2 * (Open - Load) * (Short - Load) / (Open - Short)
reflection_tracking = -2 * (open_cal - load_cal) * (short_cal - load_cal) / denominator
# Es, Er with guarded denominators
denom = open_ref - short_ref
denom = np.where(np.abs(denom) < 1e-12, 1e-12 * np.exp(1j * np.angle(denom)), denom)
# Apply OSL correction
corrected_numerator = raw_signal - directivity
corrected_denominator = reflection_tracking + source_match * corrected_numerator
es = (open_ref + short_ref - 2.0 * load_ref) / denom
er = -2.0 * (open_ref - load_ref) * (short_ref - load_ref) / denom
# Avoid division by zero
corrected_denominator = np.where(np.abs(corrected_denominator) < 1e-12, 1e-12, corrected_denominator)
calibrated_signal = corrected_numerator / corrected_denominator
return calibrated_signal
num = raw - ed
den = er + es * num
return self._safe_divide(num, den)
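A useful sanity check for the error terms above: with ideal standards Open = +1, Short = -1, Load = 0 at every point, they reduce to Ed = 0, Es = 0, Er = 1, so the correction leaves the data untouched. A standalone NumPy check (illustrative):
import numpy as np

open_ref  = np.ones(5, dtype=complex)
short_ref = -np.ones(5, dtype=complex)
load_ref  = np.zeros(5, dtype=complex)
raw       = np.full(5, 0.3 + 0.1j)

denom = open_ref - short_ref
ed = load_ref
es = (open_ref + short_ref - 2.0 * load_ref) / denom
er = -2.0 * (open_ref - load_ref) * (short_ref - load_ref) / denom
s11 = (raw - ed) / (er + es * (raw - ed))
assert np.allclose(s11, raw)   # ideal OSL is the identity correction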

View File

@ -1,9 +1,10 @@
{
"y_min": -80,
"y_max": 15,
"y_min": -60,
"y_max": 20,
"smoothing_enabled": false,
"smoothing_window": 5,
"marker_enabled": true,
"marker_frequency": 100000009,
"grid_enabled": true
"marker_enabled": false,
"marker_frequency": 100000009.0,
"grid_enabled": false,
"reset_smoothing": false
}

View File

@ -1,12 +1,12 @@
{
"y_min": -210,
"y_min": -360,
"y_max": 360,
"unwrap_phase": false,
"phase_offset": 0,
"smoothing_enabled": true,
"smoothing_enabled": false,
"smoothing_window": 5,
"marker_enabled": false,
"marker_frequency": "asdasd",
"marker_enabled": true,
"marker_frequency": 8000000000.0,
"reference_line_enabled": false,
"reference_phase": 0,
"grid_enabled": true

View File

@ -1,221 +1,293 @@
import numpy as np
from typing import Dict, Any, List
from pathlib import Path
from typing import Any
from ..base_processor import BaseProcessor, UIParameter
from vna_system.core.logging.logger import get_component_logger
from vna_system.core.processors.base_processor import BaseProcessor, UIParameter
logger = get_component_logger(__file__)
class MagnitudeProcessor(BaseProcessor):
def __init__(self, config_dir: Path):
"""
Compute and visualize magnitude (in dB) over frequency from calibrated sweep data.
Pipeline
--------
1) Derive frequency axis from VNA config (start/stop, N points).
2) Compute |S| in dB per point (20*log10(|z|); clamped to -120 dB when |z| == 0).
3) Optionally smooth using moving-average (odd window).
4) Provide Plotly configuration including an optional marker.
Notes
-----
- `calibrated_data` is expected to be a `SweepData` with `.points: list[tuple[float,float]]`.
- Marker frequency is validated by BaseProcessor via `UIParameter(options=...)`.
"""
def __init__(self, config_dir: Path) -> None:
super().__init__("magnitude", config_dir)
# State for smoothing that can be reset by button
self._smoothing_history = []
# Internal state that can be reset via a UI "button"
self._smoothing_history: list[float] = []
def process_sweep(self, sweep_data: Any, calibrated_data: Any, vna_config: Dict[str, Any]) -> Dict[str, Any]:
if not calibrated_data or not hasattr(calibrated_data, 'points'):
return {'error': 'No calibrated data available'}
# ------------------------------------------------------------------ #
# Core processing
# ------------------------------------------------------------------ #
def process_sweep(self, sweep_data: Any, calibrated_data: Any, vna_config: dict[str, Any]) -> dict[str, Any]:
"""
Produce magnitude trace (dB) and ancillary info from a calibrated sweep.
frequencies = []
magnitudes_db = []
for i, (real, imag) in enumerate(calibrated_data.points):
complex_val = complex(real, imag)
magnitude_db = 20 * np.log10(abs(complex_val)) if abs(complex_val) > 0 else -120
# Calculate frequency based on VNA config
start_freq = vna_config.get('start_frequency', 100e6)
stop_freq = vna_config.get('stop_frequency', 8.8e9)
total_points = len(calibrated_data.points)
frequency = start_freq + (stop_freq - start_freq) * i / (total_points - 1)
frequencies.append(frequency)
magnitudes_db.append(magnitude_db)
# Apply smoothing if enabled
if self._config.get('smoothing_enabled', False):
window_size = self._config.get('smoothing_window', 5)
magnitudes_db = self._apply_moving_average(magnitudes_db, window_size)
return {
'frequencies': frequencies,
'magnitudes_db': magnitudes_db,
'y_min': self._config.get('y_min', -80),
'y_max': self._config.get('y_max', 10),
'marker_enabled': self._config.get('marker_enabled', True),
'marker_frequency': self._config.get('marker_frequency', frequencies[len(frequencies)//2] if frequencies else 1e9),
'grid_enabled': self._config.get('grid_enabled', True)
}
def generate_plotly_config(self, processed_data: Dict[str, Any], vna_config: Dict[str, Any]) -> Dict[str, Any]:
if 'error' in processed_data:
return {'error': processed_data['error']}
frequencies = processed_data['frequencies']
magnitudes_db = processed_data['magnitudes_db']
# Find marker point
marker_freq = processed_data['marker_frequency']
marker_idx = min(range(len(frequencies)), key=lambda i: abs(frequencies[i] - marker_freq))
marker_mag = magnitudes_db[marker_idx]
traces = [{
'x': [f / 1e9 for f in frequencies], # Convert to GHz
'y': magnitudes_db,
'type': 'scatter',
'mode': 'lines',
'name': 'S11 Magnitude',
'line': {'color': 'blue', 'width': 2}
}]
# Add marker if enabled
if processed_data['marker_enabled']:
traces.append({
'x': [frequencies[marker_idx] / 1e9],
'y': [marker_mag],
'type': 'scatter',
'mode': 'markers',
'name': f'Marker: {frequencies[marker_idx]/1e9:.3f} GHz, {marker_mag:.2f} dB',
'marker': {'color': 'red', 'size': 8, 'symbol': 'circle'}
})
return {
'data': traces,
'layout': {
'title': 'S11 Magnitude Response',
'xaxis': {
'title': 'Frequency (GHz)',
'showgrid': processed_data['grid_enabled']
},
'yaxis': {
'title': 'Magnitude (dB)',
'range': [processed_data['y_min'], processed_data['y_max']],
'showgrid': processed_data['grid_enabled']
},
'hovermode': 'x unified',
'showlegend': True
Returns
-------
dict[str, Any]
{
'frequencies': list[float],
'magnitudes_db': list[float],
'y_min': float,
'y_max': float,
'marker_enabled': bool,
'marker_frequency': float,
'grid_enabled': bool
}
}
"""
if not calibrated_data or not hasattr(calibrated_data, "points"):
logger.warning("No calibrated data available for magnitude processing")
return {"error": "No calibrated data available"}
def get_ui_parameters(self) -> List[UIParameter]:
points: list[tuple[float, float]] = calibrated_data.points # list of (real, imag)
n = len(points)
if n == 0:
logger.warning("Calibrated sweep contains zero points")
return {"error": "Empty calibrated sweep"}
# Frequency axis from VNA config (defaults if not provided)
start_freq = float(vna_config.get("start_frequency", 100e6))
stop_freq = float(vna_config.get("stop_frequency", 8.8e9))
if n == 1:
freqs = [start_freq]
else:
step = (stop_freq - start_freq) / (n - 1)
freqs = [start_freq + i * step for i in range(n)]
# Magnitude in dB (clamp zero magnitude to -120 dB)
mags_db: list[float] = []
for real, imag in points:
mag = abs(complex(real, imag))
mags_db.append(20.0 * np.log10(mag) if mag > 0.0 else -120.0)
# Optional smoothing
if self._config.get("smoothing_enabled", False):
window = int(self._config.get("smoothing_window", 5))
mags_db = self._apply_moving_average(mags_db, window)
result = {
"frequencies": freqs,
"magnitudes_db": mags_db,
"y_min": float(self._config.get("y_min", -80)),
"y_max": float(self._config.get("y_max", 10)),
"marker_enabled": bool(self._config.get("marker_enabled", True)),
"marker_frequency": float(
self._config.get("marker_frequency", freqs[len(freqs) // 2] if freqs else 1e9)
),
"grid_enabled": bool(self._config.get("grid_enabled", True)),
}
logger.debug("Magnitude sweep processed", points=n)
return result
def generate_plotly_config(self, processed_data: dict[str, Any], vna_config: dict[str, Any]) -> dict[str, Any]:
"""
Build a Plotly figure config for the magnitude trace and optional marker.
"""
if "error" in processed_data:
return {"error": processed_data["error"]}
freqs: list[float] = processed_data["frequencies"]
mags_db: list[float] = processed_data["magnitudes_db"]
grid_enabled: bool = processed_data["grid_enabled"]
# Marker resolution
marker_freq: float = processed_data["marker_frequency"]
if freqs:
idx = min(range(len(freqs)), key=lambda i: abs(freqs[i] - marker_freq))
marker_mag = mags_db[idx]
marker_x = freqs[idx] / 1e9
marker_trace = {
"x": [marker_x],
"y": [marker_mag],
"type": "scatter",
"mode": "markers",
"name": f"Marker: {freqs[idx]/1e9:.3f} GHz, {marker_mag:.2f} dB",
"marker": {"color": "red", "size": 8, "symbol": "circle"},
}
else:
idx = 0
marker_trace = None
traces = [
{
"x": [f / 1e9 for f in freqs], # Hz -> GHz
"y": mags_db,
"type": "scatter",
"mode": "lines",
"name": "Magnitude",
"line": {"color": "blue", "width": 2},
}
]
if processed_data["marker_enabled"] and marker_trace:
traces.append(marker_trace)
fig = {
"data": traces,
"layout": {
"title": "Magnitude Response",
"xaxis": {"title": "Frequency (GHz)", "showgrid": grid_enabled},
"yaxis": {
"title": "Magnitude (dB)",
"range": [processed_data["y_min"], processed_data["y_max"]],
"showgrid": grid_enabled,
},
"hovermode": "x unified",
"showlegend": True,
},
}
return fig
# ------------------------------------------------------------------ #
# UI schema
# ------------------------------------------------------------------ #
def get_ui_parameters(self) -> list[UIParameter]:
"""
UI/validation schema.
Conforms to BaseProcessor rules:
- slider: requires dtype + min/max/step alignment checks
- toggle: bool only
- input: numeric only, with {"type": "int"|"float", "min"?, "max"?}
- button: {"action": "..."}; value ignored by validation
"""
return [
UIParameter(
name='y_min',
label='Y Axis Min (dB)',
type='slider',
value=self._config.get('y_min', -80),
options={'min': -120, 'max': 0, 'step': 5}
name="y_min",
label="Y Axis Min (dB)",
type="slider",
value=self._config.get("y_min", -80),
options={"min": -120, "max": 0, "step": 5, "dtype": "int"},
),
UIParameter(
name='y_max',
label='Y Axis Max (dB)',
type='slider',
value=self._config.get('y_max', 10),
options={'min': -20, 'max': 20, 'step': 5}
name="y_max",
label="Y Axis Max (dB)",
type="slider",
value=self._config.get("y_max", 10),
options={"min": -20, "max": 20, "step": 5, "dtype": "int"},
),
UIParameter(
name='smoothing_enabled',
label='Enable Smoothing',
type='toggle',
value=self._config.get('smoothing_enabled', False)
name="smoothing_enabled",
label="Enable Smoothing",
type="toggle",
value=self._config.get("smoothing_enabled", False),
options={},
),
UIParameter(
name='smoothing_window',
label='Smoothing Window Size',
type='slider',
value=self._config.get('smoothing_window', 5),
options={'min': 3, 'max': 21, 'step': 2}
name="smoothing_window",
label="Smoothing Window Size",
type="slider",
value=self._config.get("smoothing_window", 5),
options={"min": 3, "max": 21, "step": 2, "dtype": "int"},
),
UIParameter(
name='marker_enabled',
label='Show Marker',
type='toggle',
value=self._config.get('marker_enabled', True)
name="marker_enabled",
label="Show Marker",
type="toggle",
value=self._config.get("marker_enabled", True),
options={},
),
UIParameter(
name='marker_frequency',
label='Marker Frequency (Hz)',
type='input',
value=self._config.get('marker_frequency', 1e9),
options={'type': 'number', 'min': 100e6, 'max': 8.8e9}
name="marker_frequency",
label="Marker Frequency (Hz)",
type="input",
value=self._config.get("marker_frequency", 1e9),
options={"type": "float", "min": 100e6, "max": 8.8e9},
),
UIParameter(
name='grid_enabled',
label='Show Grid',
type='toggle',
value=self._config.get('grid_enabled', True)
name="grid_enabled",
label="Show Grid",
type="toggle",
value=self._config.get("grid_enabled", True),
options={},
),
UIParameter(
name='reset_smoothing',
label='Reset Smoothing',
type='button',
value=False, # Always False for buttons, will be set to True temporarily when clicked
options={'action': 'Reset the smoothing filter state'}
)
name="reset_smoothing",
label="Reset Smoothing",
type="button",
value=False, # buttons carry no state; ignored by validator
options={"action": "Reset the smoothing filter state"},
),
]
def _get_default_config(self) -> Dict[str, Any]:
def _get_default_config(self) -> dict[str, Any]:
"""Defaults that align with the UI schema above."""
return {
'y_min': -80,
'y_max': 10,
'smoothing_enabled': False,
'smoothing_window': 5,
'marker_enabled': True,
'marker_frequency': 1e9,
'grid_enabled': True
"y_min": -80,
"y_max": 10,
"smoothing_enabled": False,
"smoothing_window": 5,
"marker_enabled": True,
"marker_frequency": 1e9,
"grid_enabled": True,
}
    # ------------------------------------------------------------------ #
    # Config updates & actions
    # ------------------------------------------------------------------ #
    def update_config(self, updates: dict[str, Any]) -> None:
        """
        Apply config updates; handle UI buttons out-of-band.

        Any key that corresponds to a button triggers an action when True and
        is *not* persisted in the config. Other keys are forwarded to the
        BaseProcessor (with type conversion + validation).
        """
        ui_params = {param.name: param for param in self.get_ui_parameters()}
        button_actions: dict[str, bool] = {}
        config_updates: dict[str, Any] = {}

        for key, value in updates.items():
            schema = ui_params.get(key)
            if schema and schema.type == "button":
                if value:
                    button_actions[key] = True
            else:
                config_updates[key] = value

        if config_updates:
            super().update_config(config_updates)

        # Execute button actions
        if button_actions.get("reset_smoothing"):
            self._smoothing_history.clear()
            logger.info("Smoothing state reset via UI button")
    # ------------------------------------------------------------------ #
    # Smoothing
    # ------------------------------------------------------------------ #
    @staticmethod
    def _apply_moving_average(data: list[float], window_size: int) -> list[float]:
        """
        Centered moving average with clamped edges.

        Requirements
        ------------
        - window_size must be odd and >= 3 (enforced by UI schema).
        """
        n = len(data)
        if n == 0 or window_size <= 1 or window_size >= n:
            return data

        half = window_size // 2
        out: list[float] = []
        for i in range(n):
            lo = max(0, i - half)
            hi = min(n, i + half + 1)
            out.append(sum(data[lo:hi]) / (hi - lo))
        return out
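
    # Worked example of the clamped-edge average above (hypothetical data):
    #   _apply_moving_average([0.0, 10.0, 0.0, 10.0, 0.0], 3)
    #   -> [5.0, 10/3, 20/3, 10/3, 5.0]
    # Edge windows shrink to 2 samples; interior windows use all 3.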

View File

@ -1,242 +1,307 @@
import numpy as np
from pathlib import Path
from typing import Any

from vna_system.core.logging.logger import get_component_logger
from vna_system.core.processors.base_processor import BaseProcessor, UIParameter

logger = get_component_logger(__file__)
class PhaseProcessor(BaseProcessor):
    """
    Compute and visualize phase (degrees) over frequency from calibrated sweep data.

    Pipeline
    --------
    1) Derive frequency axis from VNA config (start/stop, N points).
    2) Compute phase in degrees from complex samples.
    3) Optional simple unwrapping (+/-180° jumps) and offset.
    4) Optional moving-average smoothing.
    5) Provide Plotly configuration including an optional marker and reference line.
    """

    def __init__(self, config_dir: Path) -> None:
super().__init__("phase", config_dir)
    # ------------------------------------------------------------------ #
    # Core processing
    # ------------------------------------------------------------------ #
    def process_sweep(self, sweep_data: Any, calibrated_data: Any, vna_config: dict[str, Any]) -> dict[str, Any]:
        """
        Produce phase trace (degrees) and ancillary info from a calibrated sweep.

        Returns
        -------
        dict[str, Any]
            {
                'frequencies': list[float],
                'phases_deg': list[float],
                'y_min': float,
                'y_max': float,
                'marker_enabled': bool,
                'marker_frequency': float,
                'grid_enabled': bool,
                'reference_line_enabled': bool,
                'reference_phase': float
            }
        """
        if not calibrated_data or not hasattr(calibrated_data, "points"):
            logger.warning("No calibrated data available for phase processing")
            return {"error": "No calibrated data available"}

        points: list[tuple[float, float]] = calibrated_data.points
        n = len(points)
        if n == 0:
            logger.warning("Calibrated sweep contains zero points")
            return {"error": "Empty calibrated sweep"}

        # Frequency axis from VNA config (defaults if not provided)
        start_freq = float(vna_config.get("start_frequency", 100e6))
        stop_freq = float(vna_config.get("stop_frequency", 8.8e9))
        if n == 1:
            freqs = [start_freq]
        else:
            step = (stop_freq - start_freq) / (n - 1)
            freqs = [start_freq + i * step for i in range(n)]

        # Phase in degrees
        phases_deg: list[float] = []
        unwrap = bool(self._config.get("unwrap_phase", True))
        for real, imag in points:
            z = complex(real, imag)
            deg = float(np.degrees(np.angle(z)))
            if unwrap and phases_deg:
                diff = deg - phases_deg[-1]
                if diff > 180.0:
                    deg -= 360.0
                elif diff < -180.0:
                    deg += 360.0
            phases_deg.append(deg)

        # Offset
        phase_offset = float(self._config.get("phase_offset", 0.0))
        if phase_offset:
            phases_deg = [p + phase_offset for p in phases_deg]

        # Optional smoothing
        if self._config.get("smoothing_enabled", False):
            window = int(self._config.get("smoothing_window", 5))
            phases_deg = self._apply_moving_average(phases_deg, window)

        result = {
            "frequencies": freqs,
            "phases_deg": phases_deg,
            "y_min": float(self._config.get("y_min", -180)),
            "y_max": float(self._config.get("y_max", 180)),
            "marker_enabled": bool(self._config.get("marker_enabled", True)),
            "marker_frequency": float(
                self._config.get("marker_frequency", freqs[len(freqs) // 2] if freqs else 1e9)
            ),
            "grid_enabled": bool(self._config.get("grid_enabled", True)),
            "reference_line_enabled": bool(self._config.get("reference_line_enabled", False)),
            "reference_phase": float(self._config.get("reference_phase", 0)),
        }
        logger.debug("Phase sweep processed", points=n)
        return result
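
    # Sanity sketch of the unwrap rule above (hypothetical samples): raw angles
    # [170.0, -175.0] differ by -345°, so the second sample is shifted by +360°
    # to 185.0, keeping the trace continuous across the ±180° boundary.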
def generate_plotly_config(self, processed_data: dict[str, Any], vna_config: dict[str, Any]) -> dict[str, Any]:
"""
Build a Plotly figure config for the phase trace, marker, and optional reference line.
"""
if "error" in processed_data:
return {"error": processed_data["error"]}
freqs: list[float] = processed_data["frequencies"]
phases: list[float] = processed_data["phases_deg"]
grid_enabled: bool = processed_data["grid_enabled"]
# Marker
marker_freq: float = processed_data["marker_frequency"]
if freqs:
idx = min(range(len(freqs)), key=lambda i: abs(freqs[i] - marker_freq))
marker_y = phases[idx]
marker_trace = {
"x": [freqs[idx] / 1e9],
"y": [marker_y],
"type": "scatter",
"mode": "markers",
"name": f"Marker: {freqs[idx]/1e9:.3f} GHz, {marker_y:.1f}°",
"marker": {"color": "red", "size": 8, "symbol": "circle"},
}
else:
marker_trace = None
traces = [
{
"x": [f / 1e9 for f in freqs], # Hz -> GHz
"y": phases,
"type": "scatter",
"mode": "lines",
"name": "Phase",
"line": {"color": "green", "width": 2},
}
]
if processed_data["marker_enabled"] and marker_trace:
traces.append(marker_trace)
# Reference line
if processed_data["reference_line_enabled"] and freqs:
ref = float(processed_data["reference_phase"])
traces.append(
{
"x": [freqs[0] / 1e9, freqs[-1] / 1e9],
"y": [ref, ref],
"type": "scatter",
"mode": "lines",
"name": f"Reference: {ref:.1f}°",
"line": {"color": "gray", "width": 1, "dash": "dash"},
}
)
fig = {
"data": traces,
"layout": {
"title": "Phase Response",
"xaxis": {"title": "Frequency (GHz)", "showgrid": grid_enabled},
"yaxis": {
"title": "Phase (degrees)",
"range": [processed_data["y_min"], processed_data["y_max"]],
"showgrid": grid_enabled,
},
"hovermode": "x unified",
"showlegend": True,
},
}
return fig
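
    # The returned dict is plain JSON-serializable Plotly figure content; e.g.
    # (hypothetical client-side use) json.dumps(fig) can be sent over the results
    # WebSocket and rendered with Plotly.newPlot(div, fig["data"], fig["layout"]).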
# ------------------------------------------------------------------ #
# UI schema
# ------------------------------------------------------------------ #
def get_ui_parameters(self) -> list[UIParameter]:
"""
UI/validation schema (compatible with BaseProcessor's validators).
"""
return [
UIParameter(
                name="y_min",
                label="Y Axis Min (degrees)",
                type="slider",
                value=self._config.get("y_min", -180),
                options={"min": -360, "max": 0, "step": 15, "dtype": "int"},
            ),
            UIParameter(
                name="y_max",
                label="Y Axis Max (degrees)",
                type="slider",
                value=self._config.get("y_max", 180),
                options={"min": 0, "max": 360, "step": 15, "dtype": "int"},
            ),
            UIParameter(
                name="unwrap_phase",
                label="Unwrap Phase",
                type="toggle",
                value=self._config.get("unwrap_phase", True),
                options={},
            ),
            UIParameter(
                name="phase_offset",
                label="Phase Offset (degrees)",
                type="slider",
                value=self._config.get("phase_offset", 0),
                options={"min": -180, "max": 180, "step": 5, "dtype": "int"},
            ),
            UIParameter(
                name="smoothing_enabled",
                label="Enable Smoothing",
                type="toggle",
                value=self._config.get("smoothing_enabled", False),
                options={},
            ),
            UIParameter(
                name="smoothing_window",
                label="Smoothing Window Size",
                type="slider",
                value=self._config.get("smoothing_window", 5),
                options={"min": 3, "max": 21, "step": 2, "dtype": "int"},
            ),
            UIParameter(
                name="marker_enabled",
                label="Show Marker",
                type="toggle",
                value=self._config.get("marker_enabled", True),
                options={},
            ),
            UIParameter(
                name="marker_frequency",
                label="Marker Frequency (Hz)",
                type="input",
                value=self._config.get("marker_frequency", 1e9),
                options={"type": "float", "min": 100e6, "max": 8.8e9},
            ),
            UIParameter(
                name="reference_line_enabled",
                label="Show Reference Line",
                type="toggle",
                value=self._config.get("reference_line_enabled", False),
                options={},
            ),
            UIParameter(
                name="reference_phase",
                label="Reference Phase (degrees)",
                type="slider",
                value=self._config.get("reference_phase", 0),
                options={"min": -180, "max": 180, "step": 15, "dtype": "int"},
            ),
            UIParameter(
                name="grid_enabled",
                label="Show Grid",
                type="toggle",
                value=self._config.get("grid_enabled", True),
                options={},
            ),
]
    def _get_default_config(self) -> dict[str, Any]:
        """Defaults aligned with the UI schema."""
        return {
            "y_min": -180,
            "y_max": 180,
            "unwrap_phase": True,
            "phase_offset": 0,
            "smoothing_enabled": False,
            "smoothing_window": 5,
            "marker_enabled": True,
            "marker_frequency": 1e9,
            "reference_line_enabled": False,
            "reference_phase": 0,
            "grid_enabled": True,
}
    # ------------------------------------------------------------------ #
    # Smoothing
    # ------------------------------------------------------------------ #
    @staticmethod
    def _apply_moving_average(data: list[float], window_size: int) -> list[float]:
        """
        Centered moving average with clamped edges.

        Requirements
        ------------
        - `window_size` must be odd and >= 3 (enforced by UI schema).
        """
        n = len(data)
        if n == 0 or window_size <= 1 or window_size >= n:
            return data

        half = window_size // 2
        out: list[float] = []
        for i in range(n):
            lo = max(0, i - half)
            hi = min(n, i + half + 1)
            out.append(sum(data[lo:hi]) / (hi - lo))
        return out

View File

@ -1,74 +1,139 @@
import threading
from pathlib import Path
from typing import Any, Callable

from vna_system.core.logging.logger import get_component_logger
from vna_system.core.acquisition.sweep_buffer import SweepBuffer, SweepData
from vna_system.core.processors.base_processor import BaseProcessor, ProcessedResult
from vna_system.core.processors.calibration_processor import CalibrationProcessor
from vna_system.core.settings.preset_manager import ConfigPreset
from vna_system.core.settings.settings_manager import VNASettingsManager

logger = get_component_logger(__file__)
class ProcessorManager:
    """
    Orchestrates VNA processors and pushes sweeps through them in the background.

    Responsibilities
    ----------------
    • Keep a registry of `BaseProcessor` instances (add/get/list).
    • Subscribe result callbacks that receive every `ProcessedResult`.
    • Watch the `SweepBuffer` and, when a new sweep arrives:
        1) Optionally apply calibration.
        2) Fetch current VNA preset/config.
        3) Feed the sweep to every registered processor.
        4) Fan-out results to callbacks.
    • Offer on-demand (re)calculation for a specific processor (with config updates).

    Threading model
    ---------------
    A single background thread (`_processing_loop`) polls the `SweepBuffer` for the
    newest sweep. Access to internal state (processors, callbacks) is guarded by
    `_lock` when mutation is possible.
    """

    def __init__(self, sweep_buffer: SweepBuffer, settings_manager: VNASettingsManager, config_dir: Path) -> None:
        # External deps
        self.sweep_buffer = sweep_buffer
        self.settings_manager = settings_manager
        self.config_dir = config_dir

        # Registry & fan-out
        self._processors: dict[str, BaseProcessor] = {}
        self._result_callbacks: list[Callable[[str, ProcessedResult], None]] = []

        # Concurrency
        self._lock = threading.RLock()
        self._running = False
        self._thread: threading.Thread | None = None
        self._stop_event = threading.Event()

        # Sweep progress
        self._last_processed_sweep = 0

        # Calibration facility
        self.calibration_processor = CalibrationProcessor()

        # Default processors (safe to skip if missing)
        self._register_default_processors()

        logger.debug(
            "ProcessorManager initialized",
            processors=list(self._processors.keys()),
            config_dir=str(self.config_dir),
        )
    # --------------------------------------------------------------------- #
    # Registry
    # --------------------------------------------------------------------- #
    def register_processor(self, processor: BaseProcessor) -> None:
        """Register (or replace) a processor by its `processor_id`."""
        with self._lock:
            self._processors[processor.processor_id] = processor
        logger.info("Processor registered", processor_id=processor.processor_id)
    def get_processor(self, processor_id: str) -> BaseProcessor | None:
"""Return a processor instance by id, or None if not found."""
return self._processors.get(processor_id)
    def list_processors(self) -> list[str]:
"""Return a stable snapshot list of registered processor ids."""
return list(self._processors.keys())
    # --------------------------------------------------------------------- #
    # Results fan-out
    # --------------------------------------------------------------------- #
    def add_result_callback(self, callback: Callable[[str, ProcessedResult], None]) -> None:
        """
        Add a callback invoked for every produced result.

        Callback signature:
            (processor_id: str, result: ProcessedResult) -> None
        """
        with self._lock:
            self._result_callbacks.append(callback)
        logger.debug("Result callback added", callbacks=len(self._result_callbacks))
# --------------------------------------------------------------------- #
# Main processing actions
# --------------------------------------------------------------------- #
def process_sweep(self, sweep_data: SweepData, calibrated_data: Any, vna_config: ConfigPreset | None) -> dict[str, ProcessedResult]:
"""
Feed a sweep into all processors and dispatch results to callbacks.
Returns a map {processor_id: ProcessedResult} for successfully computed processors.
"""
results: dict[str, ProcessedResult] = {}
with self._lock:
# Snapshot to avoid holding the lock while user callbacks run
processors_items = list(self._processors.items())
callbacks = list(self._result_callbacks)
for processor_id, processor in processors_items:
try:
result = processor.add_sweep_data(sweep_data, calibrated_data, vna_config)
if result:
results[processor_id] = result
for cb in callbacks:
try:
cb(processor_id, result)
except Exception as exc: # noqa: BLE001
logger.error("Result callback failed", processor_id=processor_id, error=repr(exc))
except Exception as exc: # noqa: BLE001
logger.error("Processing error", processor_id=processor_id, error=repr(exc))
return results
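
    # Minimal wiring sketch using only names defined in this module (instances
    # and the config path are hypothetical):
    #   manager = ProcessorManager(sweep_buffer, settings_manager, Path("configs"))
    #   manager.add_result_callback(lambda pid, res: print(pid, res.timestamp))
    #   manager.start_processing()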
def recalculate_processor(self, processor_id: str, config_updates: dict[str, Any] | None = None) -> ProcessedResult | None:
"""
Recalculate a single processor with optional config updates.
- If `config_updates` is provided, they are applied and validated first.
- The latest sweep (from the processor's own history) is used for recomputation.
- Result callbacks are invoked if a result is produced.
"""
processor = self.get_processor(processor_id)
if not processor:
raise ValueError(f"Processor {processor_id} not found")
@ -79,111 +144,134 @@ class ProcessorManager:
            result = processor.recalculate()
            if result:
                with self._lock:
                    callbacks = list(self._result_callbacks)
                for cb in callbacks:
                    try:
                        cb(processor_id, result)
                    except Exception as exc:  # noqa: BLE001
                        logger.error("Result callback failed", processor_id=processor_id, error=repr(exc))
            return result
        except Exception as exc:  # noqa: BLE001
            logger.error("Recalculation error", processor_id=processor_id, error=repr(exc))
            raise
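
    # Example (hypothetical processor id): re-render with a wider smoothing
    # window, reusing the processor's own sweep history as documented above:
    #   result = manager.recalculate_processor("magnitude", {"smoothing_window": 9})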
def get_processor_ui_parameters(self, processor_id: str):
processor = self.get_processor(processor_id)
if not processor:
raise ValueError(f"Processor {processor_id} not found")
return [param.__dict__ for param in processor.get_ui_parameters()]
    # --------------------------------------------------------------------- #
    # Runtime control
    # --------------------------------------------------------------------- #
    def set_sweep_buffer(self, sweep_buffer: SweepBuffer) -> None:
        """Swap the underlying sweep buffer (takes effect immediately)."""
        self.sweep_buffer = sweep_buffer
        logger.info("Sweep buffer updated")
    def start_processing(self) -> None:
        """
        Start the background processing thread.

        Safe to call multiple times (no-op if already running).
        """
        if self._running:
            logger.debug("start_processing ignored; already running")
            return

        self._running = True
        self._stop_event.clear()
        self._thread = threading.Thread(target=self._processing_loop, daemon=True, name="VNA-ProcessorManager")
        self._thread.start()
        logger.info("Processor manager started")
    def stop_processing(self) -> None:
        """
        Stop the background thread and wait briefly for it to join.
        """
        if not self._running:
            logger.debug("stop_processing ignored; not running")
            return

        self._running = False
        self._stop_event.set()
        if self._thread:
            self._thread.join(timeout=1.0)
        logger.info("Processor manager stopped")
    # --------------------------------------------------------------------- #
    # Background loop
    # --------------------------------------------------------------------- #
    def _processing_loop(self) -> None:
        """
        Poll the sweep buffer and process any new sweep.

        The loop:
          • Grabs the latest sweep.
          • Skips if it's already processed.
          • Applies calibration (when available).
          • Retrieves current VNA preset.
          • Sends the sweep to all processors.
          • Sleeps briefly to keep CPU usage low.
        """
        while self._running and not self._stop_event.is_set():
            try:
                latest = self.sweep_buffer.get_latest_sweep()
                if latest and latest.sweep_number > self._last_processed_sweep:
                    calibrated = self._apply_calibration(latest)
                    vna_cfg = self.settings_manager.get_current_preset()
                    self.process_sweep(latest, calibrated, vna_cfg)
                    self._last_processed_sweep = latest.sweep_number

                # Light-duty polling to reduce wakeups
                self._stop_event.wait(0.05)
            except Exception as exc:  # noqa: BLE001
                logger.error("Error in processing loop", error=repr(exc))
                self._stop_event.wait(0.1)
# --------------------------------------------------------------------- #
# Calibration
# --------------------------------------------------------------------- #
def _apply_calibration(self, sweep_data: SweepData) -> SweepData:
"""Apply calibration to sweep data"""
"""
Apply calibration to the sweep when a complete set is available.
Returns the original sweep on failure or when no calibration is present.
"""
try:
# Get current calibration set through settings manager
calibration_set = self.settings_manager.get_current_calibration()
if calibration_set and calibration_set.is_complete():
# Apply calibration using our calibration processor
calibrated_points = self.calibration_processor.apply_calibration(sweep_data, calibration_set)
return SweepData(
calib_set = self.settings_manager.get_current_calibration()
if calib_set and calib_set.is_complete():
points = self.calibration_processor.apply_calibration(sweep_data, calib_set)
calibrated = SweepData(
sweep_number=sweep_data.sweep_number,
timestamp=sweep_data.timestamp,
points=calibrated_points,
total_points=len(calibrated_points)
points=points,
total_points=len(points),
)
except Exception as e:
self.logger.error(f"Calibration failed: {e}")
logger.debug(
"Sweep calibrated",
sweep_number=calibrated.sweep_number,
points=calibrated.total_points,
)
return calibrated
except Exception as exc: # noqa: BLE001
logger.error("Calibration failed", error=repr(exc))
# Return original data if calibration fails or not available
# Fallback: return the original data
return sweep_data
# --------------------------------------------------------------------- #
# Defaults
# --------------------------------------------------------------------- #
def _register_default_processors(self) -> None:
"""
Attempt to import and register default processors.
This is best-effort: if anything fails (e.g., module not present),
we log an error and keep going with whatever is available.
"""
try:
from .implementations import MagnitudeProcessor, PhaseProcessor, SmithChartProcessor
self.register_processor(MagnitudeProcessor(self.config_dir))
self.register_processor(PhaseProcessor(self.config_dir))
# self.register_processor(SmithChartProcessor(self.config_dir))
logger.info("Default processors registered", count=len(self._processors))
except Exception as exc: # noqa: BLE001
logger.error("Failed to register default processors", error=repr(exc))

View File

@ -1,94 +1,114 @@
import asyncio
import json
from dataclasses import asdict
from datetime import datetime
from typing import Any

from fastapi import WebSocket, WebSocketDisconnect

from vna_system.core.logging.logger import get_component_logger
from vna_system.core.processors.base_processor import ProcessedResult
from vna_system.core.processors.manager import ProcessorManager
from vna_system.core.processors.storage import DataStorage

logger = get_component_logger(__file__)
class ProcessorWebSocketHandler:
"""
    WebSocket hub for processor results.

    Responsibilities
    ----------------
    • Accept and manage client connections.
    • Handle client commands (e.g., recalculate, fetch history).
    • Broadcast `ProcessedResult` objects to all connected clients.
    • Bridge results produced on worker threads into the main asyncio loop.

    Threading model
    ---------------
    - Processor callbacks arrive on non-async worker threads.
    - We capture the main running event loop when the first client connects.
    - Cross-thread scheduling uses `asyncio.run_coroutine_threadsafe`.
"""
    def __init__(self, processor_manager: ProcessorManager, data_storage: DataStorage) -> None:
self.processor_manager = processor_manager
self.data_storage = data_storage
        self.active_connections: set[WebSocket] = set()

        # Main FastAPI/uvicorn event loop handle (set on first connection).
        self._loop: asyncio.AbstractEventLoop | None = None

        # Subscribe to processor results (thread-safe callback).
        self.processor_manager.add_result_callback(self._on_processor_result)
        logger.debug("ProcessorWebSocketHandler initialized")
    # --------------------------------------------------------------------- #
    # Connection lifecycle
    # --------------------------------------------------------------------- #
    async def handle_websocket_connection(self, websocket: WebSocket) -> None:
        """
        Accept a WebSocket, then serve client messages until disconnect.

        Stores a reference to the running event loop so worker threads can
        safely schedule broadcasts into it.
        """
        if self._loop is None:
            self._loop = asyncio.get_running_loop()
            logger.info("Main event loop captured for broadcasts")
await websocket.accept()
self.active_connections.add(websocket)
self.logger.info(f"WebSocket connected. Total connections: {len(self.active_connections)}")
logger.info("WebSocket connected", total_connections=len(self.active_connections))
try:
while True:
                raw = await websocket.receive_text()
                await self.handle_message(websocket, raw)
except WebSocketDisconnect:
await self.disconnect(websocket)
        except Exception as exc:  # noqa: BLE001
            logger.error("WebSocket error", error=repr(exc))
await self.disconnect(websocket)
    async def disconnect(self, websocket: WebSocket) -> None:
        """Remove a connection and log the updated count."""
        if websocket in self.active_connections:
            self.active_connections.remove(websocket)
        logger.info("WebSocket disconnected", total_connections=len(self.active_connections))

    # --------------------------------------------------------------------- #
    # Inbound messages
    # --------------------------------------------------------------------- #
    async def handle_message(self, websocket: WebSocket, data: str) -> None:
        """Parse and route an inbound client message."""
        try:
            message = json.loads(data)
            mtype = message.get("type")

            if mtype == "recalculate":
                await self._handle_recalculate(websocket, message)
            elif mtype == "get_history":
                await self._handle_get_history(websocket, message)
            else:
                await self._send_error(websocket, f"Unknown message type: {mtype!r}")
        except json.JSONDecodeError:
            await self._send_error(websocket, "Invalid JSON format")
        except Exception as exc:  # noqa: BLE001
            logger.error("Error handling websocket message")
            await self._send_error(websocket, f"Internal error: {exc}")

    # --------------------------------------------------------------------- #
    # Client commands
    # --------------------------------------------------------------------- #
    async def _handle_recalculate(self, websocket: WebSocket, message: dict[str, Any]) -> None:
        """
        Recalculate a processor (optionally with config updates) and send the result back.
        """
        processor_id = message.get("processor_id")
        config_updates = message.get("config_updates")
if not processor_id:
await self._send_error(websocket, "processor_id is required")
@ -97,18 +117,19 @@ class ProcessorWebSocketHandler:
try:
result = self.processor_manager.recalculate_processor(processor_id, config_updates)
if result:
                await websocket.send_text(json.dumps(self._result_to_message(processor_id, result)))
            else:
                await self._send_error(websocket, f"No result from processor {processor_id}")
        except Exception as exc:  # noqa: BLE001
            logger.error("Recalculation failed")
            await self._send_error(websocket, f"Recalculation failed: {exc}")
async def _handle_get_history(self, websocket: WebSocket, message: dict[str, Any]) -> None:
"""
Fetch recent results history for a given processor and send it to the client.
"""
processor_id = message.get("processor_id")
limit = int(message.get("limit", 10))
if not processor_id:
await self._send_error(websocket, "processor_id is required")
@ -117,108 +138,114 @@ class ProcessorWebSocketHandler:
        try:
            history = self.data_storage.get_results_history(processor_id, limit)
            response = {
                "type": "processor_history",
                "processor_id": processor_id,
                "history": [
                    {
                        "timestamp": r.timestamp,
                        "data": r.data,
                        "plotly_config": r.plotly_config,
                        "metadata": r.metadata,
                    }
                    for r in history
                ],
            }
            await websocket.send_text(json.dumps(response))
        except Exception as exc:  # noqa: BLE001
            logger.error("Error getting history")
            await self._send_error(websocket, f"Error getting history: {exc}")
    # --------------------------------------------------------------------- #
    # Outbound helpers
    # --------------------------------------------------------------------- #
    def _result_to_message(self, processor_id: str, result: ProcessedResult) -> dict[str, Any]:
        """Convert a `ProcessedResult` into a JSON-serializable message."""
        return {
            "type": "processor_result",
            "processor_id": processor_id,
            "timestamp": result.timestamp,
            "data": result.data,
            "plotly_config": result.plotly_config,
            "ui_parameters": [asdict(param) for param in result.ui_parameters],
            "metadata": result.metadata,
        }
    async def _send_error(self, websocket: WebSocket, message: str) -> None:
        """Send a standardized error payload to a single client."""
        try:
            payload = {
                "type": "error",
                "message": message,
                "timestamp": datetime.now().timestamp(),
            }
            await websocket.send_text(json.dumps(payload))
        except Exception as exc:  # noqa: BLE001
            logger.error("Error sending error message", error=repr(exc))
    # --------------------------------------------------------------------- #
    # Result callback bridge (worker thread -> asyncio loop)
    # --------------------------------------------------------------------- #
    def _on_processor_result(self, processor_id: str, result: ProcessedResult) -> None:
        """
        Callback invoked on a worker thread when a processor produces a result.

        We:
          - Store the result synchronously in `DataStorage`.
          - Schedule a coroutine on the main event loop to broadcast to clients.
        """
        # Best-effort persistence
        try:
            self.data_storage.store_result(processor_id, result)
        except Exception:  # noqa: BLE001
            logger.error("Failed to store processor result")

        # Broadcast to clients
self._broadcast_result_sync(processor_id, result)
    def _broadcast_result_sync(self, processor_id: str, result: ProcessedResult) -> None:
        """
        Thread-safe broadcast entrypoint from worker threads.

        Serializes once and schedules `_send_to_connections(...)` on the main loop.
        """
        if not self.active_connections:
            return

        loop = self._loop
        if loop is None or not loop.is_running():
            logger.debug("No running event loop available for broadcast; skipping")
            return

        try:
            message_str = json.dumps(self._result_to_message(processor_id, result))
        except Exception as exc:  # noqa: BLE001
            logger.error(f"Failed to serialize result for broadcast, {processor_id=}", error=repr(exc))
            return

        try:
            asyncio.run_coroutine_threadsafe(self._send_to_connections(message_str), loop)
        except Exception as exc:  # noqa: BLE001
            logger.error("Failed to schedule broadcast", error=repr(exc))
async def _send_to_connections(self, message_str: str) -> None:
"""
        Broadcast a pre-serialized JSON string to all active connections.

        Removes connections that fail during send.
        """
        if not self.active_connections:
            return

        disconnected: list[WebSocket] = []
        for websocket in list(self.active_connections):  # snapshot
            try:
                await websocket.send_text(message_str)
            except Exception as exc:  # noqa: BLE001
                logger.error("Broadcast to client failed; marking for disconnect", error=repr(exc))
                disconnected.append(websocket)

        for websocket in disconnected:
            try:
                await self.disconnect(websocket)
            except Exception as exc:  # noqa: BLE001
                logger.error("Error during disconnect cleanup", error=repr(exc))

View File

@ -1,18 +1,19 @@
from __future__ import annotations
import json
import shutil
from datetime import datetime
from enum import Enum
from pathlib import Path
from vna_system.core import config as cfg
from vna_system.core.logging.logger import get_component_logger
from vna_system.core.acquisition.sweep_buffer import SweepData
from vna_system.core.settings.preset_manager import ConfigPreset, VNAMode

logger = get_component_logger(__file__)
class CalibrationStandard(Enum):
"""Supported calibration standards."""
OPEN = "open"
SHORT = "short"
LOAD = "load"
@ -20,291 +21,354 @@ class CalibrationStandard(Enum):
class CalibrationSet:
def __init__(self, preset: ConfigPreset, name: str = ""):
"""
In-memory container of calibration measurements for a specific VNA preset.
A set is *complete* when all standards required by the preset mode are present:
- S11: OPEN, SHORT, LOAD
- S21: THROUGH
"""
def __init__(self, preset: ConfigPreset, name: str = "") -> None:
self.preset = preset
self.name = name
self.standards: Dict[CalibrationStandard, SweepData] = {}
self.standards: dict[CalibrationStandard, SweepData] = {}
# ------------------------------ mutation ------------------------------ #
def add_standard(self, standard: CalibrationStandard, sweep_data: SweepData) -> None:
"""Attach sweep data for a given standard."""
self.standards[standard] = sweep_data
logger.debug("Calibration standard added", standard=standard.value, points=sweep_data.total_points)
def remove_standard(self, standard: CalibrationStandard) -> None:
"""Remove sweep data for a given standard if present."""
if standard in self.standards:
del self.standards[standard]
logger.debug("Calibration standard removed", standard=standard.value)
# ------------------------------ queries -------------------------------- #
def has_standard(self, standard: CalibrationStandard) -> bool:
"""Check if standard is present in calibration set"""
"""Return True if the standard is present in the set."""
return standard in self.standards
def is_complete(self) -> bool:
"""Check if all required standards are present"""
required_standards = self._get_required_standards()
return all(std in self.standards for std in required_standards)
"""Return True if all required standards for the preset mode are present."""
required = self._get_required_standards()
complete = all(s in self.standards for s in required)
logger.debug("Calibration completeness checked", complete=complete, required=[s.value for s in required])
return complete
def get_missing_standards(self) -> list[CalibrationStandard]:
"""List the standards still missing for a complete set."""
required = self._get_required_standards()
return [s for s in required if s not in self.standards]
def get_progress(self) -> tuple[int, int]:
"""Get calibration progress as (completed, total)"""
required_standards = self._get_required_standards()
completed = len([std for std in required_standards if std in self.standards])
return completed, len(required_standards)
"""Return (completed, total_required) for progress display."""
required = self._get_required_standards()
completed = sum(1 for s in required if s in self.standards)
return completed, len(required)
# ------------------------------ internals ------------------------------ #
def _get_required_standards(self) -> list[CalibrationStandard]:
"""Standards required by the current preset mode."""
if self.preset.mode == VNAMode.S11:
return [CalibrationStandard.OPEN, CalibrationStandard.SHORT, CalibrationStandard.LOAD]
if self.preset.mode == VNAMode.S21:
return [CalibrationStandard.THROUGH]
return []
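
# Hypothetical S11 session against the CalibrationSet API above:
#   cs = CalibrationSet(preset)                # preset.mode == VNAMode.S11
#   cs.add_standard(CalibrationStandard.OPEN, open_sweep)
#   cs.get_progress()           -> (1, 3)
#   cs.get_missing_standards()  -> [SHORT, LOAD]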
class CalibrationManager:
"""
Filesystem-backed manager for calibration sets.
Layout
------
<BASE_DIR>/calibration/
├─ current_calibration -> <preset_dir>/<calibration_name>
├─ <preset_name>/
│ └─ <calibration_name>/
│ ├─ open.json / short.json / load.json / through.json
│ ├─ open_metadata.json ... (per-standard metadata)
│ └─ calibration_info.json (set-level metadata)
"""
def __init__(self, base_dir: Path | None = None) -> None:
self.base_dir = Path(base_dir or cfg.BASE_DIR)
self.calibration_dir = self.base_dir / "calibration"
self.current_calibration_symlink = self.calibration_dir / "current_calibration"
self.calibration_dir.mkdir(parents=True, exist_ok=True)
# Current working calibration set
self._current_working_set: CalibrationSet | None = None
logger.debug("CalibrationManager initialized", base_dir=str(self.base_dir))
# ------------------------------------------------------------------ #
# Working set lifecycle
# ------------------------------------------------------------------ #
def start_new_calibration(self, preset: ConfigPreset) -> CalibrationSet:
"""Start new calibration set for preset"""
"""Start a new, empty working set for a given preset."""
self._current_working_set = CalibrationSet(preset)
logger.info("New calibration session started", preset=preset.filename, mode=preset.mode.value)
return self._current_working_set
def get_current_working_set(self) -> CalibrationSet | None:
"""Get current working calibration set"""
"""Return the current working set (if any)."""
return self._current_working_set
def add_calibration_standard(self, standard: CalibrationStandard, sweep_data: SweepData) -> None:
"""Add a standard measurement to the active working set."""
if self._current_working_set is None:
raise RuntimeError("No active calibration set. Call start_new_calibration first.")
self._current_working_set.add_standard(standard, sweep_data)
def remove_calibration_standard(self, standard: CalibrationStandard) -> None:
"""Remove a standard measurement from the working set."""
if self._current_working_set is None:
raise RuntimeError("No active calibration set.")
self._current_working_set.remove_standard(standard)
# ------------------------------------------------------------------ #
# Persistence
# ------------------------------------------------------------------ #
def save_calibration_set(self, calibration_name: str) -> CalibrationSet:
"""Save current working calibration set to disk"""
"""
Persist the working set to disk under the preset directory.
Writes:
- per-standard sweeps as JSON
- per-standard metadata
- set-level metadata
"""
if self._current_working_set is None:
raise RuntimeError("No active calibration set to save.")
if not self._current_working_set.is_complete():
missing = [s.value for s in self._current_working_set.get_missing_standards()]
raise ValueError(f"Calibration incomplete. Missing standards: {missing}")
preset = self._current_working_set.preset
        calib_dir = self._get_preset_calibration_dir(preset) / calibration_name
        calib_dir.mkdir(parents=True, exist_ok=True)

        # Save standards
        for standard, sweep in self._current_working_set.standards.items():
sweep_json = {
                "sweep_number": sweep.sweep_number,
                "timestamp": sweep.timestamp,
                "points": sweep.points,
                "total_points": sweep.total_points,
}
self._atomic_json_write(calib_dir / f"{standard.value}.json", sweep_json)
file_path = calib_dir / f"{standard.value}.json"
with open(file_path, 'w') as f:
json.dump(sweep_json, f, indent=2)
# Save metadata for each standard
metadata = {
"preset": {
"filename": preset.filename,
"mode": preset.mode.value,
"start_freq": preset.start_freq,
"stop_freq": preset.stop_freq,
"points": preset.points,
"bandwidth": preset.bandwidth,
},
"calibration_name": calibration_name,
"standard": standard.value,
"sweep_number": sweep.sweep_number,
"sweep_timestamp": sweep.timestamp,
"created_timestamp": datetime.now().isoformat(),
"total_points": sweep.total_points,
}
self._atomic_json_write(calib_dir / f"{standard.value}_metadata.json", metadata)
metadata_path = calib_dir / f"{standard.value}_metadata.json"
with open(metadata_path, 'w') as f:
json.dump(metadata, f, indent=2)
# Save calibration set metadata
# Save set-level metadata
set_metadata = {
"preset": {
"filename": preset.filename,
"mode": preset.mode.value,
"start_freq": preset.start_freq,
"stop_freq": preset.stop_freq,
"points": preset.points,
"bandwidth": preset.bandwidth,
},
"calibration_name": calibration_name,
"standards": [s.value for s in self._current_working_set.standards],
"created_timestamp": datetime.now().isoformat(),
"is_complete": True,
}
self._atomic_json_write(calib_dir / "calibration_info.json", set_metadata)
set_metadata_path = calib_dir / "calibration_info.json"
with open(set_metadata_path, 'w') as f:
json.dump(set_metadata, f, indent=2)
# Set name for the working set
# Update working set name
self._current_working_set.name = calibration_name
logger.info("Calibration set saved", preset=preset.filename, name=calibration_name)
return self._current_working_set
def load_calibration_set(self, preset: ConfigPreset, calibration_name: str) -> CalibrationSet:
"""Load existing calibration set from disk"""
"""Load a calibration set from disk for the given preset."""
preset_dir = self._get_preset_calibration_dir(preset)
calib_dir = preset_dir / calibration_name
if not calib_dir.exists():
raise FileNotFoundError(f"Calibration not found: {calibration_name}")
calib_set = CalibrationSet(preset, calibration_name)
# Load all standard files
for standard in CalibrationStandard:
file_path = calib_dir / f"{standard.value}.json"
if not file_path.exists():
continue
try:
data = json.loads(file_path.read_text(encoding="utf-8"))
sweep = SweepData(
sweep_number=int(data["sweep_number"]),
timestamp=float(data["timestamp"]),
points=[(float(r), float(i)) for r, i in data["points"]],
total_points=int(data["total_points"]),
)
calib_set.add_standard(standard, sweep)
except Exception as exc: # noqa: BLE001
logger.warning("Failed to load standard file", file=str(file_path), error=repr(exc))
logger.info("Calibration set loaded", preset=preset.filename, name=calibration_name)
return calib_set
# ------------------------------------------------------------------ #
# Discovery & info
# ------------------------------------------------------------------ #
def get_available_calibrations(self, preset: ConfigPreset) -> list[str]:
"""Return sorted list of calibration set names available for a preset."""
preset_dir = self._get_preset_calibration_dir(preset)
if not preset_dir.exists():
return []
names = sorted([p.name for p in preset_dir.iterdir() if p.is_dir()])
logger.debug("Available calibrations listed", preset=preset.filename, count=len(names))
return names
def get_calibration_info(self, preset: ConfigPreset, calibration_name: str) -> dict:
"""
Return set-level info for a calibration (from cached metadata if present,
or by scanning the directory as a fallback).
"""
preset_dir = self._get_preset_calibration_dir(preset)
calib_dir = preset_dir / calibration_name
info_file = calib_dir / "calibration_info.json"
if info_file.exists():
try:
return json.loads(info_file.read_text(encoding="utf-8"))
except Exception as exc: # noqa: BLE001
logger.warning("Failed to read calibration_info.json; falling back to scan", error=repr(exc))
# Fallback: scan files
required = self._get_required_standards(preset.mode)
standards: dict[str, bool] = {}
for s in required:
standards[s.value] = (calib_dir / f"{s.value}.json").exists()
return {
"calibration_name": calibration_name,
"standards": standards,
"is_complete": all(standards.values()),
}
# ------------------------------------------------------------------ #
# Current calibration (symlink)
# ------------------------------------------------------------------ #
def set_current_calibration(self, preset: ConfigPreset, calibration_name: str) -> None:
"""Point the `current_calibration` symlink to the chosen calibration dir."""
preset_dir = self._get_preset_calibration_dir(preset)
calib_dir = preset_dir / calibration_name
if not calib_dir.exists():
raise FileNotFoundError(f"Calibration not found: {calibration_name}")
# Check if calibration is complete
info = self.get_calibration_info(preset, calibration_name)
if not info.get("is_complete", False):
raise ValueError(f"Calibration {calibration_name} is incomplete")
# Refresh symlink
try:
if self.current_calibration_symlink.exists() or self.current_calibration_symlink.is_symlink():
self.current_calibration_symlink.unlink()
except Exception as exc: # noqa: BLE001
logger.warning("Failed to remove existing current_calibration link", error=repr(exc))
try:
# Create a relative link when possible to keep the tree portable
relative = calib_dir
try:
relative = calib_dir.relative_to(self.calibration_dir)
except ValueError:
pass
self.current_calibration_symlink.symlink_to(relative)
logger.info("Current calibration set", preset=preset.filename, name=calibration_name)
except Exception as exc: # noqa: BLE001
logger.error("Failed to create current_calibration symlink", error=repr(exc))
raise
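For orientation, a comment sketch of the resulting on-disk layout (preset and calibration names are hypothetical, not produced by this module):
# calibration/
# ├─ s11_start100_stop8800_points1000_bw1khz/
# │  └─ bench_cal/                 # per-standard JSON + calibration_info.json
# └─ current_calibration -> s11_start100_stop8800_points1000_bw1khz/bench_cal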
def get_current_calibration(self, current_preset: ConfigPreset) -> CalibrationSet | None:
"""Get currently selected calibration as CalibrationSet"""
"""
Resolve and load the calibration currently pointed to by the symlink.
Returns None if the link doesn't exist, points to an invalid location,
or targets a different preset.
"""
if not self.current_calibration_symlink.exists():
return None
try:
target = self.current_calibration_symlink.resolve()
calibration_name = target.name
preset_dir_name = target.parent.name # <preset_name> (without .bin)
expected_preset_name = current_preset.filename.replace(".bin", "")
if preset_dir_name != expected_preset_name:
logger.warning(
"Current calibration preset mismatch",
expected=expected_preset_name,
actual=preset_dir_name,
)
raise RuntimeError("Current calibration belongs to a different preset")
return self.load_calibration_set(current_preset, calibration_name)
except Exception as exc: # noqa: BLE001
logger.warning("Failed to resolve current calibration", error=repr(exc))
return None
def clear_current_calibration(self) -> None:
"""Remove the current calibration symlink."""
if self.current_calibration_symlink.exists() or self.current_calibration_symlink.is_symlink():
try:
self.current_calibration_symlink.unlink()
logger.info("Current calibration cleared")
except Exception as exc: # noqa: BLE001
logger.warning("Failed to clear current calibration", error=repr(exc))
# ------------------------------------------------------------------ #
# Deletion
# ------------------------------------------------------------------ #
def delete_calibration(self, preset: ConfigPreset, calibration_name: str) -> None:
"""Delete a calibration set directory."""
calib_dir = self._get_preset_calibration_dir(preset) / calibration_name
if calib_dir.exists():
shutil.rmtree(calib_dir)
logger.info("Calibration deleted", preset=preset.filename, name=calibration_name)
# ------------------------------------------------------------------ #
# Helpers
# ------------------------------------------------------------------ #
def _get_preset_calibration_dir(self, preset: ConfigPreset) -> Path:
"""Get calibration directory for specific preset"""
preset_dir = self.calibration_dir / preset.filename.replace('.bin', '')
"""Return the directory where calibrations for this preset are stored."""
preset_dir = self.calibration_dir / preset.filename.replace(".bin", "")
preset_dir.mkdir(parents=True, exist_ok=True)
return preset_dir
def _get_required_standards(self, mode: VNAMode) -> list[CalibrationStandard]:
"""Standards required for a given VNA mode."""
if mode == VNAMode.S11:
return [CalibrationStandard.OPEN, CalibrationStandard.SHORT, CalibrationStandard.LOAD]
if mode == VNAMode.S21:
return [CalibrationStandard.THROUGH]
return []
@staticmethod
def _atomic_json_write(path: Path, payload: dict) -> None:
"""Write JSON atomically via a temporary sidecar file."""
tmp = path.with_suffix(path.suffix + ".tmp")
tmp.write_text(json.dumps(payload, indent=2, ensure_ascii=False), encoding="utf-8")
tmp.replace(path)
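A minimal end-to-end sketch of the save/activate flow; `open_sweep`, `short_sweep`, and `load_sweep` are assumed SweepData instances captured elsewhere, and the base path is illustrative:
from pathlib import Path
from vna_system.core.settings.calibration_manager import CalibrationManager, CalibrationStandard
from vna_system.core.settings.preset_manager import PresetManager

manager = CalibrationManager(Path("/data/vna"))          # hypothetical base dir
preset = PresetManager().get_current_preset()
if preset is not None:
    manager.start_new_calibration(preset)
    manager.add_calibration_standard(CalibrationStandard.OPEN, open_sweep)
    manager.add_calibration_standard(CalibrationStandard.SHORT, short_sweep)
    manager.add_calibration_standard(CalibrationStandard.LOAD, load_sweep)
    saved = manager.save_calibration_set("bench_cal")
    manager.set_current_calibration(preset, saved.name)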

View File

@ -1,21 +1,40 @@
from __future__ import annotations
import re
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from vna_system.core import config as cfg
from vna_system.core.logging.logger import get_component_logger
logger = get_component_logger(__file__)
class VNAMode(Enum):
"""Supported VNA measurement modes."""
S11 = "s11"
S21 = "s21"
@dataclass(slots=True)
class ConfigPreset:
"""
Parsed configuration preset derived from a filename.
Fields
------
filename:
Original filename (e.g., 's11_start100_stop8800_points1000_bw1khz.bin').
mode:
VNA mode (S11 or S21).
start_freq:
Start frequency in Hz (None if not provided in the filename).
stop_freq:
Stop frequency in Hz (None if not provided in the filename).
points:
Number of sweep points (None if not provided).
bandwidth:
IF bandwidth in Hz (None if not provided).
"""
filename: str
mode: VNAMode
start_freq: float | None = None
@ -25,105 +44,187 @@ class ConfigPreset:
class PresetManager:
"""
Discover, parse, and manage configuration presets stored on disk.
Directory layout
----------------
<BASE_DIR>/vna_system/binary_input/
├─ config_inputs/
│ └─ *.bin (preset files; configuration encoded in filename)
└─ current_input.bin -> config_inputs/<chosen>.bin
Filenames encode parameters, e.g.:
s11_start100_stop8800_points1000_bw1khz.bin
s21_start0.1ghz_stop3.0ghz_points1001_bw10kHz.bin
Parsing rules
-------------
- Mode must be the prefix: 's11' or 's21'
- start/stop: numbers with optional unit suffix (hz|khz|mhz|ghz). If no suffix, defaults to MHz.
- points: integer (points or point also accepted)
- bw: number with optional unit suffix (hz|khz|mhz). Defaults to Hz if absent.
"""
def __init__(self, binary_input_dir: Path | None = None) -> None:
self.binary_input_dir = Path(binary_input_dir or (cfg.BASE_DIR / "vna_system" / "binary_input"))
self.config_inputs_dir = self.binary_input_dir / "config_inputs"
self.current_input_symlink = self.binary_input_dir / "current_input.bin"
self.config_inputs_dir.mkdir(parents=True, exist_ok=True)
logger.debug(
"PresetManager initialized",
binary_input=str(self.binary_input_dir),
config_inputs=str(self.config_inputs_dir),
)
# ------------------------------------------------------------------ #
# Parsing
# ------------------------------------------------------------------ #
def _parse_filename(self, filename: str) -> ConfigPreset | None:
"""
Parse configuration parameters from a preset filename.
Accepted fragments (case-insensitive):
- ^s11 or ^s21
- start<value><unit?>
- stop<value><unit?>
- points?<int>
- bw<value><unit?>
Units
-----
- For start/stop: hz|khz|mhz|ghz (default: MHz when absent)
- For bw: hz|khz|mhz (default: Hz when absent)
"""
base = Path(filename).stem.lower()
# Mode at the beginning
if base.startswith("s11"):
mode = VNAMode.S11
elif base.startswith("s21"):
mode = VNAMode.S21
else:
logger.debug("Filename does not start with mode token", filename=filename)
return None
preset = ConfigPreset(filename=filename, mode=mode)
# Patterns with optional unit suffixes
pat_start = r"start(?P<val>\d+(?:\.\d+)?)(?P<unit>hz|khz|mhz|ghz)?"
pat_stop = r"stop(?P<val>\d+(?:\.\d+)?)(?P<unit>hz|khz|mhz|ghz)?"
pat_points = r"points?(?P<val>\d+)"
pat_bw = r"bw(?P<val>\d+(?:\.\d+)?)(?P<unit>hz|khz|mhz)?"
def _match(pattern: str) -> re.Match[str] | None:
return re.search(pattern, base, flags=re.IGNORECASE)
m = _match(pat_start)
if m:
preset.start_freq = self._to_hz(float(m.group("val")), (m.group("unit") or "mhz"))
m = _match(pat_stop)
if m:
preset.stop_freq = self._to_hz(float(m.group("val")), (m.group("unit") or "mhz"))
m = _match(pat_points)
if m:
preset.points = int(m.group("val"))
m = _match(pat_bw)
if m:
preset.bandwidth = self._to_hz(float(m.group("val")), (m.group("unit") or "hz"))
logger.debug(
"Preset filename parsed",
filename=filename,
start=preset.start_freq,
stop=preset.stop_freq,
points=preset.points,
bw=preset.bandwidth,
)
return preset
@staticmethod
def _to_hz(value: float, unit: str) -> float:
"""Convert a numeric value with textual unit into Hz."""
u = unit.lower()
if u == "hz":
return value
if u == "khz":
return value * 1e3
if u == "mhz":
return value * 1e6
if u == "ghz":
return value * 1e9
# Fallback: treat as Hz if unknown
return value
# ------------------------------------------------------------------ #
# Discovery & selection
# ------------------------------------------------------------------ #
def get_available_presets(self) -> list[ConfigPreset]:
"""
Return a list of available presets discovered in `config_inputs_dir`.
Only files that parse successfully are returned.
"""
presets: list[ConfigPreset] = []
if not self.config_inputs_dir.exists():
return presets
for file_path in self.config_inputs_dir.glob("*.bin"):
preset = self._parse_filename(file_path.name)
if preset is not None:
presets.append(preset)
return sorted(presets, key=lambda x: x.filename)
for path in self.config_inputs_dir.glob("*.bin"):
p = self._parse_filename(path.name)
if p is not None:
presets.append(p)
presets.sort(key=lambda x: x.filename.lower())
logger.debug("Available presets enumerated", count=len(presets))
return presets
def set_current_preset(self, preset: ConfigPreset) -> ConfigPreset:
"""Set current configuration by creating symlink to specified preset"""
preset_path = self.config_inputs_dir / preset.filename
if not preset_path.exists():
"""
Select a preset by (re)pointing `current_input.bin` to the chosen file.
"""
src = self.config_inputs_dir / preset.filename
if not src.exists():
raise FileNotFoundError(f"Preset file not found: {preset.filename}")
# Remove any existing link/file
if self.current_input_symlink.exists() or self.current_input_symlink.is_symlink():
try:
self.current_input_symlink.unlink()
except Exception as exc: # noqa: BLE001
logger.warning("Failed to remove existing current_input.bin", error=repr(exc))
# Prefer a relative symlink for portability
try:
target = src.relative_to(self.binary_input_dir)
except ValueError:
target = src
self.current_input_symlink.symlink_to(target)
logger.info("Current preset set", filename=preset.filename)
return preset
def get_current_preset(self) -> ConfigPreset | None:
"""Get currently selected configuration preset"""
"""
Resolve the `current_input.bin` symlink and parse the underlying preset.
Returns None if the symlink is missing or the filename cannot be parsed.
"""
if not self.current_input_symlink.exists():
return None
try:
target = self.current_input_symlink.resolve()
return self._parse_filename(target.name)
except Exception as exc: # noqa: BLE001
logger.warning("Failed to resolve current preset", error=repr(exc))
return None
def preset_exists(self, preset: ConfigPreset) -> bool:
"""Check if preset file exists"""
return (self.config_inputs_dir / preset.filename).exists()
"""Return True if the underlying file for `preset` exists."""
exists = (self.config_inputs_dir / preset.filename).exists()
logger.debug("Preset existence checked", filename=preset.filename, exists=exists)
return exists
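A quick sketch of the parsing and selection round-trip (filename illustrative; per the rules above, bare start/stop values default to MHz and bw to Hz):
from vna_system.core.settings.preset_manager import PresetManager

pm = PresetManager()
preset = pm._parse_filename("s11_start100_stop8800_points1000_bw1khz.bin")
# Expected: start_freq == 100e6, stop_freq == 8.8e9, points == 1000, bandwidth == 1e3
if preset is not None and pm.preset_exists(preset):
    pm.set_current_preset(preset)  # repoints current_input.bin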

View File

@ -1,124 +1,194 @@
from __future__ import annotations
from pathlib import Path
from typing import Any
from vna_system.core import config as cfg
from vna_system.core.logging.logger import get_component_logger
from vna_system.core.acquisition.data_acquisition import VNADataAcquisition
from vna_system.core.acquisition.sweep_buffer import SweepData
from vna_system.core.settings.preset_manager import PresetManager, ConfigPreset, VNAMode
from vna_system.core.settings.calibration_manager import (
CalibrationManager,
CalibrationSet,
CalibrationStandard,
)
logger = get_component_logger(__file__)
class VNASettingsManager:
"""
High-level coordinator for presets and calibrations.
Responsibilities
----------------
• Discover/select configuration presets (via `PresetManager`).
• Create/store/select calibration sets (via `CalibrationManager`).
• Provide combined status used by API/UI.
• Offer helpers to capture calibration directly from acquisition.
Notes
-----
- All IO paths are derived from `cfg.BASE_DIR`.
- Logging is performed via the project logger.
"""
def __init__(self, base_dir: Path | None = None) -> None:
self.base_dir = Path(base_dir or cfg.BASE_DIR)
# Sub-managers
self.preset_manager = PresetManager(self.base_dir / "binary_input")
self.calibration_manager = CalibrationManager(self.base_dir)
logger.debug(
"VNASettingsManager initialized",
base_dir=str(self.base_dir),
)
# ------------------------------------------------------------------ #
# Preset Management
# ------------------------------------------------------------------ #
def get_available_presets(self) -> list[ConfigPreset]:
"""Return all available configuration presets."""
return self.preset_manager.get_available_presets()
def get_presets_by_mode(self, mode: VNAMode) -> list[ConfigPreset]:
"""Return presets filtered by VNA mode."""
return [p for p in self.get_available_presets() if p.mode == mode]
def set_current_preset(self, preset: ConfigPreset) -> ConfigPreset:
"""Set current configuration preset"""
return self.preset_manager.set_current_preset(preset)
"""Set the current configuration preset (updates the symlink)."""
chosen = self.preset_manager.set_current_preset(preset)
logger.info("Current preset selected", filename=chosen.filename, mode=chosen.mode.value)
return chosen
def get_current_preset(self) -> ConfigPreset | None:
"""Get currently selected preset"""
"""Return the currently selected preset, or None if not set."""
return self.preset_manager.get_current_preset()
# ------------------------------------------------------------------ #
# Calibration Management
# ------------------------------------------------------------------ #
def start_new_calibration(self, preset: ConfigPreset | None = None) -> CalibrationSet:
"""Start new calibration for current or specified preset"""
"""
Begin a new in-memory calibration session for a preset.
If `preset` is omitted, the current preset must be set.
"""
preset = preset or self.get_current_preset()
if preset is None:
raise RuntimeError("No current preset selected")
calib = self.calibration_manager.start_new_calibration(preset)
logger.info("Calibration session started", preset=preset.filename, mode=preset.mode.value)
return calib
def get_current_working_calibration(self) -> CalibrationSet | None:
"""Get current working calibration set (in-progress, not yet saved)"""
"""Return the in-progress (unsaved) calibration set, if any."""
return self.calibration_manager.get_current_working_set()
def add_calibration_standard(self, standard: CalibrationStandard, sweep_data: SweepData) -> None:
"""Add a standard measurement to the working calibration set."""
self.calibration_manager.add_calibration_standard(standard, sweep_data)
logger.info(
"Calibration standard added",
standard=standard.value,
sweep_number=sweep_data.sweep_number,
points=sweep_data.total_points,
)
def remove_calibration_standard(self, standard: CalibrationStandard) -> None:
"""Remove a standard measurement from the working calibration set."""
self.calibration_manager.remove_calibration_standard(standard)
logger.info("Calibration standard removed", standard=standard.value)
def save_calibration_set(self, calibration_name: str) -> CalibrationSet:
"""Save current working calibration set"""
return self.calibration_manager.save_calibration_set(calibration_name)
"""Persist the current working calibration set to disk."""
saved = self.calibration_manager.save_calibration_set(calibration_name)
logger.info("Calibration set saved", name=calibration_name, preset=saved.preset.filename)
return saved
def get_available_calibrations(self, preset: ConfigPreset | None = None) -> list[str]:
"""List available calibration set names for a preset (current if omitted)."""
preset = preset or self.get_current_preset()
if preset is None:
return []
return self.calibration_manager.get_available_calibrations(preset)
def set_current_calibration(self, calibration_name: str, preset: ConfigPreset | None = None) -> None:
"""Activate a calibration set by updating the symlink."""
preset = preset or self.get_current_preset()
if preset is None:
raise RuntimeError("No current preset selected")
self.calibration_manager.set_current_calibration(preset, calibration_name)
logger.info("Current calibration set", name=calibration_name, preset=preset.filename)
def get_current_calibration(self) -> CalibrationSet | None:
"""Get currently selected calibration set (saved and active via symlink)"""
current_preset = self.get_current_preset()
if current_preset is not None:
return self.calibration_manager.get_current_calibration(current_preset)
else:
"""
Return the active (saved and selected) calibration set for the current preset,
or None if not set/invalid.
"""
current = self.get_current_preset()
if current is None:
return None
return self.calibration_manager.get_current_calibration(current)
def get_calibration_info(self, calibration_name: str, preset: ConfigPreset | None = None) -> dict[str, Any]:
"""Return info/metadata for a specific calibration set."""
preset = preset or self.get_current_preset()
if preset is None:
raise RuntimeError("No current preset selected")
return self.calibration_manager.get_calibration_info(preset, calibration_name)
@staticmethod
def get_required_standards(mode: VNAMode) -> list[CalibrationStandard]:
"""Return the list of required standards for a given VNA mode."""
if mode == VNAMode.S11:
return [CalibrationStandard.OPEN, CalibrationStandard.SHORT, CalibrationStandard.LOAD]
if mode == VNAMode.S21:
return [CalibrationStandard.THROUGH]
return []
# ------------------------------------------------------------------ #
# Acquisition integration
# ------------------------------------------------------------------ #
def capture_calibration_standard_from_acquisition(
self,
standard: CalibrationStandard,
data_acquisition: VNADataAcquisition,
) -> int:
"""
Capture the latest sweep from acquisition as a calibration standard.
Returns
-------
int
The sweep number captured.
Raises
------
RuntimeError
If no sweep is available in the acquisition buffer.
"""
latest = data_acquisition.sweep_buffer.get_latest_sweep()
if latest is None:
raise RuntimeError("No sweep data available in acquisition buffer")
self.add_calibration_standard(standard, latest)
logger.info(
"Captured calibration standard from acquisition",
standard=standard.value,
sweep_number=latest.sweep_number,
)
return latest.sweep_number
def get_status_summary(self) -> dict[str, object]:
"""Get comprehensive status of current configuration and calibration"""
current_preset = self.get_current_preset()
current_calibration = self.get_current_calibration()
working_calibration = self.get_current_working_calibration()
logger.info(f"Settings status requested")
summary = {
"current_preset": None,
"current_calibration": None,
@ -153,28 +223,4 @@ class VNASettingsManager:
"missing_standards": [s.value for s in working_calibration.get_missing_standards()]
}
return summary
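A hedged usage sketch tying the managers together (assumes the acquisition singleton is running and has buffered at least one sweep):
import vna_system.core.singletons as singletons
from vna_system.core.settings.calibration_manager import CalibrationStandard

sm = singletons.settings_manager
presets = sm.get_available_presets()
if presets:
    sm.set_current_preset(presets[0])
    sm.start_new_calibration()
    # Capture the newest buffered sweep as the OPEN standard.
    sm.capture_calibration_standard_from_acquisition(
        CalibrationStandard.OPEN,
        singletons.vna_data_acquisition_instance,
    )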

View File

@ -11,14 +11,14 @@ from vna_system.core.processors.storage.data_storage import DataStorage
from vna_system.core.settings.settings_manager import VNASettingsManager
from vna_system.core.processors.manager import ProcessorManager
from vna_system.core.processors.websocket_handler import ProcessorWebSocketHandler
from vna_system.core.config import PROCESSORS_CONFIG_DIR_PATH
# Global singleton instances
vna_data_acquisition_instance: VNADataAcquisition = VNADataAcquisition()
settings_manager: VNASettingsManager = VNASettingsManager()
# Processor system
processor_manager: ProcessorManager = ProcessorManager(vna_data_acquisition_instance.sweep_buffer, settings_manager, Path(PROCESSORS_CONFIG_DIR_PATH))
data_storage = DataStorage()
processor_websocket_handler: ProcessorWebSocketHandler = ProcessorWebSocketHandler(
processor_manager, data_storage

View File

@ -1,264 +1,292 @@
from pathlib import Path
import json
from typing import Any
import numpy as np
from vna_system.core.logging.logger import get_component_logger
from vna_system.core.acquisition.sweep_buffer import SweepData
from vna_system.core.settings.preset_manager import ConfigPreset
logger = get_component_logger(__file__)
# -----------------------------------------------------------------------------
# Plot builders
# -----------------------------------------------------------------------------
def generate_magnitude_plot_from_sweep_data(
sweep_data: SweepData,
preset: ConfigPreset | None = None,
) -> dict[str, Any]:
"""
Build a Plotly configuration for magnitude-vs-frequency from a `SweepData`.
Parameters
----------
sweep_data
Sweep payload with `.points: list[tuple[float, float]]` of (real, imag).
preset
Optional preset carrying frequency range (Hz). If absent, defaults are used.
Returns
-------
dict[str, Any]
Plotly figure spec. If input is invalid, returns {"error": "..."}.
"""
if not sweep_data or not sweep_data.points:
logger.warning("Invalid sweep passed to magnitude plot")
return {"error": "Invalid sweep data"}
# Frequency range (Hz)
start_freq = float(preset.start_freq) if (preset and preset.start_freq is not None) else 100e6
stop_freq = float(preset.stop_freq) if (preset and preset.stop_freq is not None) else 8.8e9
n = len(sweep_data.points)
if n == 1:
freqs = [start_freq]
else:
step = (stop_freq - start_freq) / max(1, n - 1)
freqs = [start_freq + i * step for i in range(n)]
# Magnitudes (dB). Clamp zero magnitude to -120 dB to avoid -inf.
mags_db: list[float] = []
for real, imag in sweep_data.points:
mag = abs(complex(real, imag))
mags_db.append(20.0 * np.log10(mag) if mag > 0.0 else -120.0)
# Reasonable Y range with margin
ymin = float(min(mags_db))
ymax = float(max(mags_db))
ymargin = (ymax - ymin) * 0.1 if ymax > ymin else 10.0
y_min = max(ymin - ymargin, -120.0)
y_max = min(ymax + ymargin, 20.0)
# Create Plotly trace
trace = {
"x": [f / 1e9 for f in freqs], # Hz -> GHz
"y": mags_db,
"type": "scatter",
"mode": "lines",
"name": "Magnitude",
"line": {"color": "#1f77b4", "width": 2},
}
fig = {
"data": [trace],
"layout": {
"title": "Magnitude Response",
"xaxis": {
"title": "Frequency (GHz)",
"showgrid": True,
"gridcolor": "#e5e5e5",
"gridwidth": 1,
},
"yaxis": {
"title": "Magnitude (dB)",
"range": [y_min, y_max],
"showgrid": True,
"gridcolor": "#e5e5e5",
"gridwidth": 1,
},
"plot_bgcolor": "#fafafa",
"paper_bgcolor": "#ffffff",
"font": {"family": "Arial, sans-serif", "size": 12, "color": "#333333"},
"hovermode": "x unified",
"showlegend": True,
"margin": {"l": 60, "r": 40, "t": 60, "b": 60},
},
}
logger.debug("Magnitude plot generated", points=n)
return fig
# -----------------------------------------------------------------------------
# IO helpers
# -----------------------------------------------------------------------------
def load_sweep_data_from_json(json_file: Path) -> SweepData:
"""
Load a `SweepData` structure from a JSON file.
The file is expected to contain:
{ "sweep_number": int, "timestamp": float, "points": [[r, i], ...], "total_points": int }
If `total_points` is missing, it is derived from the length of `points`.
"""
data = json.loads(Path(json_file).read_text(encoding="utf-8"))
points = data.get("points", [])
if not isinstance(points, list):
raise ValueError(f"Invalid 'points' in file: {json_file}")
# Normalize to list[tuple[float, float]]
norm_points: list[tuple[float, float]] = []
for pt in points:
if not (isinstance(pt, (list, tuple)) and len(pt) == 2):
raise ValueError(f"Invalid point format in {json_file}: {pt!r}")
r, i = pt
norm_points.append((float(r), float(i)))
sweep = SweepData(
sweep_number=int(data.get("sweep_number", 0)),
timestamp=float(data.get("timestamp", 0.0)),
points=norm_points,
total_points=int(data.get("total_points", len(norm_points))),
)
return sweep
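For reference, a payload that round-trips through this loader (values illustrative, path hypothetical):
import json
from pathlib import Path

sample = {
    "sweep_number": 42,
    "timestamp": 1727361723.0,
    "points": [[0.97, -0.02], [0.95, -0.05]],
    "total_points": 2,
}
tmp = Path("/tmp/open.json")
tmp.write_text(json.dumps(sample), encoding="utf-8")
sweep = load_sweep_data_from_json(tmp)  # points normalized to [(0.97, -0.02), (0.95, -0.05)]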
# -----------------------------------------------------------------------------
# Calibration plots
# -----------------------------------------------------------------------------
def generate_standards_magnitude_plots(
calibration_path: Path,
preset: ConfigPreset | None = None,
) -> dict[str, Any]:
"""
Build individual magnitude plots for all calibration standards found under a folder.
The function scans `calibration_path` for `*.json` (ignoring `*metadata.json`), loads each
sweep, and produces a Plotly config per standard. Raw sweep data and (optional) frequency
info are embedded into the output for convenience.
Returns
-------
dict[str, Any]
{ "<standard>": <plotly fig | {'error': str}>, ... }
"""
plots: dict[str, Any] = {}
standard_colors = {
"open": "#2ca02c", # Green
"short": "#d62728", # Red
"load": "#ff7f0e", # Orange
"through": "#1f77b4", # Blue
}
for standard_file in Path(calibration_path).glob("*.json"):
standard_name = standard_file.stem
if "metadata" in standard_name:
continue
try:
sweep = load_sweep_data_from_json(standard_file)
fig = generate_magnitude_plot_from_sweep_data(sweep, preset)
if "error" in fig:
plots[standard_name] = fig
continue
# Customize per-standard appearance/title
if fig.get("data"):
fig["data"][0]["line"]["color"] = standard_colors.get(standard_name, "#1f77b4")
fig["data"][0]["name"] = f"{standard_name.upper()} Standard"
fig["layout"]["title"] = f"{standard_name.upper()} Standard Magnitude"
# Attach raw sweep block for UI download/inspection
fig["raw_sweep_data"] = {
"sweep_number": sweep.sweep_number,
"timestamp": sweep.timestamp,
"total_points": sweep.total_points,
"points": sweep.points,
"file_path": str(standard_file),
}
# Optional frequency info
if preset:
fig["frequency_info"] = {
"start_freq": preset.start_freq,
"stop_freq": preset.stop_freq,
"points": preset.points,
"bandwidth": preset.bandwidth,
}
plots[standard_name] = fig
except (json.JSONDecodeError, FileNotFoundError, KeyError, ValueError) as exc:
logger.warning("Failed to load standard plot", file=str(standard_file), error=repr(exc))
plots[standard_name] = {"error": f"Failed to load {standard_name}: {exc}"}
return plots
def generate_combined_standards_plot(
calibration_path: Path,
preset: ConfigPreset | None = None,
) -> dict[str, Any]:
"""
Build a combined Plotly figure that overlays all available calibration standards.
Each standard is rendered as a separate trace with a canonical color.
"""
traces: list[dict[str, Any]] = []
standard_colors = {
"open": "#2ca02c", # Green
"short": "#d62728", # Red
"load": "#ff7f0e", # Orange
"through": "#1f77b4", # Blue
}
# Initialize Y range trackers inversely so first update sets them correctly.
y_min, y_max = 0.0, -120.0
for standard_file in Path(calibration_path).glob("*.json"):
name = standard_file.stem
if "metadata" in name:
continue
try:
sweep = load_sweep_data_from_json(standard_file)
fig = generate_magnitude_plot_from_sweep_data(sweep, preset)
if "error" in fig or not fig.get("data"):
continue
trace = dict(fig["data"][0]) # shallow copy
trace["line"]["color"] = standard_colors.get(name, "#1f77b4")
trace["name"] = f"{name.upper()} Standard"
traces.append(trace)
# Update Y range
y_vals = trace.get("y") or []
if y_vals:
tmin = float(min(y_vals))
tmax = float(max(y_vals))
y_min = min(y_min, tmin)
y_max = max(y_max, tmax)
except (json.JSONDecodeError, FileNotFoundError, KeyError, ValueError) as exc:
logger.warning("Failed to include standard in combined plot", file=str(standard_file), error=repr(exc))
continue
if not traces:
return {"error": "No valid calibration standards found"}
# Add margin to Y range
ymargin = (y_max - y_min) * 0.1 if y_max > y_min else 10.0
y_min = max(y_min - ymargin, -120.0)
y_max = min(y_max + ymargin, 20.0)
fig = {
"data": traces,
"layout": {
"title": "Calibration Standards Comparison",
"xaxis": {
"title": "Frequency (GHz)",
"showgrid": True,
"gridcolor": "#e5e5e5",
"gridwidth": 1,
},
"yaxis": {
"title": "Magnitude (dB)",
"range": [y_min, y_max],
"showgrid": True,
"gridcolor": "#e5e5e5",
"gridwidth": 1,
},
"plot_bgcolor": "#fafafa",
"paper_bgcolor": "#ffffff",
"font": {"family": "Arial, sans-serif", "size": 12, "color": "#333333"},
"hovermode": "x unified",
"showlegend": True,
"margin": {"l": 60, "r": 40, "t": 60, "b": 60},
},
}
logger.debug("Combined standards plot generated", traces=len(traces))
return fig
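A small sketch of feeding a saved calibration directory through these builders (paths illustrative):
from pathlib import Path
import json

calib_dir = Path("calibration/s11_start100_stop8800_points1000_bw1khz/bench_cal")
per_standard = generate_standards_magnitude_plots(calib_dir, preset=None)
combined = generate_combined_standards_plot(calib_dir, preset=None)
if "error" not in combined:
    Path("combined_plot.json").write_text(json.dumps(combined), encoding="utf-8")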

vna_system/main.py Normal file
View File

@ -0,0 +1,106 @@
import logging
import os
from contextlib import asynccontextmanager
from pathlib import Path
import uvicorn
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
import vna_system.core.singletons as singletons
from vna_system.api.endpoints import acquisition, health, settings, web_ui
from vna_system.api.websockets import processing as ws_processing
from vna_system.core.config import API_HOST, API_PORT
from vna_system.core.logging.logger import get_component_logger, setup_logging
PROJECT_ROOT = Path(__file__).resolve().parents[1]
LOG_DIR = PROJECT_ROOT / "logs"
setup_logging(log_level=os.getenv("VNA_LOG_LEVEL", "INFO"), log_dir=LOG_DIR)
for noisy in (
"uvicorn.error",
"uvicorn.access",
):
logging.getLogger(noisy).setLevel(logging.ERROR)
logger = get_component_logger(__file__)
@asynccontextmanager
async def lifespan(app: FastAPI):
logger.info("Starting VNA API Server")
try:
logger.info("Starting data acquisition")
singletons.vna_data_acquisition_instance.start()
logger.info("Starting processor system")
singletons.processor_manager.start_processing()
logger.info(
"Processor system started",
processors=singletons.processor_manager.list_processors(),
)
logger.info("VNA API Server started successfully")
yield
except Exception as exc:
logger.error("Error during startup", error=repr(exc))
raise
logger.info("Shutting down VNA API Server")
if singletons.processor_manager:
singletons.processor_manager.stop_processing()
logger.info("Processor system stopped")
if getattr(singletons, "vna_data_acquisition_instance", None) and singletons.vna_data_acquisition_instance.is_running:
singletons.vna_data_acquisition_instance.stop()
logger.info("Acquisition stopped")
logger.info("VNA API Server shutdown complete")
app = FastAPI(
title="VNA System API",
description="Real-time VNA data acquisition and processing API",
version="1.0.0",
lifespan=lifespan,
)
WEB_UI_DIR = Path(__file__).parent / "web_ui"
STATIC_DIR = WEB_UI_DIR / "static"
if STATIC_DIR.exists():
app.mount("/static", StaticFiles(directory=str(STATIC_DIR)), name="static")
logger.info("Mounted static files", directory=str(STATIC_DIR))
else:
logger.warning("Static directory not found", directory=str(STATIC_DIR))
app.include_router(web_ui.router)
app.include_router(health.router)
app.include_router(acquisition.router)
app.include_router(settings.router)
app.include_router(ws_processing.router)
def main() -> None:
host = os.getenv("VNA_HOST", API_HOST)
port_env = os.getenv("VNA_PORT")
if port_env is not None:
try:
port = int(port_env)
except ValueError:
logger.warning("Invalid VNA_PORT, falling back to config", VNA_PORT=port_env)
port = API_PORT
else:
port = API_PORT
logger.info("Launching Uvicorn", host=host, port=port)
uvicorn.run(
"vna_system.main:app",
host=host,
port=port,
log_level="info",
reload=False,
)
if __name__ == "__main__":
main()
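A hedged launch sketch showing the environment overrides main() honors (values illustrative; the shell equivalent is VNA_HOST=127.0.0.1 VNA_PORT=9000 python -m vna_system.main):
import os

os.environ["VNA_HOST"] = "127.0.0.1"
os.environ["VNA_PORT"] = "9000"
main()  # falls back to API_HOST/API_PORT when the variables are absent or invalid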

View File

@ -36,7 +36,7 @@ log_error() {
}
# Check if we're in the right directory
if [ ! -f "$PROJECT_ROOT/vna_system/api/main.py" ]; then
if [ ! -f "$PROJECT_ROOT/vna_system/main.py" ]; then
log_error "VNA System main.py not found. Please run this script from the project directory."
exit 1
fi
@ -89,6 +89,6 @@ log_info "Press Ctrl+C to stop the server"
echo
# Run the main application
exec python3 -m vna_system.main