This commit is contained in:
awe
2026-04-10 14:46:58 +03:00
parent 4dbedb48bc
commit 9aac162320
8 changed files with 227 additions and 22 deletions

View File

@ -71,8 +71,9 @@ def build_parser() -> argparse.ArgumentParser:
dest="bin_mode",
action="store_true",
help=(
"Бинарный протокол: старт свипа 0xFFFF,0xFFFF,0xFFFF,(CH<<8)|0x0A; "
"точки step,uint32(hi16,lo16),0x000A"
"8-байтный бинарный протокол: либо legacy старт "
"0xFFFF,0xFFFF,0xFFFF,(CH<<8)|0x0A и точки step,uint32(hi16,lo16),0x000A, "
"либо tty CH1/CH2 поток из kamil_adc в формате 0x000A,step,ch1_i16,ch2_i16"
),
)
parser.add_argument(

View File

@ -7,7 +7,7 @@ import sys
import threading
import time
from queue import Empty, Queue
from typing import Dict, List, Optional, Tuple
from typing import Dict, List, Optional, Sequence, Tuple
import numpy as np
@ -40,6 +40,7 @@ from rfg_adc_plotter.types import SweepAuxCurves, SweepInfo, SweepPacket
RAW_PLOT_MAX_POINTS = 4096
RAW_WATERFALL_MAX_POINTS = 2048
UI_MAX_PACKETS_PER_TICK = 8
DEBUG_FRAME_LOG_EVERY = 10
@ -242,6 +243,20 @@ def decimate_curve_for_display(
return x_arr[display_idx], y_arr[display_idx]
def coalesce_packets_for_ui(
    packets: Sequence[SweepPacket],
    *,
    max_packets: int = UI_MAX_PACKETS_PER_TICK,
) -> Tuple[List[SweepPacket], int]:
    """Drop all but the newest packets so a burst cannot starve the Qt event loop.

    Returns the retained tail of *packets* (at most ``max_packets`` items, never
    fewer than one) together with the number of older packets that were dropped.
    """
    items = list(packets)
    keep = max(1, int(max_packets))
    overflow = len(items) - keep
    if overflow <= 0:
        # Everything fits within the per-tick budget; nothing is skipped.
        return items, 0
    # Keep only the newest `keep` packets; report how many stale ones were shed.
    return items[overflow:], overflow
def resolve_visible_fft_curves(
fft_complex: Optional[np.ndarray],
fft_mag: Optional[np.ndarray],
@ -1244,6 +1259,7 @@ def run_pyqtgraph(args) -> None:
pass
processed_frames = 0
ui_frames_skipped = 0
ui_started_at = time.perf_counter()
def refresh_current_fft_cache(sweep_for_fft: np.ndarray, bins: int) -> None:
@ -1257,14 +1273,20 @@ def run_pyqtgraph(args) -> None:
runtime.current_fft_db = fft_mag_to_db(runtime.current_fft_mag)
# NOTE(review): this span is a rendered diff whose +/- markers and indentation
# were stripped — it interleaves PRE-change and POST-change lines of
# drain_queue() (plus two embedded hunk headers). Do not read it as one
# runnable function; recover either version from the original diff.
def drain_queue() -> int:
# Old declaration/counter (replaced by the two lines after them in the new version):
nonlocal processed_frames
drained = 0
# New declaration: also tracks UI frames skipped; packets are batched first.
nonlocal processed_frames, ui_frames_skipped
pending_packets: List[SweepPacket] = []
while True:
try:
# Old body: unpacked each packet inline. New body: collects raw packets.
sweep, info, aux_curves = queue.get_nowait()
pending_packets.append(queue.get_nowait())
except Empty:
break
# Old: incremented per packet inside the loop. New: counted after draining.
drained += 1
drained = len(pending_packets)
if drained <= 0:
return 0
# New version coalesces a burst down to the newest packets before processing.
pending_packets, skipped_packets = coalesce_packets_for_ui(pending_packets)
ui_frames_skipped += skipped_packets
for sweep, info, aux_curves in pending_packets:
base_freqs = np.linspace(SWEEP_FREQ_MIN_GHZ, SWEEP_FREQ_MAX_GHZ, sweep.size, dtype=np.float64)
runtime.full_current_aux_curves = None
runtime.full_current_fft_source = None
@ -1317,7 +1339,7 @@ def run_pyqtgraph(args) -> None:
elapsed_s = max(time.perf_counter() - ui_started_at, 1e-9)
frames_per_sec = float(processed_frames) / elapsed_s
sys.stderr.write(
# Old debug format string vs new one with an extra dropped:%d field:
"[debug] ui frames:%d rate:%.2f/s last_sweep:%s ch:%s width:%d queue:%d\n"
"[debug] ui frames:%d rate:%.2f/s last_sweep:%s ch:%s width:%d queue:%d dropped:%d\n"
% (
processed_frames,
frames_per_sec,
@ -1325,10 +1347,10 @@ def run_pyqtgraph(args) -> None:
str(info.get("ch") if isinstance(info, dict) else None),
int(getattr(sweep, "size", 0)),
int(queue_size),
# New argument feeding the dropped:%d field above.
int(ui_frames_skipped),
)
)
# Old guard (drained > 0) vs new unconditional call — the new version already
# early-returns 0 when nothing was drained, so the guard became redundant.
if drained > 0:
update_physical_axes()
update_physical_axes()
return drained
try:

View File

@ -32,6 +32,11 @@ def log_pair_to_sweep(avg_1: int, avg_2: int) -> float:
return abs(value_1 - value_2) * LOG_POSTSCALER
def tty_ch_pair_to_sweep(ch_1: int, ch_2: int) -> float:
    """Collapse one raw CH1/CH2 TTY sample pair into a single sweep value.

    The sweep value is the absolute difference of the two channel readings,
    returned as a float.
    """
    delta = int(ch_1) - int(ch_2)
    return float(-delta if delta < 0 else delta)
class AsciiSweepParser:
"""Incremental parser for ASCII sweep streams."""
@ -139,7 +144,7 @@ class ComplexAsciiSweepParser:
class LegacyBinaryParser:
"""Byte-resynchronizing parser for legacy 8-byte binary records."""
"""Byte-resynchronizing parser for supported 8-byte binary record formats."""
def __init__(self):
self._buf = bytearray()
@ -158,6 +163,7 @@ class LegacyBinaryParser:
w0 = self._u16_at(self._buf, 0)
w1 = self._u16_at(self._buf, 2)
w2 = self._u16_at(self._buf, 4)
w3 = self._u16_at(self._buf, 6)
if w0 == 0xFFFF and w1 == 0xFFFF and w2 == 0xFFFF and self._buf[6] == 0x0A:
self._last_step = None
self._seen_points = False
@ -174,6 +180,29 @@ class LegacyBinaryParser:
events.append(PointEvent(ch=ch, x=int(w0), y=float(value)))
del self._buf[:8]
continue
if w0 == 0x000A and w1 == 0xFFFF and w2 == 0xFFFF and w3 == 0xFFFF:
self._last_step = None
self._seen_points = False
events.append(StartEvent(ch=0))
del self._buf[:8]
continue
if w0 == 0x000A and w1 != 0xFFFF:
if self._seen_points and self._last_step is not None and w1 <= self._last_step:
events.append(StartEvent(ch=0))
self._seen_points = True
self._last_step = int(w1)
ch_1 = u16_to_i16(w2)
ch_2 = u16_to_i16(w3)
events.append(
PointEvent(
ch=0,
x=int(w1),
y=tty_ch_pair_to_sweep(ch_1, ch_2),
aux=(float(ch_1), float(ch_2)),
)
)
del self._buf[:8]
continue
del self._buf[:1]
return events

View File

@ -22,6 +22,7 @@ from rfg_adc_plotter.types import ParserEvent, PointEvent, SweepPacket
_PARSER_16_BIT_X2_PROBE_BYTES = 64 * 1024
_LEGACY_STREAM_MIN_RECORDS = 32
_LEGACY_STREAM_MIN_MATCH_RATIO = 0.95
_TTY_STREAM_MIN_MATCH_RATIO = 0.60
_DEBUG_FRAME_LOG_EVERY = 10
@ -30,27 +31,42 @@ def _u16le_at(data: bytes, offset: int) -> int:
# NOTE(review): this span is a rendered diff whose +/- markers and indentation
# were stripped — PRE-change and POST-change lines of this function are
# interleaved (two docstrings, `matched_steps` vs `matched_steps_legacy`/
# `matched_steps_tty`, two alternative tails). Recover one version from the
# original diff before reading it as code.
def _looks_like_legacy_8byte_stream(data: bytes) -> bool:
# Old docstring, then its replacement:
"""Heuristically detect the legacy 8-byte stream on an arbitrary byte offset."""
"""Heuristically detect supported 8-byte binary streams on an arbitrary byte offset."""
buf = bytes(data)
# Try every possible alignment of the 8-byte records within the buffer.
for offset in range(8):
blocks = (len(buf) - offset) // 8
if blocks < _LEGACY_STREAM_MIN_RECORDS:
continue
min_matches = max(_LEGACY_STREAM_MIN_RECORDS, int(blocks * _LEGACY_STREAM_MIN_MATCH_RATIO))
# Old single accumulator, then the two new per-format accumulators:
matched_steps: list[int] = []
matched_steps_legacy: list[int] = []
matched_steps_tty: list[int] = []
for block_idx in range(blocks):
base = offset + (block_idx * 8)
# Old record filter (low byte of word 3 must be 0x0A), interleaved with the
# new tty-format branch (records starting with 0x000A and a non-0xFFFF step):
if (_u16le_at(buf, base + 6) & 0x00FF) != 0x000A:
w0 = _u16le_at(buf, base)
w1 = _u16le_at(buf, base + 2)
if w0 == 0x000A and w1 != 0xFFFF:
matched_steps_tty.append(w1)
continue
# Old tail: single-accumulator monotonic-or-reset scoring.
matched_steps.append(_u16le_at(buf, base))
if len(matched_steps) < min_matches:
continue
monotonic_or_reset = 0
for prev_step, next_step in zip(matched_steps, matched_steps[1:]):
if next_step == (prev_step + 1) or next_step <= prev_step:
monotonic_or_reset += 1
if monotonic_or_reset >= max(4, len(matched_steps) - 4):
return True
# New tail: legacy-format scoring (steps increment by one or reset downward)...
matched_steps_legacy.append(_u16le_at(buf, base))
if len(matched_steps_legacy) >= min_matches:
monotonic_or_reset = 0
for prev_step, next_step in zip(matched_steps_legacy, matched_steps_legacy[1:]):
if next_step == (prev_step + 1) or next_step <= prev_step:
monotonic_or_reset += 1
if monotonic_or_reset >= max(4, len(matched_steps_legacy) - 4):
return True
# ...followed by tty-format scoring with a looser match ratio.
tty_min_matches = max(_LEGACY_STREAM_MIN_RECORDS, int(blocks * _TTY_STREAM_MIN_MATCH_RATIO))
if len(matched_steps_tty) >= tty_min_matches:
monotonic_or_reset = 0
for prev_step, next_step in zip(matched_steps_tty, matched_steps_tty[1:]):
# NOTE(review): `next_step <= 2` differs from the legacy branch's
# `next_step <= prev_step`. Presumably this means "step counter reset
# to the start of a sweep", but confirm it is not a typo for
# `next_step <= prev_step`.
if next_step == (prev_step + 1) or next_step <= 2:
monotonic_or_reset += 1
if monotonic_or_reset >= max(4, len(matched_steps_tty) - 4):
return True
return False