From 75b8810430ca2cd5718d6eff0cad97fd3d117d35 Mon Sep 17 00:00:00 2001 From: Alexander Poletaev Date: Thu, 29 Jan 2026 16:55:33 +0300 Subject: [PATCH] Rewrite all project for obd2 support --- can_sniffer/TECHNICAL_SPECIFICATION.md | 723 ++++++++++++++++ can_sniffer/config.json.example | 67 ++ can_sniffer/src/config.py | 500 ++++++----- can_sniffer/src/flipper/pages/__init__.py | 5 +- can_sniffer/src/flipper/pages/can_stats.py | 68 -- can_sniffer/src/flipper/pages/obd2_stats.py | 104 +++ can_sniffer/src/flipper/providers/__init__.py | 6 +- .../src/flipper/providers/can_provider.py | 135 --- .../src/flipper/providers/obd2_provider.py | 137 +++ can_sniffer/src/handlers/__init__.py | 9 +- can_sniffer/src/handlers/flipper_handler.py | 560 ++++-------- .../src/handlers/postgresql_handler.py | 151 ---- can_sniffer/src/handlers/realtime_handler.py | 114 +++ can_sniffer/src/handlers/storage_handler.py | 264 +++--- can_sniffer/src/main.py | 413 +++++++-- can_sniffer/src/obd2/__init__.py | 29 + can_sniffer/src/obd2/pids.py | 619 ++++++++++++++ can_sniffer/src/obd2/poller.py | 447 ++++++++++ can_sniffer/src/obd2/protocol.py | 316 +++++++ can_sniffer/src/obd2/response_matcher.py | 342 ++++++++ can_sniffer/src/obd2/transceiver.py | 370 ++++++++ .../src/postgresql_handler/__init__.py | 12 - .../postgresql_handler/postgresql_client.py | 759 ---------------- can_sniffer/src/socket_can/__init__.py | 7 - .../src/socket_can/message_processor.py | 434 ---------- can_sniffer/src/socket_can/src.py | 411 --------- can_sniffer/src/storage/storage.py | 807 ++++++++++++------ can_sniffer/src/vehicle/__init__.py | 14 + can_sniffer/src/vehicle/state.py | 283 ++++++ can_sniffer/src/vehicle/state_manager.py | 324 +++++++ 30 files changed, 5327 insertions(+), 3103 deletions(-) create mode 100644 can_sniffer/TECHNICAL_SPECIFICATION.md create mode 100644 can_sniffer/config.json.example delete mode 100644 can_sniffer/src/flipper/pages/can_stats.py create mode 100644 can_sniffer/src/flipper/pages/obd2_stats.py delete mode 100644 can_sniffer/src/flipper/providers/can_provider.py create mode 100644 can_sniffer/src/flipper/providers/obd2_provider.py delete mode 100644 can_sniffer/src/handlers/postgresql_handler.py create mode 100644 can_sniffer/src/handlers/realtime_handler.py create mode 100644 can_sniffer/src/obd2/__init__.py create mode 100644 can_sniffer/src/obd2/pids.py create mode 100644 can_sniffer/src/obd2/poller.py create mode 100644 can_sniffer/src/obd2/protocol.py create mode 100644 can_sniffer/src/obd2/response_matcher.py create mode 100644 can_sniffer/src/obd2/transceiver.py delete mode 100644 can_sniffer/src/postgresql_handler/__init__.py delete mode 100644 can_sniffer/src/postgresql_handler/postgresql_client.py delete mode 100644 can_sniffer/src/socket_can/__init__.py delete mode 100644 can_sniffer/src/socket_can/message_processor.py delete mode 100644 can_sniffer/src/socket_can/src.py create mode 100644 can_sniffer/src/vehicle/__init__.py create mode 100644 can_sniffer/src/vehicle/state.py create mode 100644 can_sniffer/src/vehicle/state_manager.py diff --git a/can_sniffer/TECHNICAL_SPECIFICATION.md b/can_sniffer/TECHNICAL_SPECIFICATION.md new file mode 100644 index 0000000..5459f84 --- /dev/null +++ b/can_sniffer/TECHNICAL_SPECIFICATION.md @@ -0,0 +1,723 @@ +# Техническое задание + +## Модернизация CAN Sniffer → OBD2 Onboard Computer + +**Версия:** 1.0 +**Дата:** 2025-01-29 +**Проект:** carpibord/can_sniffer + +--- + +## 1. 
Введение + +### 1.1 Цель документа + +Данное техническое задание описывает требования к модернизации существующего проекта `can_sniffer` из пассивного слушателя CAN-шины в полноценный OBD2 клиент для бортового компьютера. + +### 1.2 Текущее состояние + +**Архитектура can_sniffer (AS-IS):** + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ CAN Sniffer (текущий) │ +├─────────────────────────────────────────────────────────────────┤ +│ │ +│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ +│ │ CANBusHandler│─────►│MessageProcessor│──►│ Handlers │ │ +│ │ (читает) │ │ (очередь) │ │ (обработка) │ │ +│ └─────────────┘ └─────────────┘ └─────────────┘ │ +│ │ │ │ +│ ▼ ▼ │ +│ ┌─────────────┐ ┌─────────────┐ │ +│ │ CAN Bus │ │ SQLite │ │ +│ │ (только RX)│ │ PostgreSQL │ │ +│ └─────────────┘ └─────────────┘ │ +│ │ +└─────────────────────────────────────────────────────────────────┘ +``` + +**Ключевые характеристики:** +- Пассивный режим (только чтение) +- Сохранение сырых CAN-фреймов +- Нет понимания OBD2 протокола +- Нет отправки запросов +- Нет парсинга данных + +### 1.3 Целевое состояние + +**Архитектура OBD2 Client (TO-BE):** + +``` +┌─────────────────────────────────────────────────────────────────────────┐ +│ OBD2 Onboard Computer (целевой) │ +├─────────────────────────────────────────────────────────────────────────┤ +│ │ +│ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ │ +│ │ OBD2 Poller │────►│ CAN TX/RX │◄────│ Response │ │ +│ │ (запросы) │ │ Interface │ │ Matcher │ │ +│ └──────────────┘ └──────────────┘ └──────────────┘ │ +│ │ │ │ │ +│ ▼ ▼ ▼ │ +│ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ │ +│ │ PID Registry │ │ CAN Bus │ │ PID Decoder │ │ +│ │ (конфиг) │ │ (TX + RX) │ │ (парсинг) │ │ +│ └──────────────┘ └──────────────┘ └──────────────┘ │ +│ │ │ +│ ┌─────────────────────┘ │ +│ ▼ │ +│ ┌──────────────────────────────────────────────────────────┐ │ +│ │ Data Pipeline │ │ +│ │ ┌────────────┐ ┌────────────┐ ┌────────────┐ │ │ +│ │ │ Storage │ │ Realtime │ │ WebSocket │ │ │ +│ │ │ Handler │ │ State │ │ Publisher │ │ │ +│ │ └────────────┘ └────────────┘ └────────────┘ │ │ +│ └──────────────────────────────────────────────────────────┘ │ +│ │ +└─────────────────────────────────────────────────────────────────────────┘ +``` + +--- + +## 2. 
Анализ текущей кодовой базы + +### 2.1 Сильные стороны (сохранить) + +| Компонент | Описание | Рекомендация | +|-----------|----------|--------------| +| `CANFrame` | Иммутабельная модель CAN-сообщения | ✅ Сохранить без изменений | +| `BaseHandler` | Абстрактный интерфейс обработчиков | ✅ Расширить для OBD2 | +| `MessageProcessor` | Очередь с backpressure | ✅ Адаптировать для request/response | +| `Storage` | SQLite с WAL и batch-операциями | ✅ Расширить схему для OBD2 | +| `Config (Pydantic)` | Валидация конфигурации | ✅ Добавить OBD2 секцию | +| `Logger` | Структурированное логирование | ✅ Без изменений | + +### 2.2 Требующие модификации + +| Компонент | Текущее | Требуемое | +|-----------|---------|-----------| +| `CANBusHandler` | Только RX | TX + RX с корреляцией | +| `CANSniffer` | Пассивный режим | Активный polling | +| Handlers | Сохранение сырых данных | Парсинг + сохранение значений | + +### 2.3 Новые компоненты + +| Компонент | Назначение | +|-----------|------------| +| `OBD2Poller` | Периодическая отправка OBD2 запросов | +| `OBD2Protocol` | Кодирование/декодирование OBD2 | +| `PIDRegistry` | Реестр поддерживаемых PIDs | +| `ResponseMatcher` | Сопоставление запросов и ответов | +| `VehicleState` | Актуальное состояние автомобиля | +| `RealtimeHandler` | Обновление in-memory состояния | + +--- + +## 3. Функциональные требования + +### 3.1 OBD2 Protocol Layer + +#### FR-3.1.1 Поддержка стандарта ISO 15765-4 (CAN) + +**Требования:** +- Отправка запросов на CAN ID `0x7DF` (broadcast) или `0x7E0` (физический адрес ECU) +- Приём ответов с CAN ID `0x7E8` - `0x7EF` +- Поддержка Single Frame (SF) для коротких сообщений +- Поддержка Multi Frame (FF/CF) для VIN и длинных ответов + +**Формат Single Frame запроса:** +``` +Byte 0: Length (0x02 для Mode 01) +Byte 1: Mode (0x01 = Current Data) +Byte 2: PID +Bytes 3-7: Padding (0x00) +``` + +**Формат Single Frame ответа:** +``` +Byte 0: Length +Byte 1: Mode + 0x40 (0x41 для Mode 01) +Byte 2: PID +Bytes 3+: Data +``` + +#### FR-3.1.2 Поддерживаемые режимы (Modes) + +| Mode | Название | Приоритет | Описание | +|------|----------|-----------|----------| +| 01 | Current Data | P0 (обязательно) | Текущие значения датчиков | +| 03 | Stored DTCs | P1 | Сохранённые коды ошибок | +| 09 | Vehicle Info | P1 | VIN, Calibration ID | +| 04 | Clear DTCs | P2 | Очистка ошибок | + +#### FR-3.1.3 Минимальный набор PIDs (Mode 01) + +**Обязательные (P0):** + +| PID | Название | Формула | Единицы | +|-----|----------|---------|---------| +| 00 | Supported PIDs | Bitmap | - | +| 0C | Engine RPM | (A×256+B)/4 | rpm | +| 0D | Vehicle Speed | A | km/h | +| 05 | Coolant Temp | A-40 | °C | +| 04 | Engine Load | A×100/255 | % | +| 11 | Throttle Position | A×100/255 | % | +| 2F | Fuel Level | A×100/255 | % | + +**Желательные (P1):** + +| PID | Название | Формула | Единицы | +|-----|----------|---------|---------| +| 0F | Intake Air Temp | A-40 | °C | +| 10 | MAF Flow | (A×256+B)/100 | g/s | +| 46 | Ambient Temp | A-40 | °C | +| 5C | Oil Temp | A-40 | °C | +| 5E | Fuel Rate | (A×256+B)/20 | L/h | +| 1F | Run Time | A×256+B | s | + +### 3.2 Polling Engine + +#### FR-3.2.1 Конфигурируемый polling + +```python +# Пример конфигурации +{ + "obd2": { + "enabled": true, + "polling_groups": [ + { + "name": "fast", + "interval_ms": 100, + "pids": ["0C", "0D", "11"] # RPM, Speed, Throttle + }, + { + "name": "medium", + "interval_ms": 1000, + "pids": ["05", "04", "2F"] # Coolant, Load, Fuel + }, + { + "name": "slow", + "interval_ms": 5000, + "pids": ["46", "5C", "0F"] # Temps + } + 
], + "request_timeout_ms": 100, + "retry_count": 2 + } +} +``` + +#### FR-3.2.2 Request/Response корреляция + +**Требования:** +- Каждый запрос имеет уникальный correlation ID +- Таймаут ожидания ответа (configurable) +- Retry логика при отсутствии ответа +- Метрики: latency, success rate, timeouts + +### 3.3 Data Storage + +#### FR-3.3.1 Расширение SQLite схемы + +**Новая таблица `obd2_readings`:** + +```sql +CREATE TABLE obd2_readings ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + timestamp_ns INTEGER NOT NULL, + pid TEXT NOT NULL, + pid_name TEXT, + raw_value BLOB, + decoded_value REAL, + unit TEXT, + is_valid BOOLEAN DEFAULT TRUE, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); + +CREATE INDEX idx_obd2_timestamp ON obd2_readings(timestamp_ns); +CREATE INDEX idx_obd2_pid ON obd2_readings(pid); +``` + +**Новая таблица `obd2_sessions`:** + +```sql +CREATE TABLE obd2_sessions ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + started_at TIMESTAMP NOT NULL, + ended_at TIMESTAMP, + vin TEXT, + total_distance_km REAL, + avg_speed_kmh REAL, + max_speed_kmh REAL, + fuel_consumed_l REAL +); +``` + +#### FR-3.3.2 Режимы хранения + +| Режим | Описание | Retention | +|-------|----------|-----------| +| `full` | Все readings | 7 дней | +| `aggregated` | Средние за интервал | 30 дней | +| `events_only` | Только аномалии | 90 дней | + +### 3.4 Realtime State + +#### FR-3.4.1 In-memory состояние автомобиля + +```python +@dataclass +class VehicleState: + """Актуальное состояние автомобиля в памяти.""" + # Обновляется при каждом успешном ответе + timestamp: float + + # Двигатель + rpm: float = 0 + engine_load: float = 0 + coolant_temp: float = 0 + oil_temp: float = 0 + + # Движение + speed: float = 0 + throttle_pos: float = 0 + + # Топливо + fuel_level: float = 0 + fuel_rate: float = 0 + + # Мета + vin: str = "" + dtc_count: int = 0 + + # Состояние связи + ecu_connected: bool = False + last_response_time: float = 0 +``` + +#### FR-3.4.2 Подписка на изменения + +```python +class VehicleStateManager: + def subscribe(self, callback: Callable[[str, Any], None]) -> None: + """Подписаться на изменения параметров.""" + pass + + def get_current_state(self) -> VehicleState: + """Получить текущее состояние.""" + pass +``` + +--- + +## 4. Нефункциональные требования + +### 4.1 Производительность + +| Метрика | Требование | +|---------|------------| +| Polling latency | < 50ms (95 percentile) | +| State update delay | < 100ms | +| Memory footprint | < 100MB RSS | +| CPU usage (idle) | < 5% | +| CPU usage (polling) | < 15% | + +### 4.2 Надёжность + +| Требование | Описание | +|------------|----------| +| ECU disconnect | Автоматическое reconnect с backoff | +| Data integrity | Транзакции SQLite, fsync | +| Graceful shutdown | Сохранение всех pending данных | +| Error isolation | Ошибка одного PID не блокирует другие | + +### 4.3 Совместимость + +| Аспект | Требование | +|--------|------------| +| Python | 3.11+ | +| OS | Linux (Raspberry Pi OS) | +| CAN Hardware | SocketCAN совместимые (MCP2515, MCP251xFD) | +| OBD2 Protocol | ISO 15765-4 CAN (500 kbps) | + +--- + +## 5. 
Архитектура решения + +### 5.1 Структура каталогов (целевая) + +``` +can_sniffer/ +├── src/ +│ ├── main.py +│ ├── config.py # + OBD2Config +│ ├── can_frame.py # без изменений +│ ├── logger.py # без изменений +│ │ +│ ├── socket_can/ +│ │ ├── src.py # CANBusHandler + TX +│ │ ├── message_processor.py # адаптация +│ │ └── can_transceiver.py # НОВЫЙ: TX/RX с корреляцией +│ │ +│ ├── obd2/ # НОВЫЙ модуль +│ │ ├── __init__.py +│ │ ├── protocol.py # OBD2 encoding/decoding +│ │ ├── pids.py # PID registry +│ │ ├── poller.py # Polling engine +│ │ ├── response_matcher.py # Request/Response correlation +│ │ └── multi_frame.py # Multi-frame support +│ │ +│ ├── vehicle/ # НОВЫЙ модуль +│ │ ├── __init__.py +│ │ ├── state.py # VehicleState dataclass +│ │ └── state_manager.py # State management + subscriptions +│ │ +│ ├── handlers/ +│ │ ├── base.py # без изменений +│ │ ├── storage_handler.py # расширить для obd2_readings +│ │ ├── postgresql_handler.py # расширить схему +│ │ ├── flipper_handler.py # адаптировать вывод +│ │ └── realtime_handler.py # НОВЫЙ: обновление VehicleState +│ │ +│ └── storage/ +│ └── storage.py # + новые таблицы +│ +├── deploy/ +│ └── ... +│ +└── tests/ + ├── test_obd2_protocol.py + ├── test_poller.py + └── test_pid_decoder.py +``` + +### 5.2 Диаграмма потоков данных + +``` +┌─────────────────────────────────────────────────────────────────────────┐ +│ Main Thread │ +│ ┌─────────────┐ │ +│ │ OBD2Poller │──── Polling Timer ────┐ │ +│ └─────────────┘ │ │ +│ ▼ │ +│ ┌─────────────────┐ │ +│ │ CANTransceiver │ │ +│ │ │ │ +│ │ TX Queue ────► │──── CAN Bus │ +│ │ RX Handler ◄── │◄─── CAN Bus │ +│ └────────┬────────┘ │ +│ │ │ +│ ▼ │ +│ ┌─────────────────┐ │ +│ │ ResponseMatcher │ │ +│ │ │ │ +│ │ Pending: { │ │ +│ │ req_id: PID │ │ +│ │ } │ │ +│ └────────┬────────┘ │ +│ │ │ +│ ▼ │ +│ ┌─────────────────┐ │ +│ │ PID Decoder │ │ +│ │ │ │ +│ │ 0x0C → 3000 rpm │ │ +│ └────────┬────────┘ │ +│ │ │ +│ ┌────────────────────────┼────────────────────────┐ │ +│ ▼ ▼ ▼ │ +│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ +│ │ Realtime │ │ Storage │ │ WebSocket │ │ +│ │ Handler │ │ Handler │ │ Publisher │ │ +│ │ │ │ │ │ (future) │ │ +│ │ VehicleState│ │ SQLite │ │ │ │ +│ └─────────────┘ └─────────────┘ └─────────────┘ │ +│ │ +└────────────────────────────────────────────────────────────────────────┘ +``` + +### 5.3 Последовательность обработки + +``` +┌──────────┐ ┌───────────┐ ┌─────────┐ ┌─────────┐ ┌─────────┐ +│ Poller │ │Transceiver│ │ ECU │ │ Matcher │ │ Decoder │ +└────┬─────┘ └─────┬─────┘ └────┬────┘ └────┬────┘ └────┬────┘ + │ │ │ │ │ + │ request(0x0C) │ │ │ │ + │────────────────►│ │ │ │ + │ │ │ │ │ + │ │ TX: 7DF#02010C │ │ │ + │ │───────────────►│ │ │ + │ │ │ │ │ + │ │ register(0x0C) │ │ │ + │ │───────────────────────────────►│ │ + │ │ │ │ │ + │ │ │ RX: 7E8#04410C2EE0 │ + │ │◄───────────────│ │ │ + │ │ │ │ │ + │ │ match(7E8, 0x0C) │ │ + │ │───────────────────────────────►│ │ + │ │ │ │ │ + │ │ │ matched(data) │ + │ │◄──────────────────────────────│ │ + │ │ │ │ │ + │ │ decode(0x0C, data) │ + │ │──────────────────────────────────────────────►│ + │ │ │ │ │ + │ │ │ │ 3000 rpm │ + │ │◄─────────────────────────────────────────────│ + │ │ │ │ │ + │ callback(rpm=3000) │ │ │ + │◄────────────────│ │ │ │ + │ │ │ │ │ +``` + +--- + +## 6. 
План реализации + +### 6.1 Фазы разработки + +#### Фаза 1: Core OBD2 (1-2 недели) + +**Задачи:** +- [ ] Создать модуль `obd2/protocol.py` — encoding/decoding +- [ ] Создать `obd2/pids.py` — реестр PIDs с формулами +- [ ] Модифицировать `CANBusHandler` для TX +- [ ] Создать `CANTransceiver` — unified TX/RX +- [ ] Написать unit-тесты для protocol и pids + +**Definition of Done:** +- Можно отправить OBD2 запрос и получить ответ +- Ответ корректно декодируется + +#### Фаза 2: Polling Engine (1 неделя) + +**Задачи:** +- [ ] Создать `obd2/poller.py` — polling с группами +- [ ] Создать `obd2/response_matcher.py` — корреляция +- [ ] Добавить таймауты и retry логику +- [ ] Интегрировать с конфигурацией + +**Definition of Done:** +- Автоматический опрос PIDs по расписанию +- Метрики latency и success rate + +#### Фаза 3: Data Layer (1 неделя) + +**Задачи:** +- [ ] Расширить SQLite схему +- [ ] Создать `vehicle/state.py` — VehicleState +- [ ] Создать `vehicle/state_manager.py` — управление состоянием +- [ ] Создать `handlers/realtime_handler.py` +- [ ] Модифицировать `storage_handler.py` + +**Definition of Done:** +- Данные сохраняются в БД +- VehicleState обновляется в realtime + +#### Фаза 4: Integration & Testing (1 неделя) + +**Задачи:** +- [ ] Интеграция всех компонентов +- [ ] Тестирование с OBD2 эмулятором +- [ ] Тестирование на реальном автомобиле (осторожно!) +- [ ] Оптимизация производительности +- [ ] Документация + +**Definition of Done:** +- Система работает стабильно +- Все метрики в пределах требований + +### 6.2 Приоритеты задач + +``` +P0 (Must Have) +├── OBD2 Protocol encoding/decoding +├── Basic PID support (RPM, Speed, Coolant) +├── Single Frame TX/RX +├── Polling engine +└── SQLite storage + +P1 (Should Have) +├── Extended PIDs +├── Multi Frame support +├── PostgreSQL sync +├── Retry/timeout logic +└── Graceful error handling + +P2 (Nice to Have) +├── DTC reading +├── VIN parsing +├── Trip statistics +├── WebSocket publisher +└── Flipper Zero display +``` + +--- + +## 7. API изменения + +### 7.1 Новые классы + +```python +# obd2/protocol.py +class OBD2Request: + mode: int + pid: int + def to_can_frame(self) -> CANFrame: ... + +class OBD2Response: + mode: int + pid: int + data: bytes + + @classmethod + def from_can_frame(cls, frame: CANFrame) -> Optional['OBD2Response']: ... + +# obd2/pids.py +@dataclass +class PIDDefinition: + pid: int + name: str + unit: str + decoder: Callable[[bytes], float] + min_value: float + max_value: float + +class PIDRegistry: + def get(self, pid: int) -> Optional[PIDDefinition]: ... + def decode(self, pid: int, data: bytes) -> Optional[float]: ... + +# obd2/poller.py +class OBD2Poller: + def __init__(self, transceiver: CANTransceiver, config: OBD2Config): ... + def start(self) -> None: ... + def stop(self) -> None: ... + def request_pid(self, pid: int) -> Future[float]: ... + +# vehicle/state_manager.py +class VehicleStateManager: + def get_state(self) -> VehicleState: ... + def update(self, pid: int, value: float) -> None: ... + def subscribe(self, callback: Callable[[str, Any], None]) -> None: ... 
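+
+# Illustrative composition sketch (comments only, not part of the API list above).
+# Class and method names come from the declarations in this section; the exact
+# constructor arguments (transceiver, config.obd2) are assumptions.
+#
+#   registry = PIDRegistry()
+#   poller = OBD2Poller(transceiver, config.obd2)
+#   state = VehicleStateManager()
+#
+#   # Response path: CAN frame -> OBD2Response -> decoded value -> VehicleState
+#   response = OBD2Response.from_can_frame(frame)              # e.g. 7E8#04410C2EE0
+#   if response is not None:
+#       value = registry.decode(response.pid, response.data)   # 0x0C: (A*256+B)/4 -> 3000 rpm
+#       if value is not None:
+#           state.update(response.pid, value)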
+``` + +### 7.2 Изменения конфигурации + +```python +# config.py - добавить +class OBD2Config(BaseModel): + enabled: bool = True + request_id: int = 0x7DF + response_id_start: int = 0x7E8 + response_id_end: int = 0x7EF + request_timeout_ms: int = 100 + retry_count: int = 2 + + polling_groups: list[PollingGroup] = [ + PollingGroup(name="fast", interval_ms=100, pids=["0C", "0D"]), + PollingGroup(name="medium", interval_ms=1000, pids=["05", "04"]), + ] + +class PollingGroup(BaseModel): + name: str + interval_ms: int + pids: list[str] + enabled: bool = True +``` + +--- + +## 8. Тестирование + +### 8.1 Unit Tests + +| Модуль | Тесты | +|--------|-------| +| `obd2/protocol.py` | Encoding/decoding всех режимов | +| `obd2/pids.py` | Декодирование каждого PID | +| `obd2/poller.py` | Timing, retry, timeout | +| `vehicle/state.py` | State updates, validation | + +### 8.2 Integration Tests + +| Сценарий | Описание | +|----------|----------| +| Emulator test | Полный цикл с OBD2 эмулятором | +| Reconnect test | Disconnect/reconnect ECU | +| High load test | 100+ PIDs/sec | +| Long run test | 24h stability | + +### 8.3 Тестовая среда + +```bash +# Запуск эмулятора +python obd2_emulator/src/main.py -i vcan0 -s city + +# Запуск модифицированного sniffer +python can_sniffer/src/main.py -i vcan0 +``` + +--- + +## 9. Риски и митигация + +| Риск | Вероятность | Влияние | Митигация | +|------|-------------|---------|-----------| +| ECU не отвечает на все PIDs | Высокая | Среднее | Discovery поддерживаемых PIDs при старте | +| Timing issues (CAN bus busy) | Средняя | Высокое | Adaptive polling, backoff | +| Memory leak при долгой работе | Низкая | Высокое | Periodic GC, memory limits | +| Data corruption при crash | Низкая | Высокое | WAL mode, transactions | + +--- + +## 10. Приложения + +### 10.1 Справочник PID формул + +```python +PID_FORMULAS = { + 0x04: lambda a: a * 100 / 255, # Engine Load % + 0x05: lambda a: a - 40, # Coolant Temp °C + 0x0C: lambda a, b: (a * 256 + b) / 4, # RPM + 0x0D: lambda a: a, # Speed km/h + 0x0F: lambda a: a - 40, # Intake Temp °C + 0x10: lambda a, b: (a * 256 + b) / 100, # MAF g/s + 0x11: lambda a: a * 100 / 255, # Throttle % + 0x1F: lambda a, b: a * 256 + b, # Run Time s + 0x2F: lambda a: a * 100 / 255, # Fuel Level % + 0x46: lambda a: a - 40, # Ambient Temp °C + 0x5C: lambda a: a - 40, # Oil Temp °C + 0x5E: lambda a, b: (a * 256 + b) / 20, # Fuel Rate L/h +} +``` + +### 10.2 CAN Frame Examples + +``` +# Запрос RPM +TX: 7DF # 02 01 0C 00 00 00 00 00 + │ │ │ + │ │ └── PID: 0x0C (RPM) + │ └───── Mode: 0x01 (Current Data) + └──────── Length: 2 bytes + +# Ответ RPM = 3000 +RX: 7E8 # 04 41 0C 2E E0 00 00 00 + │ │ │ └──┴── Data: (0x2E * 256 + 0xE0) / 4 = 3000 + │ │ └─────── PID: 0x0C + │ └────────── Mode + 0x40: 0x41 + └───────────── Length: 4 bytes +``` + +--- + +## 11. Контакты и согласования + +**Автор ТЗ:** Claude (AI Assistant) +**Дата создания:** 2025-01-29 +**Статус:** Draft + +--- + +*Данное техническое задание является основой для разработки. 
Детали реализации могут уточняться в процессе работы.* diff --git a/can_sniffer/config.json.example b/can_sniffer/config.json.example new file mode 100644 index 0000000..4af9442 --- /dev/null +++ b/can_sniffer/config.json.example @@ -0,0 +1,67 @@ +{ + "can": { + "interface": "can0", + "bitrate": 500000 + }, + "obd2": { + "enabled": true, + "request_id": 2015, + "response_id_start": 2024, + "response_id_end": 2031, + "request_timeout_ms": 100, + "retry_count": 2, + "auto_discover": true, + "polling_groups": [ + { + "name": "fast", + "interval_ms": 100, + "pids": ["0C", "0D", "11"], + "enabled": true + }, + { + "name": "medium", + "interval_ms": 1000, + "pids": ["05", "04", "2F", "0F"], + "enabled": true + }, + { + "name": "slow", + "interval_ms": 5000, + "pids": ["46", "5C", "1F"], + "enabled": true + } + ] + }, + "storage": { + "database_path": "obd2_data.db", + "wal_mode": true, + "sync_mode": "NORMAL", + "retention_days": 7, + "aggregation_retention_days": 30 + }, + "postgresql": { + "enabled": false, + "host": "localhost", + "port": 5432, + "database": "obd2_data", + "user": "postgres", + "password": "" + }, + "flipper": { + "enabled": false, + "device": "/dev/ttyAMA0", + "baudrate": 115200, + "update_interval": 0.5 + }, + "logging": { + "level": "INFO", + "file": "obd2_client.log", + "max_bytes": 10485760, + "backup_count": 5 + }, + "vehicle": { + "stale_threshold_s": 5.0, + "disconnect_timeout_s": 10.0, + "session_auto_start": true + } +} diff --git a/can_sniffer/src/config.py b/can_sniffer/src/config.py index a408550..9fbe580 100644 --- a/can_sniffer/src/config.py +++ b/can_sniffer/src/config.py @@ -1,8 +1,8 @@ """ -Модуль конфигурации для CAN Sniffer проекта. +Configuration module for OBD2 Client. -Использует pydantic-settings для типобезопасной конфигурации с валидацией -и поддержкой загрузки из файла и переменных окружения. +Uses pydantic-settings for type-safe configuration with validation +and support for loading from JSON files and environment variables. """ from pathlib import Path @@ -12,240 +12,285 @@ from pydantic import BaseModel, Field, field_validator from pydantic_settings import BaseSettings, SettingsConfigDict -class CanConfig(BaseModel): - """Конфигурация CAN интерфейсов.""" - +class CANConfig(BaseModel): + """CAN interface configuration for OBD2.""" + model_config = {"extra": "ignore"} - - interfaces: List[str] = Field( - default=["can0", "can1"], - description="Список CAN интерфейсов для мониторинга" - ) - listen_only: bool = Field( - default=True, - description="Режим только чтения (listen-only mode)" + + interface: str = Field( + default="can0", + description="CAN interface for OBD2 communication" ) bitrate: int = Field( - default=1000000, - description="Скорость передачи CAN (бит/с). 
Должна соответствовать настройкам интерфейса (ip link set canX type can bitrate X)" + default=500000, + description="CAN bus bitrate (500000 for standard OBD2)" ) - filters: List[dict] = Field( + + +class PollingGroupConfig(BaseModel): + """Configuration for a PID polling group.""" + + model_config = {"extra": "ignore"} + + name: str = Field( + description="Group identifier" + ) + interval_ms: int = Field( + default=100, + description="Polling interval in milliseconds" + ) + pids: List[str] = Field( default_factory=list, - description="Список фильтров SocketCAN: [{'can_id': 0x123, 'can_mask': 0x7FF}, ...]" + description="List of PIDs to poll (hex strings like '0C', '0D')" ) - - @field_validator('interfaces', mode='before') + enabled: bool = Field( + default=True, + description="Whether this group is active" + ) + + @field_validator('pids', mode='before') @classmethod - def parse_interfaces(cls, v): - """Парсинг интерфейсов из строки (для env переменных).""" + def parse_pids(cls, v): + """Parse PIDs from comma-separated string.""" if isinstance(v, str): - return [item.strip() for item in v.split(',')] + return [item.strip() for item in v.split(',') if item.strip()] return v -class StorageConfig(BaseModel): - """Конфигурация локального хранилища (SQLite).""" +class OBD2Config(BaseModel): + """OBD2 protocol configuration.""" model_config = {"extra": "ignore"} - type: str = Field( - default="sqlite", - description="Тип хранилища" + enabled: bool = Field( + default=True, + description="Enable OBD2 polling" ) + request_id: int = Field( + default=0x7DF, + description="CAN ID for OBD2 requests (0x7DF = broadcast)" + ) + response_id_start: int = Field( + default=0x7E8, + description="Start of OBD2 response CAN ID range" + ) + response_id_end: int = Field( + default=0x7EF, + description="End of OBD2 response CAN ID range" + ) + request_timeout_ms: int = Field( + default=100, + description="Timeout for OBD2 request in milliseconds" + ) + retry_count: int = Field( + default=2, + description="Number of retries for failed requests" + ) + auto_discover: bool = Field( + default=True, + description="Auto-discover supported PIDs on startup" + ) + polling_groups: List[PollingGroupConfig] = Field( + default_factory=lambda: [ + PollingGroupConfig( + name="fast", + interval_ms=100, + pids=["0C", "0D", "11"], # RPM, Speed, Throttle + ), + PollingGroupConfig( + name="medium", + interval_ms=1000, + pids=["05", "04", "2F", "0F"], # Coolant, Load, Fuel, Intake Temp + ), + PollingGroupConfig( + name="slow", + interval_ms=5000, + pids=["46", "5C", "1F"], # Ambient, Oil Temp, Runtime + ), + ], + description="Polling groups with different intervals" + ) + + +class StorageConfig(BaseModel): + """Local storage configuration (SQLite).""" + + model_config = {"extra": "ignore"} + database_path: str = Field( - default="can_offline.db", - description="Путь к файлу базы данных SQLite" + default="obd2_data.db", + description="Path to SQLite database file" ) wal_mode: bool = Field( default=True, - description="Включить режим WAL (Write-Ahead Logging)" + description="Enable WAL mode for concurrent access" ) sync_mode: str = Field( default="NORMAL", - description="Режим синхронизации: NORMAL, FULL, OFF" + description="Sync mode: NORMAL, FULL, OFF" ) retention_days: int = Field( default=7, - description="Дней хранения обработанных записей (для автоочистки)" + description="Days to keep detailed readings" + ) + aggregation_retention_days: int = Field( + default=30, + description="Days to keep aggregated data" ) class 
PostgreSQLConfig(BaseModel): - """Конфигурация PostgreSQL.""" - - model_config = {"extra": "ignore"} - - enabled: bool = Field( - default=True, - description="Включить отправку данных в PostgreSQL" - ) - host: str = Field( - default="localhost", - description="Хост PostgreSQL сервера" - ) - port: int = Field( - default=5432, - description="Порт PostgreSQL сервера" - ) - database: str = Field( - default="can_bus", - description="Имя базы данных" - ) - user: str = Field( - default="postgres", - description="Имя пользователя PostgreSQL" - ) - password: str = Field( - default="", - description="Пароль пользователя PostgreSQL" - ) - batch_size: int = Field( - default=1000, - description="Размер батча для отправки данных" - ) - flush_interval: int = Field( - default=5, - description="Интервал отправки батча (секунды)" - ) - max_retries: int = Field( - default=3, - description="Максимальное количество попыток повтора при ошибке" - ) - retry_backoff: float = Field( - default=1.0, - description="Базовый интервал backoff для повторов (секунды)" - ) - connection_pool_size: int = Field( - default=5, - description="Размер пула соединений" - ) - connection_timeout: int = Field( - default=10, - description="Таймаут подключения (секунды)" - ) - sync_interval: float = Field( - default=30.0, - description="Интервал синхронизации из SQLite в PostgreSQL (секунды)" - ) - - -class LoggingConfig(BaseModel): - """Конфигурация логирования.""" - - model_config = {"extra": "ignore"} - - level: str = Field( - default="INFO", - description="Уровень логирования: DEBUG, INFO, WARNING, ERROR, CRITICAL" - ) - format: str = Field( - default="%(asctime)s - %(name)s - %(levelname)s - %(message)s", - description="Формат логов" - ) - file: str = Field( - default="can_edge.log", - description="Имя файла для логов" - ) - max_bytes: int = Field( - default=10485760, - description="Максимальный размер файла лога (байты)" - ) - backup_count: int = Field( - default=5, - description="Количество резервных копий логов" - ) - - -class FlipperConfig(BaseModel): - """Конфигурация Flipper Zero UART.""" + """PostgreSQL configuration.""" model_config = {"extra": "ignore"} enabled: bool = Field( default=False, - description="Включить отправку статистики на Flipper Zero" + description="Enable PostgreSQL synchronization" ) - device: str = Field( - default="/dev/ttyAMA0", - description="UART устройство для подключения Flipper Zero" + host: str = Field( + default="localhost", + description="PostgreSQL server host" ) - baudrate: int = Field( - default=115200, - description="Скорость UART (бод)" + port: int = Field( + default=5432, + description="PostgreSQL server port" ) - send_interval: float = Field( - default=1.0, - description="Интервал отправки статистики (секунды)" + database: str = Field( + default="obd2_data", + description="Database name" ) - - -class GeneralConfig(BaseModel): - """Общие настройки.""" - - model_config = {"extra": "ignore"} - - buffer_size: int = Field( - default=10000, - description="Размер буфера для данных" + user: str = Field( + default="postgres", + description="PostgreSQL username" + ) + password: str = Field( + default="", + description="PostgreSQL password" ) batch_size: int = Field( - default=1000, - description="Размер батча для обработки сообщений" + default=100, + description="Batch size for sync operations" ) - batch_interval: float = Field( - default=0.1, - description="Интервал обработки батча (секунды)" + sync_interval: float = Field( + default=30.0, + description="Sync interval in seconds" + ) + 
connection_timeout: int = Field( + default=10, + description="Connection timeout in seconds" ) max_retries: int = Field( default=3, - description="Максимальное количество попыток повтора" + description="Maximum retry attempts" ) - retry_delay: float = Field( - default=1.0, - description="Задержка между попытками (секунды)" + + +class LoggingConfig(BaseModel): + """Logging configuration.""" + + model_config = {"extra": "ignore"} + + level: str = Field( + default="INFO", + description="Log level: DEBUG, INFO, WARNING, ERROR, CRITICAL" + ) + format: str = Field( + default="%(asctime)s - %(name)s - %(levelname)s - %(message)s", + description="Log format" + ) + file: str = Field( + default="obd2_client.log", + description="Log file name" + ) + max_bytes: int = Field( + default=10485760, + description="Maximum log file size in bytes" + ) + backup_count: int = Field( + default=5, + description="Number of backup log files" + ) + + +class FlipperConfig(BaseModel): + """Flipper Zero UART configuration.""" + + model_config = {"extra": "ignore"} + + enabled: bool = Field( + default=False, + description="Enable Flipper Zero display" + ) + device: str = Field( + default="/dev/ttyAMA0", + description="UART device for Flipper Zero" + ) + baudrate: int = Field( + default=115200, + description="UART baudrate" + ) + update_interval: float = Field( + default=0.5, + description="Display update interval in seconds" + ) + + +class VehicleConfig(BaseModel): + """Vehicle state configuration.""" + + model_config = {"extra": "ignore"} + + stale_threshold_s: float = Field( + default=5.0, + description="Threshold in seconds to consider a value stale" + ) + disconnect_timeout_s: float = Field( + default=10.0, + description="Timeout to consider ECU disconnected" + ) + session_auto_start: bool = Field( + default=True, + description="Auto-start session when engine starts" ) class Config(BaseSettings): - """Главный класс конфигурации проекта.""" - + """Main configuration class for OBD2 Client.""" + model_config = SettingsConfigDict( - env_prefix="CAN_SNIFFER_", + env_prefix="OBD2_", env_nested_delimiter="__", case_sensitive=False, extra="ignore", ) - - can: CanConfig = Field(default_factory=CanConfig) + + can: CANConfig = Field(default_factory=CANConfig) + obd2: OBD2Config = Field(default_factory=OBD2Config) storage: StorageConfig = Field(default_factory=StorageConfig) postgresql: PostgreSQLConfig = Field(default_factory=PostgreSQLConfig) - flipper: FlipperConfig = Field(default_factory=FlipperConfig) logging: LoggingConfig = Field(default_factory=LoggingConfig) - general: GeneralConfig = Field(default_factory=GeneralConfig) - + flipper: FlipperConfig = Field(default_factory=FlipperConfig) + vehicle: VehicleConfig = Field(default_factory=VehicleConfig) + @classmethod def _find_config_file(cls) -> Optional[Path]: - """Поиск конфигурационного файла.""" - # Определяем правильный путь к корню проекта can_sniffer - # __file__ = can_sniffer/src/config.py - # parent = can_sniffer/src - # parent.parent = can_sniffer + """Find configuration file.""" project_root = Path(__file__).parent.parent - + config_paths = [ - project_root / "config.json", # can_sniffer/config.json - Path(__file__).parent / "config.json", # can_sniffer/src/config.json - Path.home() / ".can_sniffer" / "config.json", + project_root / "config.json", + Path(__file__).parent / "config.json", + Path.home() / ".obd2_client" / "config.json", ] - + for config_path in config_paths: if config_path.exists(): return config_path return None - + def __init__(self, **kwargs): 
- """Инициализация конфигурации с загрузкой из JSON файла.""" - # Если kwargs пусты, пытаемся загрузить из файла + """Initialize configuration with JSON file loading.""" if not kwargs: config_file = self._find_config_file() if config_file: @@ -253,37 +298,23 @@ class Config(BaseSettings): try: with open(config_file, 'r', encoding='utf-8') as f: json_data = json.load(f) - - # Передаем данные из JSON в super().__init__() - # Pydantic автоматически создаст вложенные объекты CanConfig, StorageConfig и т.д. super().__init__(**json_data) return except Exception as e: - # Если не удалось загрузить JSON, выводим предупреждение import warnings - import traceback warnings.warn( - f"Failed to load config from {config_file}: {e}\n" - f"Traceback: {traceback.format_exc()}\n" + f"Failed to load config from {config_file}: {e}. " f"Using defaults." ) - - # Инициализация с переданными kwargs или defaults + super().__init__(**kwargs) - + @classmethod def load_from_file(cls, file_path: Optional[Path] = None) -> 'Config': - """Загрузка конфигурации из указанного файла или поиск автоматически. - - Args: - file_path: Путь к конфигурационному файлу. Если None, выполняется поиск. - - Returns: - Экземпляр Config - """ + """Load configuration from specified file or auto-find.""" if file_path is None: file_path = cls._find_config_file() - + if file_path and file_path.exists(): import json try: @@ -293,26 +324,14 @@ class Config(BaseSettings): except Exception as e: import warnings warnings.warn(f"Failed to load config from {file_path}: {e}") - + return cls() - + def get(self, key_path: str, default=None): - """Получение значения конфигурации по пути через точку. - - Args: - key_path: Путь к значению через точку, например 'can.interfaces' - default: Значение по умолчанию, если ключ не найден - - Returns: - Значение конфигурации или default - - Example: - >>> config.get('can.interfaces') - ['can0', 'can1'] - """ + """Get configuration value by dot-separated path.""" keys = key_path.split('.') current = self - + for key in keys: if hasattr(current, key): current = getattr(current, key) @@ -320,45 +339,20 @@ class Config(BaseSettings): current = current[key] else: return default - + return current - + def get_section(self, section: str): - """Получение всей секции конфигурации. - - Args: - section: Имя секции, например 'can', 'postgresql' - - Returns: - Объект конфигурации секции - - Example: - >>> can_config = config.get_section('can') - >>> print(can_config.interfaces) - """ + """Get configuration section by name.""" return getattr(self, section, None) -# Глобальный экземпляр конфигурации (singleton) +# Global configuration instance (singleton) _config_instance: Optional[Config] = None def get_config(reload: bool = False) -> Config: - """Получение глобального экземпляра конфигурации. - - Args: - reload: Если True, перезагружает конфигурацию из файла - - Returns: - Экземпляр Config - - Example: - >>> from config import get_config - >>> config = get_config() - >>> interfaces = config.can.interfaces - >>> # Перезагрузить конфигурацию после изменения файла - >>> config = get_config(reload=True) - """ + """Get global configuration instance.""" global _config_instance if _config_instance is None or reload: _config_instance = Config() @@ -366,48 +360,30 @@ def get_config(reload: bool = False) -> Config: def reload_config() -> Config: - """Перезагрузка конфигурации из файла. 
- - Returns: - Перезагруженный экземпляр Config - - Example: - >>> from config import reload_config - >>> config = reload_config() - """ + """Reload configuration from file.""" return get_config(reload=True) -# Для обратной совместимости и удобства -# Используем прокси для автоматического доступа к актуальной конфигурации class _ConfigProxy: - """Прокси для глобального доступа к конфигурации с поддержкой перезагрузки.""" - + """Proxy for global configuration access with reload support.""" + def __getattr__(self, name): - """Делегирование доступа к атрибутам конфигурации.""" - # Всегда получаем актуальный экземпляр конфигурации return getattr(get_config(), name) - + def reload(self): - """Перезагрузка конфигурации из файла.""" global _config_instance - _config_instance = None # Сбрасываем singleton + _config_instance = None return reload_config() - + def __repr__(self): - """Строковое представление прокси.""" return f"ConfigProxy({get_config()})" - - # Поддержка прямого доступа к методам Config + def get(self, key_path: str, default=None): - """Получение значения по пути.""" return get_config().get(key_path, default) - + def get_section(self, section: str): - """Получение секции конфигурации.""" return get_config().get_section(section) -# Глобальный прокси для удобного доступа -# ВАЖНО: После изменения config.json нужно вызвать config.reload() или перезапустить приложение +# Global proxy for convenient access config = _ConfigProxy() diff --git a/can_sniffer/src/flipper/pages/__init__.py b/can_sniffer/src/flipper/pages/__init__.py index 2a8e6ab..5795f07 100644 --- a/can_sniffer/src/flipper/pages/__init__.py +++ b/can_sniffer/src/flipper/pages/__init__.py @@ -6,7 +6,7 @@ and handles user actions. """ from flipper.pages.base import BasePage -from flipper.pages.can_stats import CANStatsPage +from flipper.pages.obd2_stats import OBD2StatsPage, OBD2CommPage from flipper.pages.ups_status import UPSStatusPage from flipper.pages.system_info import SystemInfoPage from flipper.pages.actions import ActionsPage @@ -14,7 +14,8 @@ from flipper.pages.app_status import AppStatusPage __all__ = [ "BasePage", - "CANStatsPage", + "OBD2StatsPage", + "OBD2CommPage", "UPSStatusPage", "SystemInfoPage", "ActionsPage", diff --git a/can_sniffer/src/flipper/pages/can_stats.py b/can_sniffer/src/flipper/pages/can_stats.py deleted file mode 100644 index e2f65da..0000000 --- a/can_sniffer/src/flipper/pages/can_stats.py +++ /dev/null @@ -1,68 +0,0 @@ -""" -CAN Statistics Page. - -Displays CAN sniffer statistics on Flipper Zero. -""" - -from flipper.pages.base import InfoPage -from flipper.providers.can_provider import CANProvider - - -class CANStatsPage(InfoPage): - """ - Page displaying CAN bus statistics. 
- - Shows: - - Total frames received - - Pending/processed frames - - Queue status - - Dropped frames (if any) - """ - - def __init__(self): - super().__init__( - name="can_stats", - title="CAN Statistics", - icon="can" - ) - self._provider = CANProvider() - - def get_lines(self) -> list[str]: - """Get statistics lines for display.""" - # Force refresh to get fresh data - self._provider.refresh() - data = self._provider.get_data() - - lines = [ - f"Total: {self._format_number(data.total_frames)}", - f"Processed: {self._format_number(data.processed_frames)}", - f"Queue: {data.queue_size}/{data.queue_capacity}", - ] - - # Add interface breakdown if available - if data.interfaces: - iface_str = ", ".join( - f"{k}: {self._format_number(v)}" - for k, v in data.interfaces.items() - ) - if len(iface_str) <= 25: - lines.append(iface_str) - - # Show dropped count if non-zero - if data.dropped_frames > 0: - lines.append(f"Dropped: {data.dropped_frames}") - - return lines - - def _format_number(self, num: int) -> str: - """Format large numbers with K/M suffix.""" - if num >= 1_000_000: - return f"{num / 1_000_000:.1f}M" - elif num >= 1_000: - return f"{num / 1_000:.1f}K" - else: - return str(num) - - def get_provider(self) -> CANProvider: - """Get the CAN provider instance.""" - return self._provider diff --git a/can_sniffer/src/flipper/pages/obd2_stats.py b/can_sniffer/src/flipper/pages/obd2_stats.py new file mode 100644 index 0000000..d4baa91 --- /dev/null +++ b/can_sniffer/src/flipper/pages/obd2_stats.py @@ -0,0 +1,104 @@ +""" +OBD2 Statistics Page. + +Displays OBD2 vehicle data on Flipper Zero. +""" + +from flipper.pages.base import InfoPage +from flipper.providers.obd2_provider import OBD2Provider + + +class OBD2StatsPage(InfoPage): + """ + Page displaying OBD2 vehicle statistics. + + Shows: + - RPM and Speed + - Coolant and Oil temperature + - Throttle and Engine load + - Fuel level + """ + + def __init__(self): + super().__init__( + name="obd2_stats", + title="OBD2 Data", + icon="car" + ) + self._provider = OBD2Provider() + + def get_lines(self) -> list[str]: + """Get OBD2 data lines for display.""" + self._provider.refresh() + data = self._provider.get_data() + + lines = [] + + # Connection status + if data.ecu_connected: + status = "ECU: OK" + else: + status = "ECU: ---" + lines.append(status) + + # Engine data - RPM and Speed on same line + rpm_str = f"{data.rpm:.0f}" if data.rpm is not None else "---" + spd_str = f"{data.speed:.0f}" if data.speed is not None else "---" + lines.append(f"RPM:{rpm_str} SPD:{spd_str}km/h") + + # Temperatures + cool_str = f"{data.coolant_temp:.0f}" if data.coolant_temp is not None else "--" + oil_str = f"{data.oil_temp:.0f}" if data.oil_temp is not None else "--" + lines.append(f"Cool:{cool_str}C Oil:{oil_str}C") + + # Throttle and Load + thr_str = f"{data.throttle:.0f}" if data.throttle is not None else "--" + load_str = f"{data.engine_load:.0f}" if data.engine_load is not None else "--" + lines.append(f"Thr:{thr_str}% Load:{load_str}%") + + # Fuel + if data.fuel_level is not None: + lines.append(f"Fuel: {data.fuel_level:.0f}%") + + return lines + + def get_provider(self) -> OBD2Provider: + """Get the OBD2 provider instance.""" + return self._provider + + +class OBD2CommPage(InfoPage): + """ + Page displaying OBD2 communication statistics. 
+ + Shows: + - Request/Response counts + - Success rate + - Average latency + """ + + def __init__(self): + super().__init__( + name="obd2_comm", + title="OBD2 Comm", + icon="signal" + ) + self._provider = OBD2Provider() + + def get_lines(self) -> list[str]: + """Get communication stats for display.""" + self._provider.refresh() + data = self._provider.get_data() + + lines = [ + f"Requests: {data.total_requests}", + f"Responses: {data.successful_responses}", + f"Success: {data.success_rate:.1f}%", + f"Latency: {data.avg_latency_ms:.1f}ms", + ] + + return lines + + def get_provider(self) -> OBD2Provider: + """Get the OBD2 provider instance.""" + return self._provider diff --git a/can_sniffer/src/flipper/providers/__init__.py b/can_sniffer/src/flipper/providers/__init__.py index 360b653..6f9b88d 100644 --- a/can_sniffer/src/flipper/providers/__init__.py +++ b/can_sniffer/src/flipper/providers/__init__.py @@ -1,20 +1,20 @@ """ Data Providers for Flipper Zero Pages. -Providers abstract data sources (UPS, system metrics, CAN stats) +Providers abstract data sources (UPS, system metrics, OBD2 stats) from the pages that display them. """ from flipper.providers.base import BaseProvider from flipper.providers.ups_provider import UPSProvider from flipper.providers.system_provider import SystemProvider -from flipper.providers.can_provider import CANProvider +from flipper.providers.obd2_provider import OBD2Provider from flipper.providers.app_status_provider import AppStatusProvider __all__ = [ "BaseProvider", "UPSProvider", "SystemProvider", - "CANProvider", + "OBD2Provider", "AppStatusProvider", ] diff --git a/can_sniffer/src/flipper/providers/can_provider.py b/can_sniffer/src/flipper/providers/can_provider.py deleted file mode 100644 index bf3be98..0000000 --- a/can_sniffer/src/flipper/providers/can_provider.py +++ /dev/null @@ -1,135 +0,0 @@ -""" -CAN Statistics Provider. - -Provides CAN sniffer statistics from the message processor. -""" - -from typing import Dict, Any, Optional, Callable -from dataclasses import dataclass, field - -from flipper.providers.base import BaseProvider - - -@dataclass -class CANData: - """CAN statistics data.""" - - total_frames: int = 0 - pending_frames: int = 0 - processed_frames: int = 0 - dropped_frames: int = 0 - queue_size: int = 0 - queue_capacity: int = 100000 - interfaces: Dict[str, int] = field(default_factory=dict) - - -class CANProvider(BaseProvider): - """ - Provider for CAN sniffer statistics. - - This provider is updated by the FlipperHandler when - CAN frames are processed. - """ - - def __init__(self): - super().__init__(name="can", cache_ttl=0.5) - self._data = CANData() - self._stats_callback: Optional[Callable[[], Dict[str, Any]]] = None - - def set_stats_callback(self, callback: Callable[[], Dict[str, Any]]) -> None: - """ - Set callback to retrieve stats from message processor. - - Args: - callback: Function that returns stats dictionary - """ - self._stats_callback = callback - - def update_stats( - self, - total: int = 0, - pending: int = 0, - processed: int = 0, - dropped: int = 0, - queue_size: int = 0 - ) -> None: - """ - Update CAN statistics directly. 
- - Args: - total: Total frames received - pending: Pending frames in queue - processed: Processed frames - dropped: Dropped frames - queue_size: Current queue size - """ - self._data.total_frames = total - self._data.pending_frames = pending - self._data.processed_frames = processed - self._data.dropped_frames = dropped - self._data.queue_size = queue_size - self._set_cached("data", self._data) - - def update_interface_stats(self, interface: str, count: int) -> None: - """ - Update per-interface statistics. - - Args: - interface: Interface name (e.g., "can0") - count: Message count for this interface - """ - self._data.interfaces[interface] = count - - def refresh(self) -> bool: - """Refresh stats from callback if available.""" - if self._stats_callback: - try: - stats = self._stats_callback() - self._data.total_frames = stats.get("total_frames", 0) - self._data.pending_frames = stats.get("pending_frames", 0) - self._data.processed_frames = stats.get("processed_frames", 0) - self._data.dropped_frames = stats.get("dropped_count", 0) - self._data.queue_size = stats.get("queue_size", 0) - self._set_cached("data", self._data) - return True - except Exception as e: - self._last_error = str(e) - return False - - return True - - def get_data(self) -> CANData: - """Get current CAN statistics.""" - cached = self._get_cached("data") - if cached is not None: - return cached - - self.refresh() - return self._data - - def get_total_frames(self) -> int: - """Get total frames received.""" - return self.get_data().total_frames - - def get_pending_frames(self) -> int: - """Get pending frames in queue.""" - return self.get_data().pending_frames - - def get_processed_frames(self) -> int: - """Get processed frames.""" - return self.get_data().processed_frames - - def get_dropped_frames(self) -> int: - """Get dropped frames.""" - return self.get_data().dropped_frames - - def get_queue_fill_percent(self) -> float: - """Get queue fill percentage.""" - data = self.get_data() - if data.queue_capacity == 0: - return 0.0 - return (data.queue_size / data.queue_capacity) * 100 - - def get_interface_count(self, interface: str) -> int: - """Get message count for specific interface.""" - return self.get_data().interfaces.get(interface, 0) diff --git a/can_sniffer/src/flipper/providers/obd2_provider.py b/can_sniffer/src/flipper/providers/obd2_provider.py new file mode 100644 index 0000000..c5c1b89 --- /dev/null +++ b/can_sniffer/src/flipper/providers/obd2_provider.py @@ -0,0 +1,137 @@ +""" +OBD2 Data Provider. + +Provides OBD2 vehicle data from VehicleStateManager. +""" + +from typing import Dict, Any, Optional, Callable +from dataclasses import dataclass, field + +from flipper.providers.base import BaseProvider + + +@dataclass +class OBD2Data: + """OBD2 vehicle data for display.""" + + # Connection + ecu_connected: bool = False + last_update: float = 0.0 + + # Engine + rpm: Optional[float] = None + engine_load: Optional[float] = None + coolant_temp: Optional[float] = None + oil_temp: Optional[float] = None + + # Movement + speed: Optional[float] = None + throttle: Optional[float] = None + + # Fuel + fuel_level: Optional[float] = None + fuel_rate: Optional[float] = None + + # Stats + total_requests: int = 0 + successful_responses: int = 0 + success_rate: float = 0.0 + avg_latency_ms: float = 0.0 + + +class OBD2Provider(BaseProvider): + """ + Provider for OBD2 vehicle data. + + This provider reads data from VehicleStateManager + and poller statistics. 
+ """ + + def __init__(self): + super().__init__(name="obd2", cache_ttl=0.25) + self._data = OBD2Data() + self._state_callback: Optional[Callable[[], Any]] = None + self._stats_callback: Optional[Callable[[], Dict[str, Any]]] = None + + def set_state_callback(self, callback: Callable[[], Any]) -> None: + """ + Set callback to retrieve VehicleState. + + Args: + callback: Function that returns VehicleState + """ + self._state_callback = callback + + def set_stats_callback(self, callback: Callable[[], Dict[str, Any]]) -> None: + """ + Set callback to retrieve poller statistics. + + Args: + callback: Function that returns stats dictionary + """ + self._stats_callback = callback + + def refresh(self) -> bool: + """Refresh data from VehicleState.""" + try: + # Get vehicle state + if self._state_callback: + state = self._state_callback() + if state: + self._data.ecu_connected = state.ecu_connected + self._data.last_update = state.last_response_time + self._data.rpm = state.rpm + self._data.engine_load = state.engine_load + self._data.coolant_temp = state.coolant_temp + self._data.oil_temp = state.oil_temp + self._data.speed = state.speed + self._data.throttle = state.throttle_pos + self._data.fuel_level = state.fuel_level + self._data.fuel_rate = state.fuel_rate + + # Get poller stats + if self._stats_callback: + stats = self._stats_callback() + poller = stats.get("poller", {}) + self._data.total_requests = poller.get("total_requests", 0) + self._data.successful_responses = poller.get("successful_responses", 0) + self._data.success_rate = poller.get("success_rate", 0.0) + self._data.avg_latency_ms = poller.get("avg_latency_ms", 0.0) + + self._set_cached("data", self._data) + return True + + except Exception as e: + self._last_error = str(e) + return False + + def get_data(self) -> OBD2Data: + """Get current OBD2 data.""" + cached = self._get_cached("data") + if cached is not None: + return cached + + self.refresh() + return self._data + + # Convenience methods + def get_rpm(self) -> Optional[float]: + return self.get_data().rpm + + def get_speed(self) -> Optional[float]: + return self.get_data().speed + + def get_coolant_temp(self) -> Optional[float]: + return self.get_data().coolant_temp + + def get_fuel_level(self) -> Optional[float]: + return self.get_data().fuel_level + + def get_throttle(self) -> Optional[float]: + return self.get_data().throttle + + def is_connected(self) -> bool: + return self.get_data().ecu_connected + + def get_success_rate(self) -> float: + return self.get_data().success_rate diff --git a/can_sniffer/src/handlers/__init__.py b/can_sniffer/src/handlers/__init__.py index 033a9d9..a6d6b67 100644 --- a/can_sniffer/src/handlers/__init__.py +++ b/can_sniffer/src/handlers/__init__.py @@ -1,18 +1,17 @@ """ -Модуль обработчиков CAN сообщений. +OBD2 Data Handlers Module. -Предоставляет плагинную архитектуру для обработки CAN фреймов. +Provides plugin architecture for processing OBD2 readings. 
""" from .base import BaseHandler from .storage_handler import StorageHandler -from .postgresql_handler import PostgreSQLHandler +from .realtime_handler import RealtimeHandler from .flipper_handler import FlipperHandler __all__ = [ 'BaseHandler', 'StorageHandler', - 'PostgreSQLHandler', + 'RealtimeHandler', 'FlipperHandler', ] - diff --git a/can_sniffer/src/handlers/flipper_handler.py b/can_sniffer/src/handlers/flipper_handler.py index 80ea282..a33c2b4 100644 --- a/can_sniffer/src/handlers/flipper_handler.py +++ b/can_sniffer/src/handlers/flipper_handler.py @@ -1,480 +1,212 @@ """ -Flipper Zero Dynamic UI Handler. +Flipper Zero Handler for OBD2 Data. -Provides multi-page interface with bidirectional communication via UART. -Supports pluggable pages for CAN stats, UPS status, system info, and actions. - -Protocol: - RPi -> Flipper: - PAGE:/|||<lines>|<actions>|<selected> - ACK:<device>,ip=<ip> - RESULT:<status>|<message> - - Flipper -> RPi: - INIT:<device> - STOP:<device> - CMD:NAV:<next|prev> - CMD:SELECT:<index> - CMD:CONFIRM / CMD:CANCEL - CMD:REFRESH +Sends OBD2 vehicle data to Flipper Zero via UART. """ -import socket import threading import time -from typing import Dict, Any, List, Optional +from typing import Dict, Any, Optional, Callable -from handlers.base import BaseHandler -from can_frame import CANFrame -from config import config from logger import get_logger - -from flipper.protocol import Protocol, Command, CommandType +from config import config +from obd2.pids import OBD2Reading +from flipper.protocol import FlipperProtocol from flipper.page_manager import PageManager -from flipper.pages import CANStatsPage, UPSStatusPage, SystemInfoPage, ActionsPage, AppStatusPage +from flipper.pages import OBD2StatsPage, OBD2CommPage, UPSStatusPage, SystemInfoPage, AppStatusPage +from flipper.providers.obd2_provider import OBD2Provider +from .base import BaseHandler logger = get_logger(__name__) -def get_ip_address() -> str: - """ - Get the primary IP address of this device. - - Returns: - IP address string or "0.0.0.0" if unable to determine - """ - try: - s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - s.settimeout(0.1) - s.connect(("8.8.8.8", 80)) - ip = s.getsockname()[0] - s.close() - return ip - except Exception: - pass - - try: - hostname = socket.gethostname() - ip = socket.gethostbyname(hostname) - if ip and not ip.startswith("127."): - return ip - except Exception: - pass - - return "0.0.0.0" - - class FlipperHandler(BaseHandler): """ - Handler that communicates with Flipper Zero via UART. + Handler that sends OBD2 data to Flipper Zero. - Provides dynamic multi-page interface: - - CAN Statistics - - UPS Status (if available) - - System Information - - Actions Menu - - Implements handshake protocol for connection management - and bidirectional command processing. + Uses UART connection to display vehicle data + on Flipper Zero screen. """ - def __init__(self, enabled: Optional[bool] = None): + def __init__( + self, + enabled: bool = True, + device: str = "/dev/ttyAMA0", + baudrate: int = 115200, + update_interval: float = 0.5, + ): """ Initialize Flipper handler. Args: - enabled: Whether handler is enabled. If None, reads from config. 
+ enabled: Whether handler is active + device: UART device path + baudrate: UART baudrate + update_interval: Screen update interval in seconds """ - if enabled is None: - enabled = getattr(config, "flipper", None) is not None - if enabled: - enabled = getattr(config.flipper, "enabled", False) + super().__init__(name="flipper", enabled=enabled) + self._device = device + self._baudrate = baudrate + self._update_interval = update_interval - super().__init__(name="flipper_handler", enabled=enabled) + self._protocol: Optional[FlipperProtocol] = None + self._page_manager: Optional[PageManager] = None + self._obd2_provider: Optional[OBD2Provider] = None - # Serial configuration - self.serial_port: Optional[Any] = None - self.device = "/dev/ttyAMA0" - self.baudrate = 115200 - self.send_interval = 1.0 - - if hasattr(config, "flipper"): - flipper_cfg = config.flipper - self.device = getattr(flipper_cfg, "device", self.device) - self.baudrate = getattr(flipper_cfg, "baudrate", self.baudrate) - self.send_interval = getattr(flipper_cfg, "send_interval", self.send_interval) - - # Connection state - self._connected = False + self._update_thread: Optional[threading.Thread] = None self._running = False - # Statistics - self._stats_lock = threading.Lock() - self._total_frames = 0 - self._pending_frames = 0 - self._processed_frames = 0 - self._sent_count = 0 - self._error_count = 0 + # Callbacks for data access + self._state_callback: Optional[Callable[[], Any]] = None + self._stats_callback: Optional[Callable[[], Dict[str, Any]]] = None - # Threads - self._rx_thread: Optional[threading.Thread] = None - self._tx_thread: Optional[threading.Thread] = None + def set_state_callback(self, callback: Callable[[], Any]) -> None: + """Set callback to get VehicleState.""" + self._state_callback = callback + if self._obd2_provider: + self._obd2_provider.set_state_callback(callback) - # IP address - self._ip_address = "0.0.0.0" - - # Page manager - self._page_manager = PageManager() - self._setup_pages() - - def _setup_pages(self) -> None: - """Setup default pages.""" - # CAN Statistics (always available) - can_page = CANStatsPage() - self._page_manager.register_page(can_page) - - # Keep reference to CAN provider for stats updates - self._can_provider = can_page.get_provider() - - # Application Status (SQLite, PostgreSQL, Queue, etc.) - app_status_page = AppStatusPage() - self._page_manager.register_page(app_status_page) - - # Keep reference to app status provider for updates - self._app_status_provider = app_status_page.get_provider() - - # UPS Status (if available) - ups_page = UPSStatusPage() - self._page_manager.register_page(ups_page) - - # System Information - system_page = SystemInfoPage() - self._page_manager.register_page(system_page) - - # Actions Menu - actions_page = ActionsPage(on_result=self._on_action_result) - self._page_manager.register_page(actions_page) - - def _on_action_result(self, result: str) -> None: - """Handle action result from actions page.""" - self.logger.info(f"Action result: {result}") - # Send result to Flipper - if self._connected: - msg = Protocol.encode_result(True, result) - self._send_raw(msg) + def set_stats_callback(self, callback: Callable[[], Dict[str, Any]]) -> None: + """Set callback to get client stats.""" + self._stats_callback = callback + if self._obd2_provider: + self._obd2_provider.set_stats_callback(callback) def initialize(self) -> bool: - """ - Initialize UART connection. 
+ """Initialize the handler.""" + if not self._enabled: + logger.info("Flipper handler disabled") + return False - Returns: - True if initialization successful - """ try: - import serial - - self.serial_port = serial.Serial( - port=self.device, - baudrate=self.baudrate, - bytesize=serial.EIGHTBITS, - parity=serial.PARITY_NONE, - stopbits=serial.STOPBITS_ONE, - timeout=0.1, + # Initialize UART protocol + self._protocol = FlipperProtocol( + device=self._device, + baudrate=self._baudrate, ) - self._ip_address = get_ip_address() + if not self._protocol.connect(): + logger.warning(f"Failed to connect to Flipper on {self._device}") + self._available = False + return False + + # Initialize page manager with OBD2 pages + self._page_manager = PageManager(self._protocol) + + # Create and register pages + obd2_page = OBD2StatsPage() + comm_page = OBD2CommPage() + ups_page = UPSStatusPage() + sys_page = SystemInfoPage() + app_page = AppStatusPage() + + self._page_manager.register_page(obd2_page) + self._page_manager.register_page(comm_page) + self._page_manager.register_page(ups_page) + self._page_manager.register_page(sys_page) + self._page_manager.register_page(app_page) + + # Get OBD2 provider from page + self._obd2_provider = obd2_page.get_provider() + + # Set callbacks if already provided + if self._state_callback: + self._obd2_provider.set_state_callback(self._state_callback) + if self._stats_callback: + self._obd2_provider.set_stats_callback(self._stats_callback) + + # Start update thread + self._running = True + self._update_thread = threading.Thread( + target=self._update_loop, + name="Flipper-Update", + daemon=True, + ) + self._update_thread.start() + self._initialized = True - - self.logger.info( - f"Flipper handler initialized on {self.device} @ {self.baudrate} baud, " - f"IP: {self._ip_address}" + logger.info( + "Flipper handler initialized", + extra={ + "device": self._device, + "update_interval": self._update_interval, + } ) return True - except ImportError: - self.logger.error("pyserial not installed. 
Run: pip install pyserial") - return False except Exception as e: - self.logger.error(f"Failed to initialize Flipper UART: {e}") + logger.error(f"Failed to initialize Flipper handler: {e}") return False - def start(self) -> None: - """Start the RX listener and TX sender threads.""" - if self._running: - return - - self._running = True - self._connected = False - - # Start RX thread (listens for commands) - self._rx_thread = threading.Thread( - target=self._rx_loop, name="FlipperRX", daemon=True - ) - self._rx_thread.start() - - # Start TX thread (sends page content when connected) - self._tx_thread = threading.Thread( - target=self._tx_loop, name="FlipperTX", daemon=True - ) - self._tx_thread.start() - - self.logger.info( - f"Flipper handler started with {self._page_manager.get_page_count()} pages" - ) - - def _rx_loop(self) -> None: - """Receive loop - listens for commands from Flipper.""" - buffer = "" - - while self._running: - try: - if not self.serial_port or not self.serial_port.is_open: - time.sleep(0.1) - continue - - # Read available data - if self.serial_port.in_waiting > 0: - data = self.serial_port.read(self.serial_port.in_waiting) - buffer += data.decode("utf-8", errors="ignore") - - # Process complete lines - while "\n" in buffer: - line, buffer = buffer.split("\n", 1) - line = line.strip() - if line: - self._process_command(line) - else: - time.sleep(0.05) - - except Exception as e: - self.logger.debug(f"RX error: {e}") - time.sleep(0.1) - - def _process_command(self, raw_command: str) -> None: + def handle(self, reading: OBD2Reading) -> bool: """ - Process received command from Flipper. + Handle incoming OBD2 reading. + + Data is displayed via the update loop, not directly from readings. + This method just ensures the handler is active. Args: - raw_command: Raw command string - """ - self.logger.debug(f"RX: {raw_command}") - - # Handle handshake commands directly - if raw_command.startswith("INIT:"): - client_id = raw_command[5:].strip() - self.logger.info(f"Handshake request from: {client_id}") - - # Update IP and send ACK - self._ip_address = get_ip_address() - ack_msg = Protocol.encode_ack("rpi5", self._ip_address) - self._send_raw(ack_msg) - - self._connected = True - self.logger.info(f"Connected to Flipper, IP: {self._ip_address}") - - # Send initial page content - self._send_page_content() - return - - if raw_command.startswith("STOP:"): - client_id = raw_command[5:].strip() - self.logger.info(f"Disconnect request from: {client_id}") - self._connected = False - return - - # Parse and process other commands - command = Protocol.decode_command(raw_command) - if command is None: - self.logger.debug(f"Unknown command: {raw_command}") - return - - # Process command via page manager - result = self._page_manager.process_command(command) - - # Send result if available - if result: - msg = Protocol.encode_result(True, result) - self._send_raw(msg) - - # Always send updated page content after command - self._send_page_content() - - def _tx_loop(self) -> None: - """Transmit loop - sends page content periodically when connected.""" - while self._running: - try: - if self._connected: - self._send_page_content() - - time.sleep(self.send_interval) - - except Exception as e: - self.logger.debug(f"TX error: {e}") - with self._stats_lock: - self._error_count += 1 - - def _send_raw(self, message: str) -> bool: - """ - Send raw message via UART. 
- - Args: - message: Message to send + reading: OBD2Reading (not directly used) Returns: - True if sent successfully + True if handler is active """ - if not self.serial_port or not self.serial_port.is_open: - return False + return self._initialized and self._enabled - try: - self.serial_port.write(message.encode("utf-8")) - self.serial_port.flush() - self.logger.debug(f"TX: {message.strip()}") - return True - except Exception as e: - self.logger.debug(f"Send error: {e}") - return False - - def _send_page_content(self) -> None: - """Send current page content to Flipper Zero.""" - if not self._connected: - return - - content = self._page_manager.get_current_content() - if content: - if self._send_raw(content): - with self._stats_lock: - self._sent_count += 1 - - def handle(self, frame: CANFrame) -> bool: - """ - Handle a single CAN frame. - - Args: - frame: CANFrame to handle - - Returns: - True (always succeeds) - """ - with self._stats_lock: - self._total_frames += 1 - self._pending_frames += 1 - - # Update CAN provider - self._can_provider.update_stats( - total=self._total_frames, - pending=self._pending_frames, - processed=self._processed_frames - ) - - return True - - def handle_batch(self, frames: List[CANFrame]) -> int: - """ - Handle a batch of CAN frames. - - Args: - frames: List of CANFrame objects - - Returns: - Number of frames processed - """ - count = len(frames) - - with self._stats_lock: - self._total_frames += count - self._processed_frames += count - self._pending_frames = max(0, self._pending_frames - count) - - # Update CAN provider - self._can_provider.update_stats( - total=self._total_frames, - pending=self._pending_frames, - processed=self._processed_frames - ) - - return count - - def update_pending(self, pending_count: int) -> None: - """ - Update pending frame count. - - Args: - pending_count: Current number of pending frames - """ - with self._stats_lock: - self._pending_frames = pending_count - - self._can_provider.update_stats( - total=self._total_frames, - pending=self._pending_frames, - processed=self._processed_frames - ) + def handle_batch(self, readings: list) -> int: + """Handle batch of readings.""" + if not self._initialized or not self._enabled: + return 0 + return len(readings) def flush(self) -> None: - """Flush - send immediate page content if connected.""" - if self._connected: - try: - self._send_page_content() - except Exception as e: - self.logger.debug(f"Flush error: {e}") + """Flush is handled by update loop.""" + pass def shutdown(self) -> None: """Shutdown the handler.""" - self.logger.info("Shutting down Flipper handler...") - self._running = False - self._connected = False - # Wait for threads - if self._rx_thread and self._rx_thread.is_alive(): - self._rx_thread.join(timeout=2.0) + if self._update_thread and self._update_thread.is_alive(): + self._update_thread.join(timeout=2.0) - if self._tx_thread and self._tx_thread.is_alive(): - self._tx_thread.join(timeout=2.0) - - # Close serial port - if self.serial_port and self.serial_port.is_open: - try: - self.serial_port.close() - except Exception as e: - self.logger.debug(f"Error closing serial port: {e}") - - # Shutdown page manager - self._page_manager.shutdown() + if self._protocol: + self._protocol.disconnect() + logger.info("Flipper handler shutdown") self._initialized = False - self.logger.info("Flipper handler stopped") def get_stats(self) -> Dict[str, Any]: - """ - Get handler statistics. 
+ """Get handler statistics.""" + stats = { + "name": self.name, + "enabled": self._enabled, + "initialized": self._initialized, + "device": self._device, + "connected": self._protocol.is_connected() if self._protocol else False, + } - Returns: - Dictionary with handler stats - """ - with self._stats_lock: - stats = { - "total_frames": self._total_frames, - "pending_frames": self._pending_frames, - "processed_frames": self._processed_frames, - "sent_count": self._sent_count, - "error_count": self._error_count, - "device": self.device, - "baudrate": self.baudrate, - "connected": self._connected, - "ip_address": self._ip_address, - } - - # Add page manager stats - stats.update(self._page_manager.get_stats()) + if self._page_manager: + stats["current_page"] = self._page_manager.get_current_page_name() return stats - def is_connected(self) -> bool: - """Check if Flipper is connected.""" - return self._connected + def _update_loop(self) -> None: + """Update loop for sending data to Flipper.""" + logger.debug("Flipper update loop started") - def get_page_manager(self) -> PageManager: - """Get page manager for external page registration.""" - return self._page_manager + while self._running: + try: + if self._page_manager and self._protocol and self._protocol.is_connected(): + # Update current page + self._page_manager.update() + + # Handle input from Flipper + self._page_manager.handle_input() + + except Exception as e: + logger.error(f"Flipper update error: {e}") + + time.sleep(self._update_interval) + + logger.debug("Flipper update loop stopped") diff --git a/can_sniffer/src/handlers/postgresql_handler.py b/can_sniffer/src/handlers/postgresql_handler.py deleted file mode 100644 index 9d90224..0000000 --- a/can_sniffer/src/handlers/postgresql_handler.py +++ /dev/null @@ -1,151 +0,0 @@ -""" -Обработчик для отправки CAN сообщений в PostgreSQL. -""" - -from typing import List, Dict, Any, Optional -from can_frame import CANFrame -from .base import BaseHandler -from postgresql_handler import get_postgresql_client -from postgresql_handler.postgresql_client import ConnectionStatus -from config import config - - -class PostgreSQLHandler(BaseHandler): - """Обработчик для отправки в PostgreSQL.""" - - def __init__(self, enabled: Optional[bool] = None): - """ - Инициализация обработчика PostgreSQL. - - Args: - enabled: Включен ли обработчик. 
Если None, берется из config.postgresql.enabled - """ - super().__init__( - name="postgresql", - enabled=enabled if enabled is not None else config.postgresql.enabled - ) - self.postgresql_client = None - - def initialize(self) -> bool: - """Инициализация PostgreSQL клиента.""" - if not self.enabled: - return False - - try: - self.postgresql_client = get_postgresql_client() - self._initialized = True - self.logger.info("PostgreSQL handler initialized") - return True - except Exception as e: - self.logger.error(f"Failed to initialize PostgreSQL: {e}", exc_info=True) - self.postgresql_client = None - return False - - def handle(self, frame: CANFrame) -> bool: - """Обработка одного CAN фрейма.""" - if not self.enabled or not self._initialized or not self.postgresql_client: - return False - - try: - return self.postgresql_client.write_message( - interface=frame.bus, - can_id=frame.can_id, - dlc=frame.dlc, - data=frame.data, - timestamp=frame.timestamp - ) - except Exception as e: - self.logger.error( - f"Failed to send frame to PostgreSQL: {e}", - exc_info=True, - extra={"can_id": frame.can_id_hex} - ) - return False - - def handle_batch(self, frames: List[CANFrame]) -> int: - """ - Обработка батча CAN фреймов. - - Неблокирующий метод - при ошибках или переполнении очереди PostgreSQL - просто пропускает батч, не останавливая обработку других handlers. - """ - if not self.enabled or not self._initialized or not self.postgresql_client or not frames: - return 0 - - try: - # Проверяем состояние соединения перед обработкой - if hasattr(self.postgresql_client, 'connection_status'): - if self.postgresql_client.connection_status != ConnectionStatus.CONNECTED: - # Соединение недоступно - пропускаем батч без ошибки - return 0 - - # Конвертируем CANFrame в формат для PostgreSQL - postgresql_messages = [] - for frame in frames: - postgresql_messages.append({ - "interface": frame.bus, - "can_id": frame.can_id, - "can_id_hex": frame.can_id_hex, - "dlc": frame.dlc, - "data": frame.data, - "data_hex": frame.data_hex, - "timestamp": frame.timestamp, # float timestamp в секундах - "is_extended": frame.is_extended - }) - - if postgresql_messages: - # Пытаемся добавить в очередь PostgreSQL (неблокирующий режим) - # Если очередь переполнена, пропускаем батч - return self.postgresql_client.write_messages_batch(postgresql_messages) - return 0 - except Exception as e: - # Ошибка не должна останавливать обработку других handlers - # Логируем, но не пробрасываем исключение - self.logger.error( - f"Failed to send frames batch to PostgreSQL: {e}", - exc_info=True, - extra={"batch_size": len(frames)} - ) - return 0 - - def flush(self) -> None: - """Принудительная отправка накопленных данных.""" - # PostgreSQL forwarder сам управляет flush через свой цикл - # Но можно вызвать явный flush если нужно - pass - - def shutdown(self) -> None: - """Корректное завершение работы обработчика.""" - if self.postgresql_client: - try: - self.postgresql_client.close() - self.logger.info("PostgreSQL handler closed") - except Exception as e: - self.logger.error(f"Error closing PostgreSQL: {e}", exc_info=True) - self._initialized = False - - def get_stats(self) -> Dict[str, Any]: - """Получение статистики обработчика.""" - if self.postgresql_client: - try: - stats = self.postgresql_client.get_stats() - stats["handler"] = self.name - stats["enabled"] = self.enabled - stats["initialized"] = self._initialized - return stats - except Exception: - pass - return { - "handler": self.name, - "enabled": self.enabled, - "initialized": 
self._initialized - } - - def start(self) -> None: - """Запуск PostgreSQL forwarder (если используется).""" - if self.postgresql_client: - try: - self.postgresql_client.start() - except Exception as e: - self.logger.error(f"Failed to start PostgreSQL forwarder: {e}", exc_info=True) - diff --git a/can_sniffer/src/handlers/realtime_handler.py b/can_sniffer/src/handlers/realtime_handler.py new file mode 100644 index 0000000..f896a83 --- /dev/null +++ b/can_sniffer/src/handlers/realtime_handler.py @@ -0,0 +1,114 @@ +""" +Realtime Handler for OBD2 Data. + +Updates VehicleState in memory from OBD2 readings. +""" + +from typing import Dict, Any, Optional + +from logger import get_logger +from vehicle.state_manager import VehicleStateManager +from obd2.pids import OBD2Reading +from .base import BaseHandler + +logger = get_logger(__name__) + + +class RealtimeHandler(BaseHandler): + """ + Handler that updates VehicleState from OBD2 readings. + + Maintains the in-memory vehicle state for real-time access. + """ + + def __init__(self, enabled: bool = True): + super().__init__(name="realtime", enabled=enabled) + self._state_manager: Optional[VehicleStateManager] = None + self._updates_count = 0 + + def initialize(self) -> bool: + """Initialize the handler.""" + try: + self._state_manager = VehicleStateManager() + self._initialized = True + logger.info("Realtime handler initialized") + return True + except Exception as e: + logger.error(f"Failed to initialize realtime handler: {e}") + return False + + def handle(self, reading: OBD2Reading) -> bool: + """ + Update vehicle state from a single reading. + + Args: + reading: OBD2Reading to process + + Returns: + True if state was updated + """ + if not self._initialized or not self._enabled: + return False + + if not reading.is_valid: + return False + + try: + updated = self._state_manager.update_from_reading(reading) + if updated: + self._updates_count += 1 + return updated + except Exception as e: + logger.error(f"Error updating vehicle state: {e}") + return False + + def handle_batch(self, readings: list) -> int: + """ + Update vehicle state from multiple readings. + + Args: + readings: List of OBD2Reading objects + + Returns: + Number of successful updates + """ + if not self._initialized or not self._enabled: + return 0 + + count = 0 + for reading in readings: + if self.handle(reading): + count += 1 + + return count + + def flush(self) -> None: + """Flush is not needed for realtime handler.""" + pass + + def shutdown(self) -> None: + """Shutdown the handler.""" + logger.info( + "Realtime handler shutdown", + extra={"total_updates": self._updates_count} + ) + self._initialized = False + + def get_stats(self) -> Dict[str, Any]: + """Get handler statistics.""" + state_stats = {} + if self._state_manager: + state_stats = self._state_manager.get_stats() + + return { + "name": self.name, + "enabled": self._enabled, + "initialized": self._initialized, + "updates_count": self._updates_count, + **state_stats + } + + @property + def state_manager(self) -> Optional[VehicleStateManager]: + """Get the VehicleStateManager instance.""" + return self._state_manager diff --git a/can_sniffer/src/handlers/storage_handler.py b/can_sniffer/src/handlers/storage_handler.py index 8bd0442..5c00688 100644 --- a/can_sniffer/src/handlers/storage_handler.py +++ b/can_sniffer/src/handlers/storage_handler.py @@ -1,119 +1,187 @@ """ -Обработчик для сохранения CAN сообщений в SQLite. +Storage Handler for OBD2 Data. + +Saves OBD2 readings to SQLite database with batching. 
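+
+Readings are buffered in memory and written in batches: a flush happens once
+batch_size readings have accumulated or flush_interval seconds have passed
+since the last write. Usage sketch (parameter values are illustrative):
+
+    handler = StorageHandler(enabled=True, batch_size=50, flush_interval=1.0)
+    handler.initialize()
+    handler.handle(reading)   # buffered; written when the batch fills or the interval elapses
+    handler.flush()           # force pending readings to SQLite
+    handler.shutdown()        # final flush and cleanup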
""" -from typing import List, Dict, Any -from can_frame import CANFrame +import threading +import time +from typing import Dict, Any, List, Optional + +from logger import get_logger +from storage.storage import get_storage, Storage +from obd2.pids import OBD2Reading from .base import BaseHandler -from storage import get_storage + +logger = get_logger(__name__) class StorageHandler(BaseHandler): - """Обработчик для сохранения в SQLite.""" - - def __init__(self, enabled: bool = True): - """Инициализация обработчика storage.""" + """ + Handler that saves OBD2 readings to SQLite. + + Supports batching for efficient database writes. + """ + + def __init__( + self, + enabled: bool = True, + batch_size: int = 50, + flush_interval: float = 1.0 + ): + """ + Initialize storage handler. + + Args: + enabled: Whether handler is active + batch_size: Number of readings to batch before write + flush_interval: Maximum time before flushing batch (seconds) + """ super().__init__(name="storage", enabled=enabled) - self.storage = None - + self._storage: Optional[Storage] = None + self._batch_size = batch_size + self._flush_interval = flush_interval + + self._batch: List[OBD2Reading] = [] + self._batch_lock = threading.Lock() + self._last_flush_time = time.time() + + self._saved_count = 0 + self._batch_count = 0 + def initialize(self) -> bool: - """Инициализация storage.""" - if not self.enabled: - return False - + """Initialize the handler.""" try: - self.storage = get_storage() + self._storage = get_storage() self._initialized = True - self.logger.info("Storage handler initialized") + logger.info( + "Storage handler initialized", + extra={ + "batch_size": self._batch_size, + "flush_interval": self._flush_interval + } + ) return True except Exception as e: - self.logger.error(f"Failed to initialize storage: {e}", exc_info=True) - self.storage = None + logger.error(f"Failed to initialize storage handler: {e}") return False - - def handle(self, frame: CANFrame) -> bool: - """Обработка одного CAN фрейма.""" - if not self.enabled or not self._initialized or not self.storage: - return False - - try: - message_id = self.storage.save_message( - interface=frame.bus, - can_id=frame.can_id, - dlc=frame.dlc, - data=frame.data, - timestamp=frame.timestamp - ) - return message_id is not None - except Exception as e: - self.logger.error( - f"Failed to save frame: {e}", - exc_info=True, - extra={"can_id": frame.can_id_hex} - ) - return False - - def handle_batch(self, frames: List[CANFrame]) -> int: - """Обработка батча CAN фреймов.""" - if not self.enabled or not self._initialized or not self.storage or not frames: - return 0 - - try: - # Конвертируем CANFrame в формат для storage - messages = [] - for frame in frames: - messages.append(( - frame.timestamp, # float timestamp в секундах - frame.bus, - frame.can_id, - frame.dlc, - frame.data - )) - - saved_count = self.storage.save_messages_batch(messages) - if saved_count != len(frames): - self.logger.warning( - f"Not all frames saved: {saved_count}/{len(frames)}", - extra={"batch_size": len(frames)} - ) - return saved_count - except Exception as e: - self.logger.error( - f"Failed to save frames batch: {e}", - exc_info=True, - extra={"batch_size": len(frames)} - ) - return 0 - - def flush(self) -> None: - """Принудительная отправка накопленных данных.""" - # SQLite не требует явного flush, данные сохраняются сразу - pass - - def shutdown(self) -> None: - """Корректное завершение работы обработчика. 
- Примечание: НЕ закрываем Storage singleton здесь, так как он может - использоваться другими компонентами (например, для синхронизации с PostgreSQL). - Storage закрывается отдельно при полном завершении приложения. + def handle(self, reading: OBD2Reading) -> bool: """ + Add reading to batch. + + Args: + reading: OBD2Reading to save + + Returns: + True if reading was accepted + """ + if not self._initialized or not self._enabled: + return False + + with self._batch_lock: + self._batch.append(reading) + + # Check if we should flush + should_flush = ( + len(self._batch) >= self._batch_size or + (time.time() - self._last_flush_time) >= self._flush_interval + ) + + if should_flush: + self.flush() + + return True + + def handle_batch(self, readings: list) -> int: + """ + Add multiple readings. + + Args: + readings: List of OBD2Reading objects + + Returns: + Number of readings accepted + """ + if not self._initialized or not self._enabled: + return 0 + + with self._batch_lock: + self._batch.extend(readings) + + should_flush = ( + len(self._batch) >= self._batch_size or + (time.time() - self._last_flush_time) >= self._flush_interval + ) + + if should_flush: + self.flush() + + return len(readings) + + def flush(self) -> None: + """Flush pending readings to database.""" + if not self._storage: + return + + with self._batch_lock: + if not self._batch: + return + + batch_to_save = self._batch + self._batch = [] + self._last_flush_time = time.time() + + try: + saved = self._storage.save_readings_batch(batch_to_save) + self._saved_count += saved + self._batch_count += 1 + + logger.debug( + f"Flushed {saved} readings to storage", + extra={"batch_number": self._batch_count} + ) + except Exception as e: + logger.error(f"Failed to flush readings: {e}") + # Put readings back in batch for retry + with self._batch_lock: + self._batch = batch_to_save + self._batch + + def shutdown(self) -> None: + """Shutdown the handler.""" + # Final flush + self.flush() + + logger.info( + "Storage handler shutdown", + extra={ + "total_saved": self._saved_count, + "total_batches": self._batch_count + } + ) self._initialized = False - self.logger.info("Storage handler shutdown complete") - + def get_stats(self) -> Dict[str, Any]: - """Получение статистики обработчика.""" - if self.storage: - try: - stats = self.storage.get_stats() - stats["handler"] = self.name - stats["enabled"] = self.enabled - stats["initialized"] = self._initialized - return stats - except Exception: - pass + """Get handler statistics.""" + storage_stats = {} + if self._storage: + storage_stats = self._storage.get_stats() + + with self._batch_lock: + pending = len(self._batch) + return { - "handler": self.name, - "enabled": self.enabled, - "initialized": self._initialized + "name": self.name, + "enabled": self._enabled, + "initialized": self._initialized, + "saved_count": self._saved_count, + "batch_count": self._batch_count, + "pending_in_batch": pending, + "batch_size": self._batch_size, + **storage_stats } + @property + def storage(self) -> Optional[Storage]: + """Get the Storage instance.""" + return self._storage diff --git a/can_sniffer/src/main.py b/can_sniffer/src/main.py index f6c006f..c57da39 100644 --- a/can_sniffer/src/main.py +++ b/can_sniffer/src/main.py @@ -1,84 +1,377 @@ +#!/usr/bin/env python3 """ -Главный модуль CAN Sniffer приложения. +OBD2 Client - Onboard Computer for Vehicle Diagnostics. -Только код запуска приложения. Вся логика обработки сообщений -автоматически применяется в модуле socket_can. 
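+Typical invocation (a sketch; the flags match the argparse options defined in
+main() below, and the invocation path is illustrative):
+
+    python src/main.py --interface can0 --stats-interval 10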
+Main entry point for the OBD2 client application. +Polls vehicle ECU for diagnostic data via CAN bus. """ import signal -import subprocess import sys import time -from config import config -from logger import get_logger -from socket_can import CANSniffer +import argparse +from typing import Optional, List + +from config import get_config, Config +from logger import get_logger +from obd2.transceiver import CANTransceiver +from obd2.response_matcher import ResponseMatcher +from obd2.poller import OBD2Poller, PollingGroup +from obd2.pids import OBD2Reading +from handlers.realtime_handler import RealtimeHandler +from handlers.storage_handler import StorageHandler +from handlers.flipper_handler import FlipperHandler +from storage.storage import get_storage +from vehicle.state_manager import VehicleStateManager -# Инициализация логгера logger = get_logger(__name__) -# Глобальная переменная для graceful shutdown -sniffer: CANSniffer = None + +class OBD2Client: + """ + Main OBD2 Client Application. + + Orchestrates all components for OBD2 communication: + - CAN transceiver for TX/RX + - Response matcher for request correlation + - Poller for periodic PID requests + - Handlers for data processing + """ + + def __init__(self, config: Optional[Config] = None): + """ + Initialize OBD2 client. + + Args: + config: Configuration object (uses global if not provided) + """ + self.config = config or get_config() + + # Core components + self._transceiver: Optional[CANTransceiver] = None + self._matcher: Optional[ResponseMatcher] = None + self._poller: Optional[OBD2Poller] = None + + # Handlers + self._realtime_handler: Optional[RealtimeHandler] = None + self._storage_handler: Optional[StorageHandler] = None + self._flipper_handler: Optional[FlipperHandler] = None + self._handlers: List = [] + + # State + self._running = False + self._state_manager: Optional[VehicleStateManager] = None + + def start(self) -> bool: + """ + Start the OBD2 client. 
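+
+        Typical lifecycle (sketch; storage, transceiver, matcher, handlers and
+        poller are brought up in that order):
+
+            client = OBD2Client()
+            if client.start():
+                print(client.get_stats())
+                client.stop()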
+ + Returns: + True if started successfully + """ + logger.info( + "Starting OBD2 Client", + extra={ + "interface": self.config.can.interface, + "bitrate": self.config.can.bitrate, + } + ) + + try: + # Initialize storage + storage = get_storage() + session_id = storage.start_session() + logger.info(f"Started session {session_id}") + + # Initialize transceiver + self._transceiver = CANTransceiver( + interface=self.config.can.interface, + bitrate=self.config.can.bitrate, + ) + + if not self._transceiver.start(): + logger.error("Failed to start CAN transceiver") + return False + + # Initialize response matcher + self._matcher = ResponseMatcher( + timeout_ms=self.config.obd2.request_timeout_ms, + max_retries=self.config.obd2.retry_count, + ) + + # Initialize handlers + self._init_handlers() + + # Initialize poller + self._poller = OBD2Poller( + transceiver=self._transceiver, + matcher=self._matcher, + reading_callback=self._on_reading, + auto_discover=self.config.obd2.auto_discover, + ) + + # Add polling groups from config + for group_config in self.config.obd2.polling_groups: + if group_config.enabled: + self._poller.add_group(PollingGroup( + name=group_config.name, + interval_ms=group_config.interval_ms, + pids=group_config.pids, + enabled=group_config.enabled, + )) + + # Start poller + if not self._poller.start(): + logger.error("Failed to start OBD2 poller") + return False + + self._running = True + logger.info("OBD2 Client started successfully") + return True + + except Exception as e: + logger.error(f"Failed to start OBD2 client: {e}", exc_info=True) + self.stop() + return False + + def _init_handlers(self) -> None: + """Initialize data handlers.""" + # Realtime handler (updates VehicleState) + self._realtime_handler = RealtimeHandler(enabled=True) + if self._realtime_handler.initialize(): + self._handlers.append(self._realtime_handler) + self._state_manager = self._realtime_handler.state_manager + + # Storage handler (saves to SQLite) + self._storage_handler = StorageHandler( + enabled=True, + batch_size=50, + flush_interval=1.0, + ) + if self._storage_handler.initialize(): + self._handlers.append(self._storage_handler) + + # Flipper handler (display on Flipper Zero) + if self.config.flipper.enabled: + self._flipper_handler = FlipperHandler( + enabled=True, + device=self.config.flipper.device, + baudrate=self.config.flipper.baudrate, + update_interval=self.config.flipper.update_interval, + ) + if self._flipper_handler.initialize(): + # Set callbacks for data access + self._flipper_handler.set_state_callback(lambda: self.vehicle_state) + self._flipper_handler.set_stats_callback(self.get_stats) + self._handlers.append(self._flipper_handler) + + logger.info(f"Initialized {len(self._handlers)} handlers") + + def _on_reading(self, reading: OBD2Reading) -> None: + """ + Handle incoming OBD2 reading. 
+ + Args: + reading: Decoded OBD2 reading + """ + # Forward to all handlers + for handler in self._handlers: + try: + handler.handle(reading) + except Exception as e: + logger.error(f"Handler {handler.name} error: {e}") + + def stop(self) -> None: + """Stop the OBD2 client.""" + if not self._running: + return + + logger.info("Stopping OBD2 Client") + self._running = False + + # Stop poller first + if self._poller: + self._poller.stop() + + # Shutdown handlers + for handler in self._handlers: + try: + handler.shutdown() + except Exception as e: + logger.error(f"Error shutting down handler {handler.name}: {e}") + + # Stop transceiver + if self._transceiver: + self._transceiver.stop() + + # End session + try: + storage = get_storage() + storage.end_session() + storage.close() + except Exception as e: + logger.error(f"Error closing storage: {e}") + + logger.info("OBD2 Client stopped") + + def get_stats(self) -> dict: + """Get client statistics.""" + stats = { + "running": self._running, + } + + if self._transceiver: + stats["transceiver"] = self._transceiver.get_stats() + + if self._poller: + stats["poller"] = self._poller.get_stats() + + if self._matcher: + stats["matcher"] = self._matcher.get_stats() + + for handler in self._handlers: + stats[f"handler_{handler.name}"] = handler.get_stats() + + return stats + + @property + def vehicle_state(self): + """Get current vehicle state.""" + if self._state_manager: + return self._state_manager.state + return None + + @property + def is_running(self) -> bool: + """Check if client is running.""" + return self._running + + +# Global client instance for signal handler +_client: Optional[OBD2Client] = None def signal_handler(sig, frame): - """Обработчик сигналов для graceful shutdown.""" - logger.info("Received shutdown signal, stopping gracefully...") - if sniffer: - try: - sniffer.stop() - except Exception as e: - logger.error(f"Error during shutdown: {e}", exc_info=True) - # Даем время на завершение потоков перед выходом - import time - time.sleep(0.5) + """Handle shutdown signals.""" + logger.info(f"Received signal {sig}, shutting down...") + if _client: + _client.stop() sys.exit(0) +def print_vehicle_state(client: OBD2Client) -> None: + """Print current vehicle state to console.""" + state = client.vehicle_state + if state is None: + print("No vehicle state available") + return + + print("\n" + "=" * 50) + print("VEHICLE STATE") + print("=" * 50) + + if state.ecu_connected: + print(f"ECU: Connected") + else: + print(f"ECU: Disconnected") + + if state.rpm is not None: + print(f"RPM: {state.rpm:.0f}") + if state.speed is not None: + print(f"Speed: {state.speed:.0f} km/h") + if state.coolant_temp is not None: + print(f"Coolant: {state.coolant_temp:.0f} °C") + if state.throttle_pos is not None: + print(f"Throttle: {state.throttle_pos:.1f} %") + if state.engine_load is not None: + print(f"Load: {state.engine_load:.1f} %") + if state.fuel_level is not None: + print(f"Fuel: {state.fuel_level:.1f} %") + + print("=" * 50) + + def main(): - """Главная функция приложения - только запуск.""" - global sniffer - - # Регистрируем обработчики сигналов для graceful shutdown + """Main entry point.""" + global _client + + parser = argparse.ArgumentParser( + description="OBD2 Client - Vehicle Diagnostics via CAN" + ) + parser.add_argument( + "-i", "--interface", + default=None, + help="CAN interface (e.g., can0, vcan0)" + ) + parser.add_argument( + "-v", "--verbose", + action="store_true", + help="Enable verbose logging" + ) + parser.add_argument( + "--stats-interval", 
+ type=int, + default=10, + help="Statistics print interval in seconds (0 to disable)" + ) + + args = parser.parse_args() + + # Override config if interface specified + config = get_config() + if args.interface: + config.can.interface = args.interface + + # Setup signal handlers signal.signal(signal.SIGINT, signal_handler) signal.signal(signal.SIGTERM, signal_handler) - - logger.info("CAN Sniffer application starting", extra={ - "interfaces": config.can.interfaces, - "bitrate": config.can.bitrate, - "listen_only": config.can.listen_only - }) - - logger.info("Configuration loaded", extra={ - "postgresql_enabled": config.postgresql.enabled, - "postgresql_host": config.postgresql.host if config.postgresql.enabled else None, - "storage_path": config.storage.database_path - }) - + + # Create and start client + _client = OBD2Client(config) + + if not _client.start(): + logger.error("Failed to start OBD2 client") + sys.exit(1) + + logger.info( + "OBD2 Client running", + extra={ + "interface": config.can.interface, + "polling_groups": len(config.obd2.polling_groups), + } + ) + + # Main loop try: - # Создаем и запускаем CAN Sniffer - # MessageProcessor автоматически инициализируется и используется внутри CANSniffer - sniffer = CANSniffer() - sniffer.start() - - logger.info("Application initialized successfully. Reading CAN messages...") - logger.info("Press Ctrl+C to stop") - - # Основной цикл - периодически выводим статистику - while True: - time.sleep(10) # Выводим статистику каждые 10 секунд - - stats = sniffer.get_stats() - logger.info("Statistics", extra=stats) - + last_stats_time = time.time() + + while _client.is_running: + time.sleep(1) + + # Print stats periodically + if args.stats_interval > 0: + if time.time() - last_stats_time >= args.stats_interval: + last_stats_time = time.time() + print_vehicle_state(_client) + + stats = _client.get_stats() + poller_stats = stats.get("poller", {}) + logger.info( + "Statistics", + extra={ + "requests": poller_stats.get("total_requests", 0), + "responses": poller_stats.get("successful_responses", 0), + "success_rate": poller_stats.get("success_rate", 0), + "avg_latency_ms": poller_stats.get("avg_latency_ms", 0), + } + ) + except KeyboardInterrupt: - logger.info("Keyboard interrupt received") - except Exception as e: - logger.error(f"Unexpected error: {e}", exc_info=True) + logger.info("Interrupted by user") finally: - if sniffer: - sniffer.stop() - logger.info("Application stopped") + if _client: + _client.stop() -if __name__ == '__main__': - main() \ No newline at end of file +if __name__ == "__main__": + main() diff --git a/can_sniffer/src/obd2/__init__.py b/can_sniffer/src/obd2/__init__.py new file mode 100644 index 0000000..64dc4de --- /dev/null +++ b/can_sniffer/src/obd2/__init__.py @@ -0,0 +1,29 @@ +""" +OBD2 Protocol Implementation Module. 
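+
+A minimal composition sketch (it mirrors the wiring in main.py; parameter
+values here are illustrative only):
+
+    from obd2 import CANTransceiver, ResponseMatcher, OBD2Poller, PollingGroup
+
+    transceiver = CANTransceiver(interface="can0", bitrate=500000)
+    transceiver.start()
+
+    matcher = ResponseMatcher(timeout_ms=200, max_retries=2)
+    poller = OBD2Poller(
+        transceiver=transceiver,
+        matcher=matcher,
+        reading_callback=lambda r: print(r.pid_name, r.value, r.unit),
+        auto_discover=True,
+    )
+    poller.add_group(PollingGroup(name="fast", interval_ms=200,
+                                  pids=[0x0C, 0x0D, 0x11], enabled=True))
+    poller.start()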
+ +This module provides ISO 15765-4 (CAN) OBD2 protocol support including: +- Request/Response encoding and decoding +- PID registry with decoding formulas +- Polling engine with configurable groups +- Request/Response correlation +""" + +from .protocol import OBD2Request, OBD2Response, OBD2Mode +from .pids import PIDDefinition, PIDRegistry, OBD2Reading +from .transceiver import CANTransceiver +from .response_matcher import ResponseMatcher, PendingRequest +from .poller import OBD2Poller, PollingGroup + +__all__ = [ + "OBD2Request", + "OBD2Response", + "OBD2Mode", + "PIDDefinition", + "PIDRegistry", + "OBD2Reading", + "CANTransceiver", + "ResponseMatcher", + "PendingRequest", + "OBD2Poller", + "PollingGroup", +] diff --git a/can_sniffer/src/obd2/pids.py b/can_sniffer/src/obd2/pids.py new file mode 100644 index 0000000..2551aa0 --- /dev/null +++ b/can_sniffer/src/obd2/pids.py @@ -0,0 +1,619 @@ +""" +OBD2 PID Registry and Decoding. + +Contains definitions and decoding formulas for all supported PIDs. +Based on SAE J1979 / ISO 15031-5 standard. +""" + +from dataclasses import dataclass, field +from typing import Callable, Optional, Dict, Any, List +from enum import Enum +import time + + +class PIDCategory(Enum): + """Categories of OBD2 PIDs.""" + + ENGINE = "engine" + FUEL = "fuel" + TEMPERATURE = "temperature" + SPEED = "speed" + DIAGNOSTICS = "diagnostics" + VEHICLE_INFO = "vehicle_info" + OXYGEN = "oxygen" + OTHER = "other" + + +@dataclass +class PIDDefinition: + """ + Definition of an OBD2 PID with decoding formula. + + Attributes: + pid: Parameter ID (0x00-0xFF) + name: Human-readable name + short_name: Short identifier for display + unit: Unit of measurement + min_value: Minimum valid value + max_value: Maximum valid value + bytes_count: Number of data bytes expected + decoder: Function to decode raw bytes to value + category: PID category for grouping + description: Detailed description + """ + + pid: int + name: str + short_name: str + unit: str + min_value: float + max_value: float + bytes_count: int + decoder: Callable[[bytes], float] + category: PIDCategory = PIDCategory.OTHER + description: str = "" + + def decode(self, data: bytes) -> Optional[float]: + """ + Decode raw bytes to value. + + Args: + data: Raw data bytes from OBD2 response + + Returns: + Decoded value or None if insufficient data + """ + if len(data) < self.bytes_count: + return None + + try: + value = self.decoder(data) + # Clamp to valid range + return max(self.min_value, min(self.max_value, value)) + except (IndexError, ValueError, ZeroDivisionError): + return None + + @property + def pid_hex(self) -> str: + """PID as hex string.""" + return f"0x{self.pid:02X}" + + +@dataclass +class OBD2Reading: + """ + A decoded OBD2 reading with metadata. 
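+
+    Example (byte values are illustrative; the reading is produced by
+    PIDRegistry.create_reading below):
+
+        reading = get_pid_registry().create_reading(
+            pid=0x0C, data=bytes([0x1A, 0xF8]), timestamp_ns=time.time_ns())
+        reading.pid_name   # "Engine RPM"
+        reading.value      # (0x1A * 256 + 0xF8) / 4 = 1726.0
+        reading.unit       # "rpm"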
+ + Attributes: + timestamp_ns: Reading timestamp in nanoseconds + pid: Parameter ID + pid_name: Human-readable PID name + raw_data: Original raw bytes + value: Decoded value + unit: Unit of measurement + is_valid: Whether decoding was successful + ecu_id: ECU that provided the response + """ + + timestamp_ns: int + pid: int + pid_name: str + raw_data: bytes + value: Optional[float] + unit: str + is_valid: bool + ecu_id: int = 0 + + @property + def timestamp(self) -> float: + """Timestamp in seconds.""" + return self.timestamp_ns / 1_000_000_000 + + @property + def pid_hex(self) -> str: + """PID as hex string.""" + return f"0x{self.pid:02X}" + + def to_dict(self) -> Dict[str, Any]: + """Convert to dictionary for serialization.""" + return { + "timestamp": self.timestamp, + "timestamp_ns": self.timestamp_ns, + "pid": self.pid, + "pid_hex": self.pid_hex, + "pid_name": self.pid_name, + "raw_data": self.raw_data.hex(), + "value": self.value, + "unit": self.unit, + "is_valid": self.is_valid, + "ecu_id": self.ecu_id, + } + + +# ============================================================================ +# PID Decoding Formulas (SAE J1979) +# ============================================================================ + +def _decode_supported_pids(data: bytes) -> float: + """Decode supported PIDs bitmap (returns raw 32-bit value).""" + if len(data) >= 4: + return (data[0] << 24) | (data[1] << 16) | (data[2] << 8) | data[3] + return 0.0 + + +def _decode_percent(data: bytes) -> float: + """Decode percentage value: A * 100 / 255.""" + return data[0] * 100.0 / 255.0 + + +def _decode_temp(data: bytes) -> float: + """Decode temperature: A - 40 (°C).""" + return data[0] - 40.0 + + +def _decode_rpm(data: bytes) -> float: + """Decode engine RPM: (A * 256 + B) / 4.""" + return (data[0] * 256 + data[1]) / 4.0 + + +def _decode_speed(data: bytes) -> float: + """Decode vehicle speed: A (km/h).""" + return float(data[0]) + + +def _decode_timing_advance(data: bytes) -> float: + """Decode timing advance: A / 2 - 64 (degrees).""" + return data[0] / 2.0 - 64.0 + + +def _decode_maf(data: bytes) -> float: + """Decode MAF air flow rate: (A * 256 + B) / 100 (g/s).""" + return (data[0] * 256 + data[1]) / 100.0 + + +def _decode_fuel_pressure(data: bytes) -> float: + """Decode fuel pressure: A * 3 (kPa).""" + return data[0] * 3.0 + + +def _decode_intake_pressure(data: bytes) -> float: + """Decode intake manifold pressure: A (kPa).""" + return float(data[0]) + + +def _decode_fuel_trim(data: bytes) -> float: + """Decode fuel trim: (A - 128) * 100 / 128 (%).""" + return (data[0] - 128) * 100.0 / 128.0 + + +def _decode_fuel_system_status(data: bytes) -> float: + """Decode fuel system status (bitmap).""" + return float(data[0]) + + +def _decode_o2_voltage(data: bytes) -> float: + """Decode O2 sensor voltage: A / 200 (V).""" + return data[0] / 200.0 + + +def _decode_o2_trim(data: bytes) -> float: + """Decode O2 sensor trim: (B - 128) * 100 / 128 (%) if B != 0xFF.""" + if len(data) >= 2 and data[1] != 0xFF: + return (data[1] - 128) * 100.0 / 128.0 + return 0.0 + + +def _decode_runtime(data: bytes) -> float: + """Decode engine run time: A * 256 + B (seconds).""" + return data[0] * 256 + data[1] + + +def _decode_distance_mil(data: bytes) -> float: + """Decode distance traveled with MIL on: A * 256 + B (km).""" + return data[0] * 256 + data[1] + + +def _decode_fuel_rail_pressure(data: bytes) -> float: + """Decode fuel rail pressure: (A * 256 + B) * 0.079 (kPa).""" + return (data[0] * 256 + data[1]) * 0.079 + + +def 
_decode_fuel_rail_pressure_diesel(data: bytes) -> float: + """Decode fuel rail pressure (diesel): (A * 256 + B) * 10 (kPa).""" + return (data[0] * 256 + data[1]) * 10.0 + + +def _decode_egr_commanded(data: bytes) -> float: + """Decode commanded EGR: A * 100 / 255 (%).""" + return data[0] * 100.0 / 255.0 + + +def _decode_egr_error(data: bytes) -> float: + """Decode EGR error: (A - 128) * 100 / 128 (%).""" + return (data[0] - 128) * 100.0 / 128.0 + + +def _decode_evap_purge(data: bytes) -> float: + """Decode commanded evaporative purge: A * 100 / 255 (%).""" + return data[0] * 100.0 / 255.0 + + +def _decode_fuel_level(data: bytes) -> float: + """Decode fuel tank level: A * 100 / 255 (%).""" + return data[0] * 100.0 / 255.0 + + +def _decode_warmups(data: bytes) -> float: + """Decode warm-ups since codes cleared: A (count).""" + return float(data[0]) + + +def _decode_distance_cleared(data: bytes) -> float: + """Decode distance traveled since codes cleared: A * 256 + B (km).""" + return data[0] * 256 + data[1] + + +def _decode_barometric_pressure(data: bytes) -> float: + """Decode barometric pressure: A (kPa).""" + return float(data[0]) + + +def _decode_catalyst_temp(data: bytes) -> float: + """Decode catalyst temperature: (A * 256 + B) / 10 - 40 (°C).""" + return (data[0] * 256 + data[1]) / 10.0 - 40.0 + + +def _decode_control_module_voltage(data: bytes) -> float: + """Decode control module voltage: (A * 256 + B) / 1000 (V).""" + return (data[0] * 256 + data[1]) / 1000.0 + + +def _decode_absolute_load(data: bytes) -> float: + """Decode absolute load value: (A * 256 + B) * 100 / 255 (%).""" + return (data[0] * 256 + data[1]) * 100.0 / 255.0 + + +def _decode_equivalence_ratio(data: bytes) -> float: + """Decode commanded equivalence ratio: (A * 256 + B) / 32768.""" + return (data[0] * 256 + data[1]) / 32768.0 + + +def _decode_relative_throttle(data: bytes) -> float: + """Decode relative throttle position: A * 100 / 255 (%).""" + return data[0] * 100.0 / 255.0 + + +def _decode_absolute_throttle_b(data: bytes) -> float: + """Decode absolute throttle position B: A * 100 / 255 (%).""" + return data[0] * 100.0 / 255.0 + + +def _decode_accelerator_pedal_d(data: bytes) -> float: + """Decode accelerator pedal position D: A * 100 / 255 (%).""" + return data[0] * 100.0 / 255.0 + + +def _decode_accelerator_pedal_e(data: bytes) -> float: + """Decode accelerator pedal position E: A * 100 / 255 (%).""" + return data[0] * 100.0 / 255.0 + + +def _decode_commanded_throttle(data: bytes) -> float: + """Decode commanded throttle actuator: A * 100 / 255 (%).""" + return data[0] * 100.0 / 255.0 + + +def _decode_time_mil(data: bytes) -> float: + """Decode time run with MIL on: A * 256 + B (minutes).""" + return data[0] * 256 + data[1] + + +def _decode_time_cleared(data: bytes) -> float: + """Decode time since codes cleared: A * 256 + B (minutes).""" + return data[0] * 256 + data[1] + + +def _decode_fuel_rate(data: bytes) -> float: + """Decode engine fuel rate: (A * 256 + B) / 20 (L/h).""" + return (data[0] * 256 + data[1]) / 20.0 + + +def _decode_ethanol_percent(data: bytes) -> float: + """Decode ethanol fuel percentage: A * 100 / 255 (%).""" + return data[0] * 100.0 / 255.0 + + +def _decode_odometer(data: bytes) -> float: + """Decode odometer: A*2^24 + B*2^16 + C*2^8 + D / 10 (km).""" + if len(data) >= 4: + value = (data[0] << 24) | (data[1] << 16) | (data[2] << 8) | data[3] + return value / 10.0 + return 0.0 + + +# ============================================================================ +# PID Registry +# 
============================================================================ + +class PIDRegistry: + """ + Registry of all supported OBD2 PIDs. + + Provides lookup, decoding, and filtering capabilities for PIDs. + """ + + def __init__(self): + self._pids: Dict[int, PIDDefinition] = {} + self._register_standard_pids() + + def _register_standard_pids(self) -> None: + """Register all standard Mode 01 PIDs.""" + pids = [ + # Supported PIDs + PIDDefinition(0x00, "Supported PIDs 01-20", "PIDs_A", "", 0, 0xFFFFFFFF, 4, + _decode_supported_pids, PIDCategory.DIAGNOSTICS, "Bitmap of supported PIDs 01-20"), + PIDDefinition(0x20, "Supported PIDs 21-40", "PIDs_B", "", 0, 0xFFFFFFFF, 4, + _decode_supported_pids, PIDCategory.DIAGNOSTICS, "Bitmap of supported PIDs 21-40"), + PIDDefinition(0x40, "Supported PIDs 41-60", "PIDs_C", "", 0, 0xFFFFFFFF, 4, + _decode_supported_pids, PIDCategory.DIAGNOSTICS, "Bitmap of supported PIDs 41-60"), + PIDDefinition(0x60, "Supported PIDs 61-80", "PIDs_D", "", 0, 0xFFFFFFFF, 4, + _decode_supported_pids, PIDCategory.DIAGNOSTICS, "Bitmap of supported PIDs 61-80"), + PIDDefinition(0x80, "Supported PIDs 81-A0", "PIDs_E", "", 0, 0xFFFFFFFF, 4, + _decode_supported_pids, PIDCategory.DIAGNOSTICS, "Bitmap of supported PIDs 81-A0"), + PIDDefinition(0xA0, "Supported PIDs A1-C0", "PIDs_F", "", 0, 0xFFFFFFFF, 4, + _decode_supported_pids, PIDCategory.DIAGNOSTICS, "Bitmap of supported PIDs A1-C0"), + PIDDefinition(0xC0, "Supported PIDs C1-E0", "PIDs_G", "", 0, 0xFFFFFFFF, 4, + _decode_supported_pids, PIDCategory.DIAGNOSTICS, "Bitmap of supported PIDs C1-E0"), + + # Fuel System + PIDDefinition(0x03, "Fuel System Status", "FuelSys", "", 0, 255, 2, + _decode_fuel_system_status, PIDCategory.FUEL, "Fuel system status"), + PIDDefinition(0x04, "Calculated Engine Load", "Load", "%", 0, 100, 1, + _decode_percent, PIDCategory.ENGINE, "Calculated engine load value"), + + # Temperature + PIDDefinition(0x05, "Engine Coolant Temperature", "Coolant", "°C", -40, 215, 1, + _decode_temp, PIDCategory.TEMPERATURE, "Engine coolant temperature"), + PIDDefinition(0x0F, "Intake Air Temperature", "IntakeTemp", "°C", -40, 215, 1, + _decode_temp, PIDCategory.TEMPERATURE, "Intake air temperature"), + PIDDefinition(0x46, "Ambient Air Temperature", "Ambient", "°C", -40, 215, 1, + _decode_temp, PIDCategory.TEMPERATURE, "Ambient air temperature"), + PIDDefinition(0x5C, "Engine Oil Temperature", "OilTemp", "°C", -40, 210, 1, + _decode_temp, PIDCategory.TEMPERATURE, "Engine oil temperature"), + + # Fuel Trim + PIDDefinition(0x06, "Short Term Fuel Trim Bank 1", "STFT1", "%", -100, 99.2, 1, + _decode_fuel_trim, PIDCategory.FUEL, "Short term fuel trim - Bank 1"), + PIDDefinition(0x07, "Long Term Fuel Trim Bank 1", "LTFT1", "%", -100, 99.2, 1, + _decode_fuel_trim, PIDCategory.FUEL, "Long term fuel trim - Bank 1"), + PIDDefinition(0x08, "Short Term Fuel Trim Bank 2", "STFT2", "%", -100, 99.2, 1, + _decode_fuel_trim, PIDCategory.FUEL, "Short term fuel trim - Bank 2"), + PIDDefinition(0x09, "Long Term Fuel Trim Bank 2", "LTFT2", "%", -100, 99.2, 1, + _decode_fuel_trim, PIDCategory.FUEL, "Long term fuel trim - Bank 2"), + + # Pressure + PIDDefinition(0x0A, "Fuel Pressure", "FuelPres", "kPa", 0, 765, 1, + _decode_fuel_pressure, PIDCategory.FUEL, "Fuel pressure (gauge)"), + PIDDefinition(0x0B, "Intake Manifold Pressure", "MAP", "kPa", 0, 255, 1, + _decode_intake_pressure, PIDCategory.ENGINE, "Intake manifold absolute pressure"), + PIDDefinition(0x33, "Barometric Pressure", "Baro", "kPa", 0, 255, 1, + _decode_barometric_pressure, 
PIDCategory.OTHER, "Absolute barometric pressure"), + + # Engine + PIDDefinition(0x0C, "Engine RPM", "RPM", "rpm", 0, 16383.75, 2, + _decode_rpm, PIDCategory.ENGINE, "Engine speed"), + PIDDefinition(0x0E, "Timing Advance", "Timing", "°", -64, 63.5, 1, + _decode_timing_advance, PIDCategory.ENGINE, "Timing advance relative to #1 cylinder"), + PIDDefinition(0x10, "Mass Air Flow Rate", "MAF", "g/s", 0, 655.35, 2, + _decode_maf, PIDCategory.ENGINE, "Mass air flow sensor rate"), + + # Speed & Movement + PIDDefinition(0x0D, "Vehicle Speed", "Speed", "km/h", 0, 255, 1, + _decode_speed, PIDCategory.SPEED, "Vehicle speed"), + PIDDefinition(0x11, "Throttle Position", "Throttle", "%", 0, 100, 1, + _decode_percent, PIDCategory.ENGINE, "Throttle position"), + + # O2 Sensors (Bank 1) + PIDDefinition(0x14, "O2 Sensor B1S1 Voltage", "O2_B1S1", "V", 0, 1.275, 2, + _decode_o2_voltage, PIDCategory.OXYGEN, "Oxygen sensor 1, Bank 1 - voltage"), + PIDDefinition(0x15, "O2 Sensor B1S2 Voltage", "O2_B1S2", "V", 0, 1.275, 2, + _decode_o2_voltage, PIDCategory.OXYGEN, "Oxygen sensor 2, Bank 1 - voltage"), + PIDDefinition(0x16, "O2 Sensor B1S3 Voltage", "O2_B1S3", "V", 0, 1.275, 2, + _decode_o2_voltage, PIDCategory.OXYGEN, "Oxygen sensor 3, Bank 1 - voltage"), + PIDDefinition(0x17, "O2 Sensor B1S4 Voltage", "O2_B1S4", "V", 0, 1.275, 2, + _decode_o2_voltage, PIDCategory.OXYGEN, "Oxygen sensor 4, Bank 1 - voltage"), + + # O2 Sensors (Bank 2) + PIDDefinition(0x18, "O2 Sensor B2S1 Voltage", "O2_B2S1", "V", 0, 1.275, 2, + _decode_o2_voltage, PIDCategory.OXYGEN, "Oxygen sensor 1, Bank 2 - voltage"), + PIDDefinition(0x19, "O2 Sensor B2S2 Voltage", "O2_B2S2", "V", 0, 1.275, 2, + _decode_o2_voltage, PIDCategory.OXYGEN, "Oxygen sensor 2, Bank 2 - voltage"), + PIDDefinition(0x1A, "O2 Sensor B2S3 Voltage", "O2_B2S3", "V", 0, 1.275, 2, + _decode_o2_voltage, PIDCategory.OXYGEN, "Oxygen sensor 3, Bank 2 - voltage"), + PIDDefinition(0x1B, "O2 Sensor B2S4 Voltage", "O2_B2S4", "V", 0, 1.275, 2, + _decode_o2_voltage, PIDCategory.OXYGEN, "Oxygen sensor 4, Bank 2 - voltage"), + + # Runtime & Distance + PIDDefinition(0x1F, "Run Time Since Engine Start", "Runtime", "s", 0, 65535, 2, + _decode_runtime, PIDCategory.ENGINE, "Time since engine start"), + PIDDefinition(0x21, "Distance with MIL On", "DistMIL", "km", 0, 65535, 2, + _decode_distance_mil, PIDCategory.DIAGNOSTICS, "Distance traveled with MIL on"), + PIDDefinition(0x31, "Distance Since Codes Cleared", "DistClr", "km", 0, 65535, 2, + _decode_distance_cleared, PIDCategory.DIAGNOSTICS, "Distance since codes cleared"), + PIDDefinition(0xA6, "Odometer", "Odometer", "km", 0, 429496729.5, 4, + _decode_odometer, PIDCategory.SPEED, "Odometer value"), + + # Fuel System Advanced + PIDDefinition(0x22, "Fuel Rail Pressure", "FRP", "kPa", 0, 5177.27, 2, + _decode_fuel_rail_pressure, PIDCategory.FUEL, "Fuel rail pressure (manifold vacuum)"), + PIDDefinition(0x23, "Fuel Rail Pressure (Diesel)", "FRP_D", "kPa", 0, 655350, 2, + _decode_fuel_rail_pressure_diesel, PIDCategory.FUEL, "Fuel rail pressure (diesel/direct inject)"), + PIDDefinition(0x2C, "Commanded EGR", "EGR", "%", 0, 100, 1, + _decode_egr_commanded, PIDCategory.ENGINE, "Commanded EGR"), + PIDDefinition(0x2D, "EGR Error", "EGR_Err", "%", -100, 99.2, 1, + _decode_egr_error, PIDCategory.ENGINE, "EGR error"), + PIDDefinition(0x2E, "Commanded Evaporative Purge", "Evap", "%", 0, 100, 1, + _decode_evap_purge, PIDCategory.FUEL, "Commanded evaporative purge"), + PIDDefinition(0x2F, "Fuel Tank Level", "FuelLvl", "%", 0, 100, 1, + _decode_fuel_level, 
PIDCategory.FUEL, "Fuel tank level input"), + + # Warmups & Codes + PIDDefinition(0x30, "Warm-ups Since Codes Cleared", "Warmups", "", 0, 255, 1, + _decode_warmups, PIDCategory.DIAGNOSTICS, "Warm-ups since codes cleared"), + PIDDefinition(0x4D, "Time with MIL On", "TimeMIL", "min", 0, 65535, 2, + _decode_time_mil, PIDCategory.DIAGNOSTICS, "Time run with MIL on"), + PIDDefinition(0x4E, "Time Since Codes Cleared", "TimeClr", "min", 0, 65535, 2, + _decode_time_cleared, PIDCategory.DIAGNOSTICS, "Time since codes cleared"), + + # Catalyst Temperature + PIDDefinition(0x3C, "Catalyst Temp B1S1", "Cat_B1S1", "°C", -40, 6513.5, 2, + _decode_catalyst_temp, PIDCategory.TEMPERATURE, "Catalyst temperature Bank 1, Sensor 1"), + PIDDefinition(0x3D, "Catalyst Temp B2S1", "Cat_B2S1", "°C", -40, 6513.5, 2, + _decode_catalyst_temp, PIDCategory.TEMPERATURE, "Catalyst temperature Bank 2, Sensor 1"), + PIDDefinition(0x3E, "Catalyst Temp B1S2", "Cat_B1S2", "°C", -40, 6513.5, 2, + _decode_catalyst_temp, PIDCategory.TEMPERATURE, "Catalyst temperature Bank 1, Sensor 2"), + PIDDefinition(0x3F, "Catalyst Temp B2S2", "Cat_B2S2", "°C", -40, 6513.5, 2, + _decode_catalyst_temp, PIDCategory.TEMPERATURE, "Catalyst temperature Bank 2, Sensor 2"), + + # Voltage & Load + PIDDefinition(0x42, "Control Module Voltage", "ECU_V", "V", 0, 65.535, 2, + _decode_control_module_voltage, PIDCategory.OTHER, "Control module voltage"), + PIDDefinition(0x43, "Absolute Load Value", "AbsLoad", "%", 0, 25700, 2, + _decode_absolute_load, PIDCategory.ENGINE, "Absolute load value"), + PIDDefinition(0x44, "Commanded Equivalence Ratio", "Lambda", "", 0, 2, 2, + _decode_equivalence_ratio, PIDCategory.FUEL, "Commanded air-fuel equivalence ratio"), + + # Throttle & Pedal + PIDDefinition(0x45, "Relative Throttle Position", "RelThrot", "%", 0, 100, 1, + _decode_relative_throttle, PIDCategory.ENGINE, "Relative throttle position"), + PIDDefinition(0x47, "Absolute Throttle Position B", "ThrotB", "%", 0, 100, 1, + _decode_absolute_throttle_b, PIDCategory.ENGINE, "Absolute throttle position B"), + PIDDefinition(0x48, "Absolute Throttle Position C", "ThrotC", "%", 0, 100, 1, + _decode_absolute_throttle_b, PIDCategory.ENGINE, "Absolute throttle position C"), + PIDDefinition(0x49, "Accelerator Pedal Position D", "PedalD", "%", 0, 100, 1, + _decode_accelerator_pedal_d, PIDCategory.ENGINE, "Accelerator pedal position D"), + PIDDefinition(0x4A, "Accelerator Pedal Position E", "PedalE", "%", 0, 100, 1, + _decode_accelerator_pedal_e, PIDCategory.ENGINE, "Accelerator pedal position E"), + PIDDefinition(0x4B, "Accelerator Pedal Position F", "PedalF", "%", 0, 100, 1, + _decode_accelerator_pedal_e, PIDCategory.ENGINE, "Accelerator pedal position F"), + PIDDefinition(0x4C, "Commanded Throttle Actuator", "ThrotAct", "%", 0, 100, 1, + _decode_commanded_throttle, PIDCategory.ENGINE, "Commanded throttle actuator"), + + # Fuel Rate & Ethanol + PIDDefinition(0x5E, "Engine Fuel Rate", "FuelRate", "L/h", 0, 3276.75, 2, + _decode_fuel_rate, PIDCategory.FUEL, "Engine fuel rate"), + PIDDefinition(0x52, "Ethanol Fuel Percentage", "Ethanol", "%", 0, 100, 1, + _decode_ethanol_percent, PIDCategory.FUEL, "Ethanol fuel percentage"), + ] + + for pid_def in pids: + self._pids[pid_def.pid] = pid_def + + def get(self, pid: int) -> Optional[PIDDefinition]: + """Get PID definition by ID.""" + return self._pids.get(pid) + + def decode(self, pid: int, data: bytes) -> Optional[float]: + """ + Decode raw data using PID definition. 
+ + Args: + pid: Parameter ID + data: Raw data bytes + + Returns: + Decoded value or None if PID unknown or decode failed + """ + pid_def = self.get(pid) + if pid_def is None: + return None + return pid_def.decode(data) + + def create_reading( + self, + pid: int, + data: bytes, + timestamp_ns: int, + ecu_id: int = 0 + ) -> OBD2Reading: + """ + Create an OBD2Reading from raw response data. + + Args: + pid: Parameter ID + data: Raw data bytes + timestamp_ns: Reading timestamp + ecu_id: ECU identifier + + Returns: + OBD2Reading with decoded value + """ + pid_def = self.get(pid) + + if pid_def is None: + return OBD2Reading( + timestamp_ns=timestamp_ns, + pid=pid, + pid_name=f"Unknown_0x{pid:02X}", + raw_data=data, + value=None, + unit="", + is_valid=False, + ecu_id=ecu_id + ) + + value = pid_def.decode(data) + + return OBD2Reading( + timestamp_ns=timestamp_ns, + pid=pid, + pid_name=pid_def.name, + raw_data=data, + value=value, + unit=pid_def.unit, + is_valid=value is not None, + ecu_id=ecu_id + ) + + def get_by_category(self, category: PIDCategory) -> List[PIDDefinition]: + """Get all PIDs in a category.""" + return [p for p in self._pids.values() if p.category == category] + + def get_supported_pids(self, bitmap: int, base_pid: int = 0x00) -> List[int]: + """ + Parse supported PIDs bitmap. + + Args: + bitmap: 32-bit bitmap from PID 0x00/0x20/0x40/etc response + base_pid: Base PID (0x00, 0x20, 0x40, etc.) + + Returns: + List of supported PID numbers + """ + supported = [] + for i in range(32): + if bitmap & (1 << (31 - i)): + supported.append(base_pid + i + 1) + return supported + + def all_pids(self) -> List[PIDDefinition]: + """Get all registered PIDs.""" + return list(self._pids.values()) + + def __contains__(self, pid: int) -> bool: + return pid in self._pids + + def __len__(self) -> int: + return len(self._pids) + + +# Global registry instance +_registry: Optional[PIDRegistry] = None + + +def get_pid_registry() -> PIDRegistry: + """Get global PID registry instance.""" + global _registry + if _registry is None: + _registry = PIDRegistry() + return _registry diff --git a/can_sniffer/src/obd2/poller.py b/can_sniffer/src/obd2/poller.py new file mode 100644 index 0000000..d85ca0d --- /dev/null +++ b/can_sniffer/src/obd2/poller.py @@ -0,0 +1,447 @@ +""" +OBD2 Polling Engine. + +Manages periodic polling of OBD2 PIDs with configurable groups +and intervals. Coordinates with transceiver and response matcher. +""" + +import threading +import time +from dataclasses import dataclass, field +from typing import Optional, Callable, Dict, Any, List, Set +from enum import Enum + +from logger import get_logger +from .protocol import OBD2Request, OBD2Response, OBD2Mode +from .pids import OBD2Reading, get_pid_registry +from .transceiver import CANTransceiver +from .response_matcher import ResponseMatcher + +logger = get_logger(__name__) + + +class PollerState(Enum): + """OBD2 Poller states.""" + + STOPPED = "stopped" + DISCOVERING = "discovering" + RUNNING = "running" + PAUSED = "paused" + ERROR = "error" + + +@dataclass +class PollingGroup: + """ + A group of PIDs polled at the same interval. 
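+
+    For example (illustrative values), a group that polls engine RPM and
+    vehicle speed every 100 ms can be declared as
+    PollingGroup("fast", 100, ["0C", "0D"]); PID strings are hex, e.g. "0C"
+    for PID 0x0C.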
+ + Attributes: + name: Group identifier + interval_ms: Polling interval in milliseconds + pids: List of PIDs to poll (hex strings like "0C", "0D") + enabled: Whether group is active + priority: Lower = higher priority + """ + + name: str + interval_ms: int + pids: List[str] + enabled: bool = True + priority: int = 0 + + def get_pid_ints(self) -> List[int]: + """Convert hex PID strings to integers.""" + result = [] + for pid_str in self.pids: + try: + result.append(int(pid_str, 16)) + except ValueError: + logger.warning(f"Invalid PID format: {pid_str}") + return result + + +@dataclass +class GroupState: + """Runtime state for a polling group.""" + + group: PollingGroup + last_poll_time: float = 0.0 + polls_completed: int = 0 + current_pid_index: int = 0 + + @property + def pids(self) -> List[int]: + """Get PIDs as integers.""" + return self.group.get_pid_ints() + + @property + def next_poll_time(self) -> float: + """Calculate when next poll should occur.""" + return self.last_poll_time + (self.group.interval_ms / 1000.0) + + @property + def is_due(self) -> bool: + """Check if group is due for polling.""" + return time.time() >= self.next_poll_time + + def get_next_pid(self) -> Optional[int]: + """Get next PID to poll (round-robin).""" + pids = self.pids + if not pids: + return None + pid = pids[self.current_pid_index] + self.current_pid_index = (self.current_pid_index + 1) % len(pids) + return pid + + +@dataclass +class PollerStats: + """Statistics for the OBD2 poller.""" + + total_requests: int = 0 + successful_responses: int = 0 + timeouts: int = 0 + ecu_connected: bool = False + supported_pids: Set[int] = field(default_factory=set) + last_response_time: float = 0.0 + polling_active: bool = False + + +class OBD2Poller: + """ + OBD2 Polling Engine. + + Manages periodic polling of OBD2 PIDs with configurable groups. + Handles ECU discovery, supported PID detection, and adaptive polling. + + Typical usage: + poller = OBD2Poller(transceiver, matcher) + poller.add_group(PollingGroup("fast", 100, ["0C", "0D"])) + poller.add_group(PollingGroup("slow", 1000, ["05", "2F"])) + poller.start() + """ + + def __init__( + self, + transceiver: CANTransceiver, + matcher: ResponseMatcher, + reading_callback: Optional[Callable[[OBD2Reading], None]] = None, + auto_discover: bool = True, + ): + """ + Initialize OBD2 poller. + + Args: + transceiver: CAN transceiver for sending/receiving + matcher: Response matcher for request correlation + reading_callback: Callback for decoded readings + auto_discover: Auto-discover supported PIDs on start + """ + self._transceiver = transceiver + self._matcher = matcher + self._reading_callback = reading_callback + self._auto_discover = auto_discover + + self._groups: Dict[str, GroupState] = {} + self._state = PollerState.STOPPED + self._stats = PollerStats() + self._stats_lock = threading.Lock() + + self._running = False + self._poll_thread: Optional[threading.Thread] = None + self._pid_registry = get_pid_registry() + + # Connect matcher callbacks + self._matcher.set_reading_callback(self._on_reading) + self._matcher.set_retry_callback(self._on_retry) + + @property + def state(self) -> PollerState: + """Get current poller state.""" + return self._state + + @property + def is_running(self) -> bool: + """Check if poller is actively polling.""" + return self._running and self._state == PollerState.RUNNING + + def add_group(self, group: PollingGroup) -> None: + """ + Add a polling group. 
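+
+        For example (illustrative), add_group(PollingGroup("temps", 5000,
+        ["05", "0F", "5C"])) polls one of the three temperature PIDs per
+        interval in round-robin order, so each individual PID refreshes
+        roughly every 15 seconds.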
+ + Args: + group: Polling group to add + """ + self._groups[group.name] = GroupState(group=group) + logger.info( + f"Added polling group: {group.name}", + extra={ + "interval_ms": group.interval_ms, + "pids": group.pids, + } + ) + + def remove_group(self, name: str) -> None: + """Remove a polling group by name.""" + if name in self._groups: + del self._groups[name] + logger.info(f"Removed polling group: {name}") + + def enable_group(self, name: str, enabled: bool = True) -> None: + """Enable or disable a polling group.""" + if name in self._groups: + self._groups[name].group.enabled = enabled + logger.info(f"Polling group {name} {'enabled' if enabled else 'disabled'}") + + def start(self) -> bool: + """ + Start the polling engine. + + Returns: + True if started successfully + """ + if self._running: + logger.warning("Poller already running") + return True + + if not self._transceiver.is_running: + logger.error("Cannot start poller - transceiver not running") + return False + + self._running = True + + # Start matcher + self._matcher.start() + + # Connect transceiver response callback + self._transceiver.set_response_callback(self._on_response) + + # Start polling thread + self._poll_thread = threading.Thread( + target=self._poll_loop, + name="OBD2-Poller", + daemon=True + ) + self._poll_thread.start() + + logger.info("OBD2 Poller started") + return True + + def stop(self) -> None: + """Stop the polling engine.""" + if not self._running: + return + + logger.info("Stopping OBD2 poller") + self._running = False + self._state = PollerState.STOPPED + + if self._poll_thread and self._poll_thread.is_alive(): + self._poll_thread.join(timeout=2.0) + + self._matcher.stop() + + with self._stats_lock: + self._stats.polling_active = False + + logger.info("OBD2 Poller stopped") + + def pause(self) -> None: + """Pause polling (can be resumed).""" + if self._state == PollerState.RUNNING: + self._state = PollerState.PAUSED + with self._stats_lock: + self._stats.polling_active = False + logger.info("OBD2 Poller paused") + + def resume(self) -> None: + """Resume paused polling.""" + if self._state == PollerState.PAUSED: + self._state = PollerState.RUNNING + with self._stats_lock: + self._stats.polling_active = True + logger.info("OBD2 Poller resumed") + + def request_pid( + self, + pid: int, + callback: Optional[Callable[[OBD2Reading], None]] = None, + ) -> bool: + """ + Request a single PID (one-shot). + + Args: + pid: PID to request + callback: Optional callback for this specific request + + Returns: + True if request was sent + """ + request = OBD2Request(mode=OBD2Mode.CURRENT_DATA, pid=pid) + + if self._transceiver.send_request(request): + self._matcher.register_request(request, callback) + with self._stats_lock: + self._stats.total_requests += 1 + return True + return False + + def discover_supported_pids(self) -> None: + """ + Discover supported PIDs from ECU. + + Queries PID 0x00, 0x20, 0x40, etc. to get supported PID bitmaps. 
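+
+        Example (illustrative): if the ECU answers PID 0x00 with the bitmap
+        0xBE1FA813, get_pid_registry().get_supported_pids(0xBE1FA813, 0x00)
+        yields PIDs beginning [0x01, 0x03, 0x04, 0x05, 0x06, 0x07, ...],
+        which are merged into this poller's supported set.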
+ """ + logger.info("Starting PID discovery") + self._state = PollerState.DISCOVERING + + discovery_pids = [0x00, 0x20, 0x40, 0x60, 0x80, 0xA0, 0xC0] + + for pid in discovery_pids: + self.request_pid(pid, self._on_discovery_response) + time.sleep(0.05) # Small delay between requests + + def get_stats(self) -> Dict[str, Any]: + """Get poller statistics.""" + with self._stats_lock: + matcher_stats = self._matcher.get_stats() + return { + "state": self._state.value, + "total_requests": self._stats.total_requests, + "successful_responses": self._stats.successful_responses, + "timeouts": matcher_stats["timeouts"], + "success_rate": matcher_stats["success_rate"], + "avg_latency_ms": matcher_stats["avg_latency_ms"], + "ecu_connected": self._stats.ecu_connected, + "supported_pids_count": len(self._stats.supported_pids), + "last_response_time": self._stats.last_response_time, + "polling_active": self._stats.polling_active, + "groups": { + name: { + "enabled": gs.group.enabled, + "interval_ms": gs.group.interval_ms, + "polls_completed": gs.polls_completed, + } + for name, gs in self._groups.items() + }, + } + + def get_supported_pids(self) -> Set[int]: + """Get set of discovered supported PIDs.""" + with self._stats_lock: + return self._stats.supported_pids.copy() + + def _poll_loop(self) -> None: + """Main polling loop.""" + logger.debug("Poll loop started") + + # Auto-discover if enabled + if self._auto_discover: + self.discover_supported_pids() + time.sleep(0.5) # Wait for discovery responses + + self._state = PollerState.RUNNING + with self._stats_lock: + self._stats.polling_active = True + + while self._running: + try: + if self._state == PollerState.PAUSED: + time.sleep(0.1) + continue + + # Poll each group that's due + for name, group_state in self._groups.items(): + if not group_state.group.enabled: + continue + + if group_state.is_due: + self._poll_group(group_state) + + # Small sleep to prevent busy loop + time.sleep(0.001) + + except Exception as e: + logger.error(f"Poll loop error: {e}") + self._state = PollerState.ERROR + + logger.debug("Poll loop stopped") + + def _poll_group(self, group_state: GroupState) -> None: + """Poll the next PID in a group.""" + pid = group_state.get_next_pid() + if pid is None: + return + + # Skip if PID not in supported set (if we have discovery data) + with self._stats_lock: + if self._stats.supported_pids and pid not in self._stats.supported_pids: + # Skip unsupported PIDs silently + return + + request = OBD2Request(mode=OBD2Mode.CURRENT_DATA, pid=pid) + + if self._transceiver.send_request(request): + self._matcher.register_request(request) + group_state.last_poll_time = time.time() + + with self._stats_lock: + self._stats.total_requests += 1 + + # If we've polled all PIDs, increment polls_completed + if group_state.current_pid_index == 0: + group_state.polls_completed += 1 + + def _on_response(self, response: OBD2Response) -> None: + """Handle response from transceiver.""" + # Forward to matcher for correlation + self._matcher.match_response(response) + + def _on_reading(self, reading: OBD2Reading) -> None: + """Handle decoded reading from matcher.""" + with self._stats_lock: + self._stats.successful_responses += 1 + self._stats.last_response_time = time.time() + self._stats.ecu_connected = True + + # Forward to user callback + if self._reading_callback: + try: + self._reading_callback(reading) + except Exception as e: + logger.error(f"Reading callback error: {e}") + + def _on_retry(self, request: OBD2Request) -> None: + """Handle retry request from 
matcher.""" + self._transceiver.send_request(request) + self._matcher.register_request(request) + + def _on_discovery_response(self, reading: OBD2Reading) -> None: + """Handle PID discovery response.""" + if reading.value is None: + return + + # Parse supported PIDs bitmap + bitmap = int(reading.value) + base_pid = reading.pid + + supported = self._pid_registry.get_supported_pids(bitmap, base_pid) + + with self._stats_lock: + self._stats.supported_pids.update(supported) + + logger.info( + f"Discovered PIDs from 0x{base_pid:02X}", + extra={"count": len(supported), "pids": [f"0x{p:02X}" for p in supported[:10]]} + ) + + # Also forward to regular callback if set + if self._reading_callback: + self._reading_callback(reading) + + def set_reading_callback( + self, + callback: Callable[[OBD2Reading], None] + ) -> None: + """Set callback for all decoded readings.""" + self._reading_callback = callback diff --git a/can_sniffer/src/obd2/protocol.py b/can_sniffer/src/obd2/protocol.py new file mode 100644 index 0000000..67eea42 --- /dev/null +++ b/can_sniffer/src/obd2/protocol.py @@ -0,0 +1,316 @@ +""" +OBD2 Protocol Encoding/Decoding. + +Implements ISO 15765-4 (CAN) protocol for OBD2 communication. +Supports Single Frame and Multi Frame messages. +""" + +from dataclasses import dataclass, field +from enum import IntEnum +from typing import Optional, List +import time + +from can_frame import CANFrame + + +class OBD2Mode(IntEnum): + """OBD2 Service Modes (ISO 15031-5).""" + + CURRENT_DATA = 0x01 # Mode 01 - Current powertrain diagnostic data + FREEZE_FRAME = 0x02 # Mode 02 - Freeze frame data + STORED_DTCS = 0x03 # Mode 03 - Stored Diagnostic Trouble Codes + CLEAR_DTCS = 0x04 # Mode 04 - Clear DTCs and stored values + O2_MONITORING = 0x05 # Mode 05 - Oxygen sensor monitoring + ONBOARD_MONITORING = 0x06 # Mode 06 - On-board monitoring test results + PENDING_DTCS = 0x07 # Mode 07 - Pending Diagnostic Trouble Codes + CONTROL_OPERATION = 0x08 # Mode 08 - Control operation of on-board system + VEHICLE_INFO = 0x09 # Mode 09 - Request vehicle information + PERMANENT_DTCS = 0x0A # Mode 0A - Permanent DTCs + + +class FrameType(IntEnum): + """ISO-TP Frame Types for Multi Frame support.""" + + SINGLE = 0x00 # Single Frame (SF) - complete message in one frame + FIRST = 0x10 # First Frame (FF) - first of multi-frame sequence + CONSECUTIVE = 0x20 # Consecutive Frame (CF) - continuation frames + FLOW_CONTROL = 0x30 # Flow Control (FC) - flow control frame + + +# Standard OBD2 CAN IDs +OBD2_REQUEST_BROADCAST = 0x7DF # Broadcast request (all ECUs) +OBD2_REQUEST_ECU_BASE = 0x7E0 # Physical addressing base (ECU 0) +OBD2_RESPONSE_ECU_BASE = 0x7E8 # Response base (ECU 0 responds on 0x7E8) +OBD2_RESPONSE_ECU_END = 0x7EF # Response end (ECU 7 responds on 0x7EF) + + +@dataclass(frozen=True) +class OBD2Request: + """ + Represents an OBD2 request message. + + Encapsulates the mode and PID for an OBD2 query, with methods + to convert to CAN frames for transmission. 
+ + Attributes: + mode: OBD2 service mode (e.g., 0x01 for current data) + pid: Parameter ID to request (0x00-0xFF) + target_id: CAN ID to send request to (default: broadcast 0x7DF) + timestamp_ns: Request creation timestamp in nanoseconds + """ + + mode: int + pid: int + target_id: int = OBD2_REQUEST_BROADCAST + timestamp_ns: int = field(default_factory=lambda: time.time_ns()) + + def __post_init__(self): + """Validate request parameters.""" + if not 0x01 <= self.mode <= 0x0A: + raise ValueError(f"Invalid OBD2 mode: 0x{self.mode:02X}") + if not 0x00 <= self.pid <= 0xFF: + raise ValueError(f"Invalid PID: 0x{self.pid:02X}") + + def to_can_frame(self) -> CANFrame: + """ + Convert OBD2 request to CAN frame. + + Single Frame format (ISO 15765-2): + Byte 0: PCI (length) + Byte 1: Mode (Service ID) + Byte 2: PID + Bytes 3-7: Padding (0x00 or 0x55/0xAA) + + Returns: + CANFrame ready for transmission + """ + # Single Frame: length = 2 bytes (mode + pid) + data = bytes([ + 0x02, # PCI: Single Frame, length = 2 + self.mode, # Service ID (Mode) + self.pid, # PID + 0x00, 0x00, 0x00, 0x00, 0x00 # Padding + ]) + + return CANFrame( + ts_ns=self.timestamp_ns, + bus="obd2", + can_id=self.target_id, + is_extended=False, + dlc=8, + data=data + ) + + @property + def request_id(self) -> str: + """Unique identifier for request correlation.""" + return f"{self.mode:02X}_{self.pid:02X}" + + def __repr__(self) -> str: + return f"OBD2Request(mode=0x{self.mode:02X}, pid=0x{self.pid:02X}, target=0x{self.target_id:03X})" + + +@dataclass +class OBD2Response: + """ + Represents an OBD2 response message. + + Parses CAN frames containing OBD2 responses and extracts + the mode, PID, and data bytes. + + Attributes: + mode: Response mode (request mode + 0x40) + pid: Parameter ID + data: Raw data bytes from response + source_id: CAN ID of responding ECU + timestamp_ns: Response reception timestamp + is_multiframe: Whether this is part of a multi-frame sequence + """ + + mode: int + pid: int + data: bytes + source_id: int + timestamp_ns: int + is_multiframe: bool = False + + @classmethod + def from_can_frame(cls, frame: CANFrame) -> Optional["OBD2Response"]: + """ + Parse OBD2 response from CAN frame. 
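+
+        For example (illustrative), a frame from 0x7E8 with payload
+        04 41 0C 1A F8 00 00 00 parses to mode=0x01, pid=0x0C and
+        data bytes 1A F8 (the raw engine RPM payload).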
+ + Args: + frame: Received CAN frame + + Returns: + OBD2Response if valid OBD2 response, None otherwise + """ + # Validate source ID is in OBD2 response range + if not OBD2_RESPONSE_ECU_BASE <= frame.can_id <= OBD2_RESPONSE_ECU_END: + return None + + if len(frame.data) < 3: + return None + + pci = frame.data[0] + frame_type = pci & 0xF0 + + if frame_type == FrameType.SINGLE: + # Single Frame response + length = pci & 0x0F + if length < 2: + return None + + mode = frame.data[1] + pid = frame.data[2] + + # Verify it's a positive response (mode + 0x40) + if not (mode & 0x40): + return None + + # Extract data bytes (skip PCI, mode, PID) + data_length = length - 2 # Subtract mode and PID + data = frame.data[3:3 + data_length] + + return cls( + mode=mode & 0x3F, # Remove response flag to get original mode + pid=pid, + data=bytes(data), + source_id=frame.can_id, + timestamp_ns=frame.ts_ns, + is_multiframe=False + ) + + elif frame_type == FrameType.FIRST: + # First Frame of multi-frame response + # Total length is in PCI byte 0 (lower nibble) + byte 1 + total_length = ((pci & 0x0F) << 8) | frame.data[1] + mode = frame.data[2] + pid = frame.data[3] + + if not (mode & 0x40): + return None + + # First 4 data bytes are in this frame + data = frame.data[4:8] + + return cls( + mode=mode & 0x3F, + pid=pid, + data=bytes(data), + source_id=frame.can_id, + timestamp_ns=frame.ts_ns, + is_multiframe=True + ) + + return None + + @property + def response_id(self) -> str: + """Unique identifier matching request_id.""" + return f"{self.mode:02X}_{self.pid:02X}" + + @property + def ecu_index(self) -> int: + """ECU index (0-7) based on response CAN ID.""" + return self.source_id - OBD2_RESPONSE_ECU_BASE + + def __repr__(self) -> str: + return ( + f"OBD2Response(mode=0x{self.mode:02X}, pid=0x{self.pid:02X}, " + f"data={self.data.hex()}, ecu={self.ecu_index})" + ) + + +@dataclass +class MultiFrameBuffer: + """ + Buffer for assembling multi-frame OBD2 responses. + + Collects First Frame and Consecutive Frames until + the complete message is assembled. + """ + + source_id: int + mode: int + pid: int + total_length: int + data: bytearray + expected_sequence: int = 1 + timestamp_ns: int = field(default_factory=lambda: time.time_ns()) + + def add_consecutive_frame(self, frame: CANFrame) -> bool: + """ + Add a Consecutive Frame to the buffer. + + Args: + frame: Consecutive Frame to add + + Returns: + True if frame was valid and added + """ + if frame.can_id != self.source_id: + return False + + pci = frame.data[0] + if (pci & 0xF0) != FrameType.CONSECUTIVE: + return False + + sequence = pci & 0x0F + if sequence != (self.expected_sequence & 0x0F): + return False + + # Add data bytes (up to 7 per CF) + remaining = self.total_length - len(self.data) + bytes_to_add = min(7, remaining) + self.data.extend(frame.data[1:1 + bytes_to_add]) + + self.expected_sequence += 1 + return True + + @property + def is_complete(self) -> bool: + """Check if all data has been received.""" + return len(self.data) >= self.total_length + + def to_response(self) -> OBD2Response: + """Convert completed buffer to OBD2Response.""" + return OBD2Response( + mode=self.mode, + pid=self.pid, + data=bytes(self.data[:self.total_length]), + source_id=self.source_id, + timestamp_ns=self.timestamp_ns, + is_multiframe=True + ) + + +def create_flow_control_frame(target_id: int) -> CANFrame: + """ + Create Flow Control frame for multi-frame reception. + + Sends CTS (Clear To Send) to allow ECU to continue + sending Consecutive Frames. 
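+
+    For example (illustrative), create_flow_control_frame(0x7E0) returns a
+    frame addressed to ECU 0 with payload 30 00 00 00 00 00 00 00
+    (CTS, unlimited block size, zero minimum separation time).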
+ + Args: + target_id: ECU request ID (0x7E0-0x7E7) + + Returns: + Flow Control CAN frame + """ + # FC frame: CTS, Block Size = 0 (no limit), ST_min = 0 (no delay) + data = bytes([ + 0x30, # FC, CTS + 0x00, # Block Size = unlimited + 0x00, # ST_min = 0ms + 0x00, 0x00, 0x00, 0x00, 0x00 + ]) + + return CANFrame( + ts_ns=time.time_ns(), + bus="obd2", + can_id=target_id, + is_extended=False, + dlc=8, + data=data + ) diff --git a/can_sniffer/src/obd2/response_matcher.py b/can_sniffer/src/obd2/response_matcher.py new file mode 100644 index 0000000..4775a34 --- /dev/null +++ b/can_sniffer/src/obd2/response_matcher.py @@ -0,0 +1,342 @@ +""" +OBD2 Request/Response Matcher. + +Correlates OBD2 requests with their responses, handling timeouts +and providing metrics for latency and success rate. +""" + +import threading +import time +from dataclasses import dataclass, field +from typing import Optional, Callable, Dict, Any, List +from collections import OrderedDict +from enum import Enum + +from logger import get_logger +from .protocol import OBD2Request, OBD2Response +from .pids import OBD2Reading, get_pid_registry + +logger = get_logger(__name__) + + +class RequestState(Enum): + """State of a pending request.""" + + PENDING = "pending" + COMPLETED = "completed" + TIMEOUT = "timeout" + ERROR = "error" + + +@dataclass +class PendingRequest: + """ + Represents a pending OBD2 request awaiting response. + + Attributes: + request: Original OBD2 request + sent_time: Time when request was sent + timeout_ms: Timeout in milliseconds + callback: Optional callback when response received + state: Current request state + response: Received response (if any) + retry_count: Number of retries attempted + """ + + request: OBD2Request + sent_time: float = field(default_factory=time.time) + timeout_ms: int = 100 + callback: Optional[Callable[[OBD2Reading], None]] = None + state: RequestState = RequestState.PENDING + response: Optional[OBD2Response] = None + retry_count: int = 0 + max_retries: int = 2 + + @property + def is_expired(self) -> bool: + """Check if request has timed out.""" + elapsed_ms = (time.time() - self.sent_time) * 1000 + return elapsed_ms > self.timeout_ms + + @property + def latency_ms(self) -> Optional[float]: + """Get response latency in milliseconds.""" + if self.response is None: + return None + return (self.response.timestamp_ns / 1_000_000) - (self.sent_time * 1000) + + @property + def can_retry(self) -> bool: + """Check if request can be retried.""" + return self.retry_count < self.max_retries + + +@dataclass +class MatcherStats: + """Statistics for the response matcher.""" + + requests_sent: int = 0 + responses_matched: int = 0 + timeouts: int = 0 + retries: int = 0 + errors: int = 0 + total_latency_ms: float = 0.0 + + @property + def success_rate(self) -> float: + """Calculate success rate percentage.""" + if self.requests_sent == 0: + return 0.0 + return (self.responses_matched / self.requests_sent) * 100.0 + + @property + def avg_latency_ms(self) -> float: + """Calculate average latency.""" + if self.responses_matched == 0: + return 0.0 + return self.total_latency_ms / self.responses_matched + + +class ResponseMatcher: + """ + Matches OBD2 requests with responses. + + Tracks pending requests, handles timeouts and retries, + provides callback notification and metrics. + + Thread-safe for concurrent request/response handling. 
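+
+    Typical usage (illustrative sketch; on_reading is any callable that
+    accepts an OBD2Reading):
+        matcher = ResponseMatcher(timeout_ms=100, max_retries=2)
+        matcher.set_reading_callback(on_reading)
+        matcher.start()
+        matcher.register_request(request)   # after transceiver.send_request()
+        matcher.match_response(response)    # from the transceiver RX callback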
+ """ + + def __init__( + self, + timeout_ms: int = 100, + max_retries: int = 2, + reading_callback: Optional[Callable[[OBD2Reading], None]] = None, + retry_callback: Optional[Callable[[OBD2Request], None]] = None, + ): + """ + Initialize response matcher. + + Args: + timeout_ms: Default request timeout in milliseconds + max_retries: Maximum retry attempts per request + reading_callback: Callback for decoded readings + retry_callback: Callback to retry failed requests + """ + self.timeout_ms = timeout_ms + self.max_retries = max_retries + self._reading_callback = reading_callback + self._retry_callback = retry_callback + + # Pending requests by request_id (mode_pid) + self._pending: OrderedDict[str, PendingRequest] = OrderedDict() + self._lock = threading.Lock() + + self._stats = MatcherStats() + self._stats_lock = threading.Lock() + + self._pid_registry = get_pid_registry() + + # Cleanup thread + self._running = False + self._cleanup_thread: Optional[threading.Thread] = None + + def start(self) -> None: + """Start the cleanup thread.""" + if self._running: + return + + self._running = True + self._cleanup_thread = threading.Thread( + target=self._cleanup_loop, + name="Matcher-Cleanup", + daemon=True + ) + self._cleanup_thread.start() + logger.debug("Response matcher started") + + def stop(self) -> None: + """Stop the cleanup thread.""" + self._running = False + if self._cleanup_thread and self._cleanup_thread.is_alive(): + self._cleanup_thread.join(timeout=2.0) + logger.debug("Response matcher stopped") + + def register_request( + self, + request: OBD2Request, + callback: Optional[Callable[[OBD2Reading], None]] = None, + ) -> None: + """ + Register a sent request for response matching. + + Args: + request: The OBD2 request that was sent + callback: Optional per-request callback + """ + pending = PendingRequest( + request=request, + sent_time=time.time(), + timeout_ms=self.timeout_ms, + callback=callback, + max_retries=self.max_retries, + ) + + with self._lock: + # Replace if already exists (retry case) + self._pending[request.request_id] = pending + + with self._stats_lock: + self._stats.requests_sent += 1 + + def match_response(self, response: OBD2Response) -> Optional[OBD2Reading]: + """ + Match a response to a pending request. 
+ + Args: + response: Received OBD2 response + + Returns: + OBD2Reading if matched, None otherwise + """ + with self._lock: + pending = self._pending.pop(response.response_id, None) + + if pending is None: + # Response without pending request (unsolicited or late) + logger.debug( + f"Unmatched response: {response.response_id}", + extra={"source_id": hex(response.source_id)} + ) + return None + + # Mark as completed + pending.state = RequestState.COMPLETED + pending.response = response + + # Update stats + latency = pending.latency_ms + with self._stats_lock: + self._stats.responses_matched += 1 + if latency: + self._stats.total_latency_ms += latency + + # Create reading + reading = self._pid_registry.create_reading( + pid=response.pid, + data=response.data, + timestamp_ns=response.timestamp_ns, + ecu_id=response.ecu_index, + ) + + # Notify callbacks + if pending.callback: + try: + pending.callback(reading) + except Exception as e: + logger.error(f"Request callback error: {e}") + + if self._reading_callback: + try: + self._reading_callback(reading) + except Exception as e: + logger.error(f"Reading callback error: {e}") + + return reading + + def get_pending_count(self) -> int: + """Get number of pending requests.""" + with self._lock: + return len(self._pending) + + def get_stats(self) -> Dict[str, Any]: + """Get matcher statistics.""" + with self._stats_lock: + return { + "requests_sent": self._stats.requests_sent, + "responses_matched": self._stats.responses_matched, + "timeouts": self._stats.timeouts, + "retries": self._stats.retries, + "errors": self._stats.errors, + "success_rate": round(self._stats.success_rate, 2), + "avg_latency_ms": round(self._stats.avg_latency_ms, 2), + "pending_count": self.get_pending_count(), + } + + def clear_pending(self) -> None: + """Clear all pending requests.""" + with self._lock: + self._pending.clear() + + def _cleanup_loop(self) -> None: + """Cleanup loop for handling timeouts.""" + logger.debug("Cleanup loop started") + + while self._running: + try: + self._check_timeouts() + time.sleep(0.01) # 10ms check interval + except Exception as e: + logger.error(f"Cleanup loop error: {e}") + + logger.debug("Cleanup loop stopped") + + def _check_timeouts(self) -> None: + """Check and handle timed out requests.""" + expired: List[PendingRequest] = [] + + with self._lock: + # Find expired requests + for request_id, pending in list(self._pending.items()): + if pending.is_expired: + expired.append(pending) + del self._pending[request_id] + + # Handle expired requests outside lock + for pending in expired: + self._handle_timeout(pending) + + def _handle_timeout(self, pending: PendingRequest) -> None: + """Handle a timed out request.""" + pending.state = RequestState.TIMEOUT + + with self._stats_lock: + self._stats.timeouts += 1 + + # Check if we should retry + if pending.can_retry and self._retry_callback: + pending.retry_count += 1 + + with self._stats_lock: + self._stats.retries += 1 + + logger.debug( + f"Retrying request {pending.request.request_id} " + f"(attempt {pending.retry_count}/{pending.max_retries})" + ) + + try: + self._retry_callback(pending.request) + except Exception as e: + logger.error(f"Retry callback error: {e}") + with self._stats_lock: + self._stats.errors += 1 + else: + logger.debug( + f"Request timeout: {pending.request.request_id}", + extra={"retries": pending.retry_count} + ) + + def set_reading_callback( + self, + callback: Callable[[OBD2Reading], None] + ) -> None: + """Set callback for all decoded readings.""" + self._reading_callback = 
callback + + def set_retry_callback( + self, + callback: Callable[[OBD2Request], None] + ) -> None: + """Set callback for retry requests.""" + self._retry_callback = callback diff --git a/can_sniffer/src/obd2/transceiver.py b/can_sniffer/src/obd2/transceiver.py new file mode 100644 index 0000000..36c2c38 --- /dev/null +++ b/can_sniffer/src/obd2/transceiver.py @@ -0,0 +1,370 @@ +""" +CAN Transceiver for OBD2 Communication. + +Provides unified TX/RX interface for sending OBD2 requests +and receiving responses via SocketCAN. +""" + +import threading +import time +from dataclasses import dataclass, field +from typing import Optional, Callable, Dict, Any +from queue import Queue, Empty +from enum import Enum + +try: + import can +except ImportError: + can = None # type: ignore + +from can_frame import CANFrame +from logger import get_logger +from .protocol import ( + OBD2Request, + OBD2Response, + OBD2_RESPONSE_ECU_BASE, + OBD2_RESPONSE_ECU_END, +) + +logger = get_logger(__name__) + + +class TransceiverState(Enum): + """CAN Transceiver states.""" + + STOPPED = "stopped" + STARTING = "starting" + RUNNING = "running" + ERROR = "error" + + +@dataclass +class TransceiverStats: + """Statistics for the CAN transceiver.""" + + tx_count: int = 0 + rx_count: int = 0 + tx_errors: int = 0 + rx_errors: int = 0 + last_tx_time: float = 0.0 + last_rx_time: float = 0.0 + bus_errors: int = 0 + + +class CANTransceiver: + """ + Unified CAN TX/RX interface for OBD2 communication. + + Handles sending OBD2 requests and receiving responses via SocketCAN. + Provides callback-based response notification and statistics. + + Attributes: + interface: CAN interface name (e.g., "can0", "vcan0") + bitrate: CAN bus bitrate (default: 500000 for OBD2) + state: Current transceiver state + """ + + def __init__( + self, + interface: str = "can0", + bitrate: int = 500000, + response_callback: Optional[Callable[[OBD2Response], None]] = None, + ): + """ + Initialize CAN transceiver. + + Args: + interface: CAN interface name + bitrate: CAN bus bitrate + response_callback: Callback for received OBD2 responses + """ + self.interface = interface + self.bitrate = bitrate + self._response_callback = response_callback + + self._bus: Optional[can.Bus] = None + self._rx_thread: Optional[threading.Thread] = None + self._running = False + self._state = TransceiverState.STOPPED + self._stats = TransceiverStats() + self._stats_lock = threading.Lock() + + # TX queue for thread-safe sending + self._tx_queue: Queue[CANFrame] = Queue(maxsize=100) + self._tx_thread: Optional[threading.Thread] = None + + @property + def state(self) -> TransceiverState: + """Get current transceiver state.""" + return self._state + + @property + def is_running(self) -> bool: + """Check if transceiver is running.""" + return self._running and self._state == TransceiverState.RUNNING + + def start(self) -> bool: + """ + Start the CAN transceiver. + + Initializes SocketCAN bus, starts RX/TX threads. 
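+
+        Example (illustrative): for bench testing without a vehicle, a
+        virtual bus created with "ip link add dev vcan0 type vcan" and
+        "ip link set up vcan0" allows CANTransceiver(interface="vcan0")
+        to be started and exercised.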
+ + Returns: + True if started successfully + """ + if self._running: + logger.warning("Transceiver already running") + return True + + self._state = TransceiverState.STARTING + + try: + # Initialize CAN bus + self._bus = can.Bus( + channel=self.interface, + bustype="socketcan", + bitrate=self.bitrate, + receive_own_messages=False, + ) + + # Set up filters for OBD2 responses only + self._bus.set_filters([ + { + "can_id": OBD2_RESPONSE_ECU_BASE, + "can_mask": 0x7F8, # Match 0x7E8-0x7EF + "extended": False + } + ]) + + self._running = True + + # Start RX thread + self._rx_thread = threading.Thread( + target=self._rx_loop, + name="CAN-RX", + daemon=True + ) + self._rx_thread.start() + + # Start TX thread + self._tx_thread = threading.Thread( + target=self._tx_loop, + name="CAN-TX", + daemon=True + ) + self._tx_thread.start() + + self._state = TransceiverState.RUNNING + + logger.info( + "CAN Transceiver started", + extra={ + "interface": self.interface, + "bitrate": self.bitrate, + } + ) + return True + + except Exception as e: + self._state = TransceiverState.ERROR + logger.error( + "Failed to start CAN transceiver", + extra={"error": str(e), "interface": self.interface} + ) + return False + + def stop(self) -> None: + """Stop the CAN transceiver.""" + if not self._running: + return + + logger.info("Stopping CAN transceiver") + self._running = False + + # Wait for threads to finish + if self._rx_thread and self._rx_thread.is_alive(): + self._rx_thread.join(timeout=2.0) + if self._tx_thread and self._tx_thread.is_alive(): + self._tx_thread.join(timeout=2.0) + + # Close CAN bus + if self._bus: + try: + self._bus.shutdown() + except Exception as e: + logger.warning(f"Error closing CAN bus: {e}") + self._bus = None + + self._state = TransceiverState.STOPPED + logger.info("CAN Transceiver stopped") + + def send_request(self, request: OBD2Request) -> bool: + """ + Send an OBD2 request. + + Args: + request: OBD2Request to send + + Returns: + True if request was queued successfully + """ + if not self.is_running: + logger.warning("Cannot send - transceiver not running") + return False + + frame = request.to_can_frame() + + try: + self._tx_queue.put_nowait(frame) + return True + except Exception as e: + logger.warning(f"TX queue full, dropping request: {e}") + with self._stats_lock: + self._stats.tx_errors += 1 + return False + + def send_frame(self, frame: CANFrame) -> bool: + """ + Send a raw CAN frame. 
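+
+        Intended, for example, for frames built outside the OBD2Request
+        path, such as ISO-TP Flow Control frames from
+        protocol.create_flow_control_frame() (illustrative use case).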
+ + Args: + frame: CANFrame to send + + Returns: + True if frame was queued successfully + """ + if not self.is_running: + return False + + try: + self._tx_queue.put_nowait(frame) + return True + except Exception: + with self._stats_lock: + self._stats.tx_errors += 1 + return False + + def set_response_callback( + self, + callback: Callable[[OBD2Response], None] + ) -> None: + """Set callback for received OBD2 responses.""" + self._response_callback = callback + + def get_stats(self) -> Dict[str, Any]: + """Get transceiver statistics.""" + with self._stats_lock: + return { + "state": self._state.value, + "interface": self.interface, + "tx_count": self._stats.tx_count, + "rx_count": self._stats.rx_count, + "tx_errors": self._stats.tx_errors, + "rx_errors": self._stats.rx_errors, + "bus_errors": self._stats.bus_errors, + "last_tx_time": self._stats.last_tx_time, + "last_rx_time": self._stats.last_rx_time, + "tx_queue_size": self._tx_queue.qsize(), + } + + def _rx_loop(self) -> None: + """Receive loop - runs in dedicated thread.""" + logger.debug("RX loop started") + + while self._running: + try: + # Receive with timeout for graceful shutdown + msg = self._bus.recv(timeout=0.1) + + if msg is None: + continue + + # Convert to CANFrame + frame = CANFrame( + ts_ns=int(msg.timestamp * 1_000_000_000) if msg.timestamp else time.time_ns(), + bus=self.interface, + can_id=msg.arbitration_id, + is_extended=msg.is_extended_id, + dlc=msg.dlc, + data=bytes(msg.data) + ) + + # Parse as OBD2 response + response = OBD2Response.from_can_frame(frame) + + if response: + with self._stats_lock: + self._stats.rx_count += 1 + self._stats.last_rx_time = time.time() + + # Notify callback + if self._response_callback: + try: + self._response_callback(response) + except Exception as e: + logger.error(f"Response callback error: {e}") + + except can.CanError as e: + with self._stats_lock: + self._stats.bus_errors += 1 + logger.warning(f"CAN bus error: {e}") + + except Exception as e: + with self._stats_lock: + self._stats.rx_errors += 1 + logger.error(f"RX loop error: {e}") + + logger.debug("RX loop stopped") + + def _tx_loop(self) -> None: + """Transmit loop - runs in dedicated thread.""" + logger.debug("TX loop started") + + while self._running: + try: + # Get frame from queue with timeout + frame = self._tx_queue.get(timeout=0.1) + + if self._bus is None: + continue + + # Convert CANFrame to python-can Message + msg = can.Message( + arbitration_id=frame.can_id, + data=frame.data, + is_extended_id=frame.is_extended, + dlc=frame.dlc + ) + + # Send + self._bus.send(msg) + + with self._stats_lock: + self._stats.tx_count += 1 + self._stats.last_tx_time = time.time() + + except Empty: + continue + + except can.CanError as e: + with self._stats_lock: + self._stats.tx_errors += 1 + self._stats.bus_errors += 1 + logger.warning(f"CAN TX error: {e}") + + except Exception as e: + with self._stats_lock: + self._stats.tx_errors += 1 + logger.error(f"TX loop error: {e}") + + logger.debug("TX loop stopped") + + def __enter__(self): + """Context manager entry.""" + self.start() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """Context manager exit.""" + self.stop() + return False diff --git a/can_sniffer/src/postgresql_handler/__init__.py b/can_sniffer/src/postgresql_handler/__init__.py deleted file mode 100644 index 175ab2f..0000000 --- a/can_sniffer/src/postgresql_handler/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -""" -Модуль для работы с PostgreSQL. 
- -Предоставляет singleton класс для отправки CAN сообщений в PostgreSQL -с поддержкой пакетной отправки, connection pooling, retry с backoff. -""" - -from typing import Optional -from .postgresql_client import PostgreSQLClient, get_postgresql_client - -__all__ = ['PostgreSQLClient', 'get_postgresql_client'] - diff --git a/can_sniffer/src/postgresql_handler/postgresql_client.py b/can_sniffer/src/postgresql_handler/postgresql_client.py deleted file mode 100644 index dd4e4c0..0000000 --- a/can_sniffer/src/postgresql_handler/postgresql_client.py +++ /dev/null @@ -1,759 +0,0 @@ -""" -Модуль для работы с PostgreSQL. - -Предоставляет singleton класс для отправки CAN сообщений в PostgreSQL -с поддержкой пакетной отправки, connection pooling, retry с backoff. -""" - -import queue -import threading -import time -from datetime import datetime, timezone -from queue import Queue, Empty -from typing import Optional, List, Dict, Any, Tuple -from enum import Enum - -from config import config -from logger import get_logger - -logger = get_logger(__name__) - -# Импортируем PostgreSQL клиент -try: - import psycopg2 - from psycopg2 import pool, sql - from psycopg2.extras import execute_batch - POSTGRESQL_AVAILABLE = True -except ImportError: - POSTGRESQL_AVAILABLE = False - psycopg2 = None - pool = None - sql = None - execute_batch = None - logger.warning("psycopg2 not installed. Install with: pip install psycopg2-binary") - - -class ConnectionStatus(Enum): - """Статус соединения с PostgreSQL.""" - DISCONNECTED = "disconnected" - CONNECTING = "connecting" - CONNECTED = "connected" - ERROR = "error" - - -class PostgreSQLClient: - """Singleton класс для работы с PostgreSQL.""" - - _instance: Optional['PostgreSQLClient'] = None - _lock = threading.Lock() - - def __new__(cls): - """Singleton паттерн для единого экземпляра клиента.""" - if cls._instance is None: - with cls._lock: - if cls._instance is None: - cls._instance = super().__new__(cls) - return cls._instance - - def __init__(self): - """Инициализация клиента PostgreSQL.""" - # Защита от race condition при инициализации singleton - with self._lock: - # Проверяем, что инициализация выполняется только один раз - if hasattr(self, '_initialized') and self._initialized: - return - - self.config = config.postgresql - self.logger = logger - - # Инициализируем атрибуты по умолчанию - self.connection_pool: Optional[pool.ThreadedConnectionPool] = None - self.message_queue: Queue[Dict[str, Any]] = Queue(maxsize=config.general.buffer_size) - self.running = False - self.forwarder_thread: Optional[threading.Thread] = None - self.connection_status = ConnectionStatus.DISCONNECTED - - # Статистика с блокировкой для потокобезопасности - self._stats_lock = threading.Lock() - self._sent_count = 0 - self._failed_count = 0 - self._retry_count = 0 - self._reconnect_count = 0 - self._synced_count = 0 # Количество синхронизированных из SQLite - - # Флаг для запуска синхронизации после восстановления соединения - self._needs_sync = True - self._last_sync_time = 0.0 - self._sync_interval = self.config.sync_interval - - if not POSTGRESQL_AVAILABLE: - self.logger.error("PostgreSQL client library not available") - self._initialized = True # Отмечаем как инициализированный, чтобы не повторять - return - - # Инициализируем клиент - self._init_client() - self._initialized = True - - # Потокобезопасные свойства для статистики - @property - def sent_count(self) -> int: - with self._stats_lock: - return self._sent_count - - @property - def failed_count(self) -> int: - with 
self._stats_lock: - return self._failed_count - - @property - def retry_count(self) -> int: - with self._stats_lock: - return self._retry_count - - @property - def reconnect_count(self) -> int: - with self._stats_lock: - return self._reconnect_count - - @property - def synced_count(self) -> int: - with self._stats_lock: - return self._synced_count - - def _increment_sent(self, count: int = 1) -> None: - with self._stats_lock: - self._sent_count += count - - def _increment_failed(self, count: int = 1) -> None: - with self._stats_lock: - self._failed_count += count - - def _increment_retry(self) -> None: - with self._stats_lock: - self._retry_count += 1 - - def _increment_reconnect(self) -> None: - with self._stats_lock: - self._reconnect_count += 1 - - def _increment_synced(self, count: int = 1) -> None: - with self._stats_lock: - self._synced_count += count - - def _init_client(self) -> None: - """Инициализация пула соединений PostgreSQL.""" - if not POSTGRESQL_AVAILABLE: - self.connection_status = ConnectionStatus.ERROR - return - - try: - # Создаем пул соединений - self.connection_pool = pool.ThreadedConnectionPool( - minconn=1, - maxconn=self.config.connection_pool_size, - host=self.config.host, - port=self.config.port, - database=self.config.database, - user=self.config.user, - password=self.config.password, - connect_timeout=self.config.connection_timeout - ) - - # Проверяем соединение - conn = self.connection_pool.getconn() - if conn: - # Создаем таблицу если её нет - self._create_table(conn) - self.connection_pool.putconn(conn) - self.connection_status = ConnectionStatus.CONNECTED - self.logger.info( - f"PostgreSQL connected: {self.config.host}:{self.config.port}/{self.config.database}" - ) - else: - self.connection_pool = None - self.connection_status = ConnectionStatus.ERROR - - except Exception as e: - self.connection_pool = None - self.connection_status = ConnectionStatus.ERROR - # Используем warning вместо error - это нормальная ситуация при старте без сети - self.logger.warning(f"PostgreSQL not available: {e}") - - def _create_table(self, conn) -> None: - """Создание таблицы для CAN сообщений если её нет.""" - try: - cursor = conn.cursor() - cursor.execute(""" - CREATE TABLE IF NOT EXISTS can_messages ( - id BIGSERIAL PRIMARY KEY, - timestamp TIMESTAMP NOT NULL, - interface VARCHAR(50) NOT NULL, - can_id INTEGER NOT NULL, - can_id_hex VARCHAR(10) NOT NULL, - is_extended BOOLEAN NOT NULL, - dlc INTEGER NOT NULL, - data BYTEA NOT NULL, - data_hex VARCHAR(32) NOT NULL, - created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP - ); - - CREATE INDEX IF NOT EXISTS idx_can_messages_timestamp ON can_messages(timestamp); - CREATE INDEX IF NOT EXISTS idx_can_messages_can_id ON can_messages(can_id); - CREATE INDEX IF NOT EXISTS idx_can_messages_interface ON can_messages(interface); - """) - conn.commit() - cursor.close() - self.logger.info("PostgreSQL table 'can_messages' created or verified") - except Exception as e: - conn.rollback() - self.logger.error(f"Failed to create table: {e}", exc_info=True) - raise - - def write_message(self, interface: str, can_id: int, dlc: int, data: bytes, timestamp: float, block: bool = False) -> bool: - """ - Добавление CAN сообщения в очередь для отправки. 
- - Args: - interface: Имя интерфейса (например, 'can0') - can_id: CAN ID сообщения - dlc: Data Length Code - data: Данные сообщения (bytes) - timestamp: Временная метка сообщения (float в секундах) - block: Блокировать ли при переполнении очереди - - Returns: - True если сообщение добавлено в очередь - """ - if not self.connection_pool: - return False - - try: - # Добавляем сообщение в очередь для пакетной отправки - if block: - self.message_queue.put({ - "interface": interface, - "can_id": can_id, - "can_id_hex": hex(can_id), - "dlc": dlc, - "data": data, - "data_hex": data.hex().upper(), - "timestamp": timestamp, - "is_extended": can_id > 0x7FF - }) - else: - try: - self.message_queue.put_nowait({ - "interface": interface, - "can_id": can_id, - "can_id_hex": hex(can_id), - "dlc": dlc, - "data": data, - "data_hex": data.hex().upper(), - "timestamp": timestamp, - "is_extended": can_id > 0x7FF - }) - except queue.Full: - # Очередь переполнена - пропускаем сообщение - self._increment_failed() - return False - return True - except Exception as e: - self.logger.error( - f"Failed to queue message for PostgreSQL: {e}", - exc_info=True - ) - self._increment_failed() - return False - - def write_messages_batch(self, messages: List[Dict[str, Any]], block: bool = False) -> int: - """ - Пакетная отправка сообщений в PostgreSQL. - - Добавляет сообщения в очередь для асинхронной отправки через forwarder loop. - - Args: - messages: Список словарей с данными сообщений - block: Блокировать ли при переполнении очереди - - Returns: - Количество успешно добавленных в очередь сообщений - """ - if not self.connection_pool or not messages: - return 0 - - # Проверяем соединение перед добавлением в очередь - if self.connection_status != ConnectionStatus.CONNECTED: - if not self._health_check(): - # Соединение недоступно - пропускаем батч без ошибки - self._increment_failed(len(messages)) - return 0 - else: - self.connection_status = ConnectionStatus.CONNECTED - - # Проверяем заполненность очереди перед добавлением - queue_usage = self.message_queue.qsize() / self.message_queue.maxsize if self.message_queue.maxsize > 0 else 0 - if queue_usage > 0.9 and not block: - # Очередь почти переполнена - пропускаем батч - self._increment_failed(len(messages)) - return 0 - - # Добавляем сообщения в очередь для асинхронной отправки - added_count = 0 - for msg in messages: - try: - if block: - self.message_queue.put(msg) - else: - try: - self.message_queue.put_nowait(msg) - except queue.Full: - # Очередь переполнена - пропускаем оставшиеся сообщения - break - added_count += 1 - except Exception as e: - self.logger.debug(f"Failed to queue message: {e}") - break - - if added_count < len(messages): - self._increment_failed(len(messages) - added_count) - - return added_count - - def _send_messages_batch(self, messages: List[Dict[str, Any]]) -> int: - """ - Непосредственная отправка батча сообщений в PostgreSQL. - - Этот метод вызывается из forwarder loop для реальной отправки данных. 
- - Args: - messages: Список словарей с данными сообщений - - Returns: - Количество успешно отправленных сообщений - """ - if not self.connection_pool or not messages: - return 0 - - # Проверяем соединение перед отправкой - if self.connection_status != ConnectionStatus.CONNECTED: - if not self._health_check(): - self.logger.warning("PostgreSQL connection not available, skipping batch") - self._increment_failed(len(messages)) - return 0 - else: - self.connection_status = ConnectionStatus.CONNECTED - - conn = None - try: - # Получаем соединение из пула - conn = self.connection_pool.getconn() - if not conn: - self._increment_failed(len(messages)) - return 0 - - cursor = conn.cursor() - - # Подготавливаем данные для batch insert - insert_query = """ - INSERT INTO can_messages (timestamp, interface, can_id, can_id_hex, is_extended, dlc, data, data_hex) - VALUES (%s, %s, %s, %s, %s, %s, %s, %s) - """ - - values = [] - for msg in messages: - # Используем UTC для согласованности времени - ts = datetime.fromtimestamp(msg["timestamp"], tz=timezone.utc) - values.append(( - ts, - msg["interface"], - msg["can_id"], - msg.get("can_id_hex", hex(msg["can_id"])), - msg.get("is_extended", msg["can_id"] > 0x7FF), - msg["dlc"], - msg["data"], - msg.get("data_hex", msg["data"].hex().upper() if isinstance(msg["data"], bytes) else "") - )) - - # Выполняем batch insert - execute_batch(cursor, insert_query, values) - conn.commit() - cursor.close() - - sent = len(messages) - self._increment_sent(sent) - self.logger.debug( - f"Sent {sent} messages to PostgreSQL", - extra={"batch_size": sent} - ) - return sent - - except Exception as e: - if conn: - conn.rollback() - self.logger.error( - f"Failed to send messages batch to PostgreSQL: {e}", - exc_info=True, - extra={"batch_size": len(messages)} - ) - # Не увеличиваем failed_count здесь - это делает _send_messages_batch_with_retry - raise # Пробрасываем исключение для retry механизма - finally: - if conn: - self.connection_pool.putconn(conn) - - def _send_messages_batch_with_retry(self, messages: List[Dict[str, Any]]) -> int: - """ - Отправка батча сообщений с retry и exponential backoff. - - Args: - messages: Список словарей с данными сообщений - - Returns: - Количество успешно отправленных сообщений - """ - if not messages: - return 0 - - max_retries = self.config.max_retries - base_backoff = self.config.retry_backoff - - for attempt in range(max_retries): - try: - return self._send_messages_batch(messages) - except Exception as e: - self._increment_retry() - - if attempt < max_retries - 1: - # Exponential backoff: 1s, 2s, 4s... 
- delay = base_backoff * (2 ** attempt) - self.logger.warning( - f"PostgreSQL send failed (attempt {attempt + 1}/{max_retries}), " - f"retrying in {delay}s: {e}" - ) - time.sleep(delay) - - # Проверяем соединение перед повторной попыткой - if not self._health_check(): - self.logger.warning("PostgreSQL connection lost, attempting reconnect") - self._reconnect() - else: - # Все попытки исчерпаны - self.logger.error( - f"All {max_retries} retries failed for batch of {len(messages)} messages" - ) - self._increment_failed(len(messages)) - return 0 - - return 0 - - def _health_check(self) -> bool: - """Проверка здоровья соединения с PostgreSQL.""" - if not self.connection_pool: - return False - - try: - conn = self.connection_pool.getconn() - if conn: - cursor = conn.cursor() - cursor.execute("SELECT 1") - cursor.fetchone() - cursor.close() - self.connection_pool.putconn(conn) - self.connection_status = ConnectionStatus.CONNECTED - self.last_health_check = time.time() - return True - except Exception as e: - self.logger.debug(f"PostgreSQL health check failed: {e}") - return False - - def _sync_from_sqlite(self) -> int: - """ - Синхронизация необработанных записей из SQLite в PostgreSQL. - - Читает записи с processed=0 из SQLite и отправляет их в PostgreSQL. - После успешной отправки помечает записи как обработанные. - - Returns: - Количество синхронизированных сообщений - """ - if self.connection_status != ConnectionStatus.CONNECTED: - return 0 - - try: - # Импортируем storage здесь, чтобы избежать циклических импортов - from storage import get_storage - storage = get_storage() - - # Получаем необработанные сообщения из SQLite - unprocessed = storage.get_unprocessed_messages(limit=config.general.batch_size) - if not unprocessed: - return 0 - - self.logger.info( - f"Syncing {len(unprocessed)} unprocessed messages from SQLite to PostgreSQL" - ) - - # Конвертируем записи SQLite в формат для PostgreSQL - # Формат из SQLite: (id, timestamp, interface, can_id, can_id_hex, is_extended, dlc, data, data_hex) - messages = [] - sqlite_ids = [] - for row in unprocessed: - sqlite_id, ts, interface, can_id, can_id_hex, is_extended, dlc, data, data_hex = row - sqlite_ids.append(sqlite_id) - messages.append({ - "interface": interface, - "can_id": can_id, - "can_id_hex": can_id_hex or hex(can_id), - "dlc": dlc, - "data": data, - "data_hex": data_hex or (data.hex().upper() if isinstance(data, bytes) else ""), - "timestamp": ts, - "is_extended": bool(is_extended) if is_extended is not None else (can_id > 0x7FF) - }) - - # Отправляем в PostgreSQL с retry механизмом - sent_count = self._send_messages_batch_with_retry(messages) - - if sent_count > 0: - # Помечаем успешно отправленные как обработанные - # Помечаем все, так как _send_messages_batch либо отправляет всё, либо ничего - marked = storage.mark_as_processed(sqlite_ids) - self._increment_synced(marked) - self.logger.info( - f"Synced {sent_count} messages from SQLite, marked {marked} as processed" - ) - return sent_count - else: - self.logger.warning( - f"Failed to sync {len(messages)} messages from SQLite to PostgreSQL" - ) - - return 0 - - except Exception as e: - self.logger.error( - f"Error syncing from SQLite: {e}", - exc_info=True - ) - return 0 - - def _reconnect(self) -> bool: - """ - Переподключение к PostgreSQL. 
- - Returns: - True если подключение успешно - """ - if self.connection_status == ConnectionStatus.CONNECTING: - return False - - self.connection_status = ConnectionStatus.CONNECTING - self._increment_reconnect() - - try: - # Закрываем старый пул если есть - if self.connection_pool: - try: - self.connection_pool.closeall() - except Exception: - pass - self.connection_pool = None - - # Создаем новый пул - self._init_client() - - if self.connection_status == ConnectionStatus.CONNECTED: - self.logger.info("Successfully connected to PostgreSQL") - return True - else: - return False - - except Exception as e: - self.connection_status = ConnectionStatus.ERROR - self.logger.warning(f"Failed to connect to PostgreSQL: {e}") - return False - - def _forwarder_loop(self) -> None: - """Основной цикл для отправки сообщений в PostgreSQL.""" - self.logger.info("PostgreSQL forwarder loop started") - - batch = [] - last_flush_time = time.time() - last_reconnect_attempt = 0.0 - reconnect_interval = 10.0 # Интервал между попытками подключения - was_connected = self.connection_status == ConnectionStatus.CONNECTED - - while self.running or not self.message_queue.empty(): - try: - current_time = time.time() - - # Если нет соединения - пытаемся подключиться периодически - if not self.connection_pool or self.connection_status != ConnectionStatus.CONNECTED: - if current_time - last_reconnect_attempt >= reconnect_interval: - last_reconnect_attempt = current_time - self.logger.info("Attempting to connect to PostgreSQL...") - self._reconnect() - - # Проверяем восстановление соединения и запускаем синхронизацию - is_connected = self.connection_status == ConnectionStatus.CONNECTED - if is_connected: - # Синхронизация при восстановлении соединения или по интервалу - should_sync = ( - (not was_connected and is_connected) or # Соединение восстановлено - (self._needs_sync) or # Первая синхронизация - (current_time - self._last_sync_time >= self._sync_interval) # По интервалу - ) - if should_sync: - synced = self._sync_from_sqlite() - self._last_sync_time = current_time - self._needs_sync = False - if synced > 0: - self.logger.debug(f"Synced {synced} messages from SQLite") - - was_connected = is_connected - - # Собираем сообщения в батч - try: - message = self.message_queue.get(timeout=0.1) - batch.append(message) - except Empty: - pass - - # Отправляем батч если он заполнен или прошло достаточно времени - should_flush = ( - len(batch) >= self.config.batch_size or - (batch and (current_time - last_flush_time) >= self.config.flush_interval) - ) - - if should_flush: - if batch: - # Отправляем батч с retry механизмом - self._send_messages_batch_with_retry(batch) - batch = [] - last_flush_time = current_time - - except Exception as e: - self.logger.error( - f"Error in forwarder loop: {e}", - exc_info=True - ) - time.sleep(0.1) - - # Отправляем оставшиеся сообщения с retry - if batch: - self._send_messages_batch_with_retry(batch) - - # Финальная синхронизация перед остановкой - self._sync_from_sqlite() - - self.logger.info( - "PostgreSQL forwarder loop stopped", - extra={ - "sent_count": self.sent_count, - "failed_count": self.failed_count, - "synced_count": self.synced_count - } - ) - - def start(self) -> None: - """Запуск forwarder потока для отправки сообщений.""" - if not self.config.enabled: - self.logger.info("PostgreSQL is disabled in config") - return - - if hasattr(self, 'running') and self.running: - self.logger.warning("PostgreSQL forwarder is already running") - return - - self.running = True - - # Запускаем forwarder 
поток даже если соединения нет - # Он будет периодически пытаться подключиться - self.forwarder_thread = threading.Thread( - target=self._forwarder_loop, - name="PostgreSQL-Forwarder", - daemon=False - ) - self.forwarder_thread.start() - - if self.connection_pool: - self.logger.info("PostgreSQL forwarder started (connected)") - else: - self.logger.info("PostgreSQL forwarder started (will retry connection)") - - def stop(self) -> None: - """Остановка forwarder потока.""" - if not hasattr(self, 'running') or not self.running: - return - - self.logger.info("Stopping PostgreSQL forwarder...") - self.running = False - - # Даем время на обработку оставшихся сообщений в очереди - max_wait_time = 5.0 - wait_start = time.time() - while not self.message_queue.empty() and (time.time() - wait_start) < max_wait_time: - time.sleep(0.1) - - if not self.message_queue.empty(): - remaining = self.message_queue.qsize() - self.logger.warning( - f"PostgreSQL queue not empty after shutdown, {remaining} messages remaining" - ) - - # Ждем завершения потока - if self.forwarder_thread and self.forwarder_thread.is_alive(): - self.forwarder_thread.join(timeout=10.0) - if self.forwarder_thread.is_alive(): - self.logger.warning("Forwarder thread did not stop gracefully") - - # Закрываем пул соединений - if self.connection_pool: - try: - self.connection_pool.closeall() - self.connection_pool = None - except Exception as e: - self.logger.error(f"Error closing PostgreSQL connection pool: {e}", exc_info=True) - - self.connection_status = ConnectionStatus.DISCONNECTED - self.logger.info("PostgreSQL forwarder stopped") - - def get_stats(self) -> Dict[str, Any]: - """Получение статистики клиента.""" - return { - "enabled": self.config.enabled, - "initialized": self._initialized and self.connection_pool is not None, - "running": getattr(self, 'running', False), - "connection_status": self.connection_status.value, - "sent_count": self.sent_count, - "failed_count": self.failed_count, - "retry_count": self.retry_count, - "reconnect_count": self.reconnect_count, - "synced_count": self.synced_count, - "queue_size": self.message_queue.qsize(), - "host": self.config.host if self.config.enabled else None, - "database": self.config.database if self.config.enabled else None - } - - def close(self) -> None: - """Закрытие соединения с PostgreSQL.""" - self.stop() - - -# Глобальный экземпляр клиента -_postgresql_instance: Optional[PostgreSQLClient] = None - - -def get_postgresql_client() -> PostgreSQLClient: - """ - Получение глобального экземпляра PostgreSQL клиента (singleton). - - Returns: - Экземпляр PostgreSQLClient - """ - global _postgresql_instance - if _postgresql_instance is None: - _postgresql_instance = PostgreSQLClient() - return _postgresql_instance - diff --git a/can_sniffer/src/socket_can/__init__.py b/can_sniffer/src/socket_can/__init__.py deleted file mode 100644 index b1a8b95..0000000 --- a/can_sniffer/src/socket_can/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -"""Модуль для работы с SocketCAN интерфейсами.""" - -from .src import CANSniffer, CANBusHandler -from .message_processor import MessageProcessor - -__all__ = ['CANSniffer', 'CANBusHandler', 'MessageProcessor'] - diff --git a/can_sniffer/src/socket_can/message_processor.py b/can_sniffer/src/socket_can/message_processor.py deleted file mode 100644 index 34c1fdb..0000000 --- a/can_sniffer/src/socket_can/message_processor.py +++ /dev/null @@ -1,434 +0,0 @@ -""" -Модуль для обработки CAN сообщений. - -Обрабатывает входящие CAN сообщения через pipeline обработчиков. 
-Использует очередь для асинхронной обработки, чтобы не блокировать чтение CAN сообщений. -""" - -import queue -import threading -import time -from queue import Queue, Empty -from typing import Optional, Dict, Any, List - -from logger import get_logger -from config import config -from can_frame import CANFrame -from handlers import BaseHandler, StorageHandler, PostgreSQLHandler, FlipperHandler - -logger = get_logger(__name__) - - -class MessageProcessor: - """ - Класс для обработки и сохранения CAN сообщений с асинхронной обработкой. - - Использует плагинную архитектуру обработчиков (pipeline). - Каждый обработчик реализует интерфейс BaseHandler. - """ - - def __init__(self, handlers: Optional[List[BaseHandler]] = None, queue_size: Optional[int] = None): - """ - Инициализация процессора сообщений. - - Args: - handlers: Список обработчиков для pipeline. Если None, создаются по умолчанию. - queue_size: Максимальный размер очереди сообщений. Если None, берется из config.general.buffer_size - """ - self.logger = logger - - # Используем размер очереди из конфига, если не указан явно - if queue_size is None: - queue_size = config.general.buffer_size - - # Очередь для асинхронной обработки сообщений - # Храним CANFrame объекты (неизменяемые, легковесные) - self.message_queue: Queue[CANFrame] = Queue(maxsize=queue_size) - self.running = False - self.processing_thread: Optional[threading.Thread] = None - - # Статистика с блокировкой для потокобезопасности - self._stats_lock = threading.Lock() - self._processed_count = 0 - self._dropped_count = 0 - self._queue_full_warnings = 0 - - # Инициализируем обработчики - if handlers is None: - handlers = self._create_default_handlers() - - self.handlers: List[BaseHandler] = [] - self._init_handlers(handlers) - - @property - def processed_count(self) -> int: - with self._stats_lock: - return self._processed_count - - @property - def dropped_count(self) -> int: - with self._stats_lock: - return self._dropped_count - - @property - def queue_full_warnings(self) -> int: - with self._stats_lock: - return self._queue_full_warnings - - def _increment_processed(self, count: int = 1) -> None: - with self._stats_lock: - self._processed_count += count - - def _increment_dropped(self, count: int = 1) -> None: - with self._stats_lock: - self._dropped_count += count - self._queue_full_warnings += count - - def _create_default_handlers(self) -> List[BaseHandler]: - """ - Создание обработчиков по умолчанию из конфигурации. - - Returns: - Список обработчиков - """ - handlers = [] - - # Storage handler всегда включен - handlers.append(StorageHandler(enabled=True)) - - # PostgreSQL handler зависит от конфигурации - handlers.append(PostgreSQLHandler(enabled=None)) # None = из config - - # Flipper Zero handler зависит от конфигурации - handlers.append(FlipperHandler(enabled=None)) # None = из config - - return handlers - - def _init_handlers(self, handlers: List[BaseHandler]) -> None: - """ - Инициализация обработчиков. 
- - Args: - handlers: Список обработчиков для инициализации - """ - for handler in handlers: - if handler.is_enabled(): - try: - if handler.initialize(): - self.handlers.append(handler) - self.logger.info( - f"Handler '{handler.name}' initialized successfully" - ) - else: - self.logger.warning( - f"Handler '{handler.name}' initialization failed" - ) - except Exception as e: - self.logger.error( - f"Error initializing handler '{handler.name}': {e}", - exc_info=True - ) - else: - self.logger.debug(f"Handler '{handler.name}' is disabled") - - self.logger.info( - f"Initialized {len(self.handlers)}/{len(handlers)} handlers", - extra={"handlers": [h.name for h in self.handlers]} - ) - - def enqueue(self, frame: CANFrame, block: bool = False, timeout: Optional[float] = None) -> bool: - """ - Добавление CAN фрейма в очередь для асинхронной обработки. - - Этот метод вызывается из callback CAN чтения и должен быть быстрым. - - Args: - frame: CANFrame объект - block: Блокировать ли при переполнении очереди (для backpressure) - timeout: Таймаут для блокирующего режима (секунды) - - Returns: - True если сообщение добавлено, False если очередь переполнена - """ - try: - if block: - # Блокирующий режим - используется для backpressure - self.message_queue.put(frame, timeout=timeout) - return True - else: - # Неблокирующий режим - быстрое добавление - self.message_queue.put_nowait(frame) - return True - except queue.Full: - # Очередь переполнена - пропускаем сообщение - self._increment_dropped() - - # Логируем предупреждение периодически (не каждое сообщение) - if self.queue_full_warnings % 1000 == 0: - queue_usage = (self.message_queue.qsize() / self.message_queue.maxsize) * 100 - self.logger.warning( - f"Message queue full, dropped {self.dropped_count} messages", - extra={ - "dropped_count": self.dropped_count, - "queue_size": self.message_queue.qsize(), - "queue_maxsize": self.message_queue.maxsize, - "queue_usage_percent": round(queue_usage, 1) - } - ) - return False - except Exception as e: - # Неожиданная ошибка - self.logger.debug(f"Unexpected error in enqueue: {e}") - self._increment_dropped() - return False - - def get_queue_usage(self) -> float: - """ - Получение процента заполнения очереди. - - Returns: - Процент заполнения (0.0 - 1.0) - """ - if self.message_queue.maxsize == 0: - return 0.0 - return self.message_queue.qsize() / self.message_queue.maxsize - - def process(self, frame: CANFrame) -> None: - """ - Публичный метод для обработки CAN фрейма. - - Используется как callback для CANSniffer. - Быстро добавляет фрейм в очередь без блокировки. 
- - Args: - frame: CANFrame объект - """ - self.enqueue(frame) - - def _processing_loop(self) -> None: - """Основной цикл обработки сообщений из очереди.""" - self.logger.info("Message processing loop started") - - # Батч для групповой обработки - batch: List[CANFrame] = [] - batch_size = config.general.batch_size - batch_interval = config.general.batch_interval - last_batch_time = time.time() - last_flush_time = time.time() - flush_interval = 5.0 # Периодический flush обработчиков - - # Обрабатываем сообщения пока очередь не пуста или пока running=True - while self.running or not self.message_queue.empty(): - try: - # Получаем сообщение из очереди с таймаутом - # Используем меньший таймаут при shutdown для быстрого завершения - timeout = batch_interval if self.running else 0.1 - try: - frame = self.message_queue.get(timeout=timeout) - batch.append(frame) - except Empty: - # Если очередь пуста, обрабатываем накопленный батч - if batch: - self._process_batch(batch) - batch = [] - last_batch_time = time.time() - # Если shutdown и очередь пуста - выходим - if not self.running and self.message_queue.empty(): - break - continue - - # Обрабатываем батч если он заполнен или прошло достаточно времени - current_time = time.time() - should_flush = ( - len(batch) >= batch_size or - (batch and (current_time - last_batch_time) >= batch_interval) - ) - - if should_flush: - self._process_batch(batch) - batch = [] - last_batch_time = current_time - - # Периодический flush обработчиков - if (current_time - last_flush_time) >= flush_interval: - self._flush_handlers() - last_flush_time = current_time - - except Exception as e: - self.logger.error( - f"Error in processing loop: {e}", - exc_info=True - ) - - # Обрабатываем оставшиеся сообщения в батче - if batch: - self._process_batch(batch) - - # Финальный flush всех обработчиков - self._flush_handlers() - - self.logger.info( - "Message processing loop stopped", - extra={ - "processed_count": self.processed_count, - "dropped_count": self.dropped_count - } - ) - - def _process_batch(self, batch: List[CANFrame]) -> None: - """ - Обработка батча CAN фреймов через pipeline обработчиков. 
- - Args: - batch: Список CANFrame объектов - """ - if not batch: - return - - try: - # Логируем батч на уровне DEBUG (если уровень DEBUG включен) - if batch: - first_frame = batch[0] - self.logger.debug( - "CAN message batch processed", - extra={ - "batch_size": len(batch), - "first_message": { - "bus": first_frame.bus, - "can_id": first_frame.can_id, - "can_id_hex": first_frame.can_id_hex, - "dlc": first_frame.dlc, - "data_hex": first_frame.data_hex, - "ts_ns": first_frame.ts_ns, - "timestamp": first_frame.timestamp, - "is_extended": first_frame.is_extended - } - } - ) - - # Обрабатываем батч через все обработчики (pipeline) - for handler in self.handlers: - if not handler.is_enabled() or not handler.is_initialized(): - continue - - try: - handler.handle_batch(batch) - except Exception as e: - self.logger.error( - f"Error in handler '{handler.name}': {e}", - exc_info=True, - extra={"batch_size": len(batch)} - ) - - # Обновляем счетчик обработанных сообщений (атомарно) - self._increment_processed(len(batch)) - - except Exception as e: - self.logger.error( - f"Error processing batch: {e}", - exc_info=True, - extra={"batch_size": len(batch)} - ) - - def _flush_handlers(self) -> None: - """Принудительный flush всех обработчиков.""" - for handler in self.handlers: - if handler.is_enabled() and handler.is_initialized(): - try: - handler.flush() - except Exception as e: - self.logger.error( - f"Error flushing handler '{handler.name}': {e}", - exc_info=True - ) - - def start(self) -> None: - """Запуск обработки сообщений в отдельном потоке.""" - if self.running: - self.logger.warning("Message processor is already running") - return - - self.running = True - - # Запускаем специальные обработчики (например, PostgreSQL forwarder, Flipper sender) - for handler in self.handlers: - if isinstance(handler, (PostgreSQLHandler, FlipperHandler)) and handler.is_initialized(): - try: - handler.start() - except Exception as e: - self.logger.error( - f"Failed to start handler '{handler.name}': {e}", - exc_info=True - ) - - # Запускаем поток обработки сообщений - # НЕ используем daemon=True, чтобы поток мог корректно завершиться - self.processing_thread = threading.Thread( - target=self._processing_loop, - name="MessageProcessor", - daemon=False - ) - self.processing_thread.start() - self.logger.info("Message processor started") - - def shutdown(self) -> None: - """Корректное завершение работы процессора.""" - self.logger.info("Shutting down message processor...") - self.running = False - - # Даем время на обработку оставшихся сообщений - # Ждем пока очередь не опустеет или не пройдет таймаут - max_wait_time = 10.0 - wait_start = time.time() - while not self.message_queue.empty() and (time.time() - wait_start) < max_wait_time: - time.sleep(0.1) - - if not self.message_queue.empty(): - remaining = self.message_queue.qsize() - self.logger.warning( - f"Queue not empty after shutdown signal, {remaining} messages remaining" - ) - - # Ждем завершения потока обработки - if self.processing_thread and self.processing_thread.is_alive(): - self.processing_thread.join(timeout=5.0) - if self.processing_thread.is_alive(): - self.logger.warning("Processing thread did not stop gracefully") - - # Закрываем все обработчики - for handler in self.handlers: - try: - handler.shutdown() - except Exception as e: - self.logger.error( - f"Error shutting down handler '{handler.name}': {e}", - exc_info=True - ) - - self.logger.info( - "Message processor stopped", - extra={ - "processed_count": self.processed_count, - "dropped_count": 
self.dropped_count - } - ) - - def get_stats(self) -> dict: - """Получение статистики процессора.""" - stats = { - "processed_count": self.processed_count, - "dropped_count": self.dropped_count, - "queue_size": self.message_queue.qsize(), - "running": self.running, - "handlers_count": len(self.handlers) - } - - # Добавляем статистику всех обработчиков - for handler in self.handlers: - try: - handler_stats = handler.get_stats() - stats[handler.name] = handler_stats - except Exception as e: - self.logger.debug(f"Failed to get stats from handler '{handler.name}': {e}") - - return stats diff --git a/can_sniffer/src/socket_can/src.py b/can_sniffer/src/socket_can/src.py deleted file mode 100644 index 08b1be9..0000000 --- a/can_sniffer/src/socket_can/src.py +++ /dev/null @@ -1,411 +0,0 @@ -""" -Модуль для работы с SocketCAN интерфейсами. - -Предоставляет параллельное чтение CAN сообщений с нескольких интерфейсов -с поддержкой обработки ошибок, логирования и graceful shutdown. -""" - -import can -import threading -import time -from typing import Callable, Dict, List, Optional -from queue import Queue, Empty - -from config import config -from logger import get_logger -from can_frame import CANFrame -from .message_processor import MessageProcessor - - -class CANBusHandler: - """Обработчик для одной CAN шины.""" - - def __init__( - self, - interface: str, - bus: can.Bus, - message_callback: Callable[[CANFrame], None], - logger, - filters: Optional[List[dict]] = None - ): - """ - Инициализация обработчика CAN шины. - - Args: - interface: Имя интерфейса (например, 'can0') - bus: Экземпляр can.Bus - message_callback: Функция для обработки CAN сообщений (принимает CANFrame) - logger: Логгер для данного интерфейса - filters: Список фильтров SocketCAN - """ - self.interface = interface - self.bus = bus - self.message_callback = message_callback - self.logger = logger - self.filters = filters or [] - self.running = False - self.thread: Optional[threading.Thread] = None - self.message_count = 0 - self.error_count = 0 - self.last_message_time: Optional[float] = None - - # Кэшируем ссылки для быстрого доступа (избегаем рефлексии в hot path) - self._processor = None - self._has_backpressure = False - self._enqueue_method = None - self._get_queue_usage_method = None - - if hasattr(message_callback, '__self__'): - processor = getattr(message_callback, '__self__', None) - if processor and hasattr(processor, 'get_queue_usage') and hasattr(processor, 'enqueue'): - self._processor = processor - self._has_backpressure = True - self._enqueue_method = processor.enqueue - self._get_queue_usage_method = processor.get_queue_usage - - # Применяем фильтры, если они есть - if self.filters: - self._apply_filters() - - def _apply_filters(self) -> None: - """Применение фильтров SocketCAN к шине.""" - try: - # SocketCAN фильтры применяются через set_filters - # Формат: [{"can_id": 0x123, "can_mask": 0x7FF}, ...] 
- self.bus.set_filters(self.filters) - self.logger.info( - f"Applied {len(self.filters)} filters to {self.interface}", - extra={"filters": self.filters} - ) - except Exception as e: - self.logger.warning( - f"Failed to apply filters to {self.interface}: {e}", - exc_info=True - ) - - def _read_loop(self) -> None: - """Основной цикл чтения сообщений с шины.""" - self.logger.info(f"Starting read loop for {self.interface}") - - # Переменные для backpressure механизма - consecutive_drops = 0 - backpressure_delay = 0.0 - max_backpressure_delay = 0.5 # Максимальная задержка 500ms - - while self.running: - try: - # Читаем сообщение с таймаутом для возможности проверки running - # Увеличиваем таймаут при backpressure - recv_timeout = 0.1 + backpressure_delay - message = self.bus.recv(timeout=recv_timeout) - - if message is not None: - self.message_count += 1 - self.last_message_time = time.time() - - # Конвертируем can.Message в CANFrame - try: - frame = CANFrame.from_can_message(message, self.interface) - except Exception as e: - self.logger.error( - f"Failed to convert message to CANFrame for {self.interface}: {e}", - exc_info=True, - extra={"can_id": hex(message.arbitration_id) if message else None} - ) - self.error_count += 1 - continue - - # Вызываем callback для обработки сообщения - # Используем backpressure: если очередь заполнена, замедляем чтение - try: - # Используем закэшированные ссылки для избежания рефлексии в hot path - if self._has_backpressure: - queue_usage = self._get_queue_usage_method() - - # Если очередь заполнена более чем на 80%, используем блокирующий режим - if queue_usage > 0.8: - # Блокируем добавление с небольшим таймаутом для backpressure - success = self._enqueue_method(frame, block=True, timeout=0.01) - if not success: - consecutive_drops += 1 - # Увеличиваем задержку при последовательных потерях - backpressure_delay = min( - max_backpressure_delay, - 0.001 * consecutive_drops - ) - continue - else: - # Очередь не заполнена - быстрое добавление - success = self._enqueue_method(frame, block=False) - if not success: - consecutive_drops += 1 - backpressure_delay = min( - max_backpressure_delay, - 0.001 * consecutive_drops - ) - continue - - # Сбрасываем счетчик при успешной отправке - if queue_usage < 0.5: - consecutive_drops = 0 - backpressure_delay = 0.0 - else: - # Обычный callback без backpressure - self.message_callback(frame) - except Exception as e: - self.logger.error( - f"Error in message callback for {self.interface}: {e}", - exc_info=True, - extra={"can_id": frame.can_id_hex} - ) - self.error_count += 1 - - except can.CanError as e: - self.logger.error( - f"CAN error on {self.interface}: {e}", - exc_info=True - ) - self.error_count += 1 - # Небольшая задержка перед повторной попыткой - time.sleep(0.1) - - except Exception as e: - self.logger.error( - f"Unexpected error on {self.interface}: {e}", - exc_info=True - ) - self.error_count += 1 - time.sleep(0.1) - - self.logger.info( - f"Read loop stopped for {self.interface}", - extra={ - "total_messages": self.message_count, - "total_errors": self.error_count - } - ) - - def start(self) -> None: - """Запуск чтения сообщений в отдельном потоке.""" - if self.running: - self.logger.warning(f"{self.interface} is already running") - return - - self.running = True - # НЕ используем daemon=True для корректного завершения - self.thread = threading.Thread( - target=self._read_loop, - name=f"CAN-{self.interface}", - daemon=False - ) - self.thread.start() - self.logger.info(f"Started reading from {self.interface}") - - def 
stop(self) -> None: - """Остановка чтения сообщений.""" - if not self.running: - return - - self.logger.info(f"Stopping {self.interface}...") - self.running = False - - if self.thread and self.thread.is_alive(): - self.thread.join(timeout=2.0) - if self.thread.is_alive(): - self.logger.warning(f"Thread for {self.interface} did not stop gracefully") - - # Закрываем шину - try: - self.bus.shutdown() - self.logger.info(f"Bus {self.interface} closed") - except Exception as e: - self.logger.error(f"Error closing bus {self.interface}: {e}", exc_info=True) - - def get_stats(self) -> Dict: - """Получение статистики по обработке сообщений.""" - return { - "interface": self.interface, - "message_count": self.message_count, - "error_count": self.error_count, - "last_message_time": self.last_message_time, - "running": self.running - } - - -class CANSniffer: - """Класс для параллельного чтения CAN сообщений с нескольких интерфейсов.""" - - def __init__(self, message_callback: Optional[Callable[[CANFrame], None]] = None): - """ - Инициализация CAN Sniffer. - - Args: - message_callback: Функция для обработки CAN сообщений. - Должна принимать CANFrame объект - """ - self.config = config.can - self.logger = get_logger(__name__) - - # Инициализируем MessageProcessor для автоматической обработки сообщений - # Используем настройку из конфигурации для логирования сообщений - self.message_processor = MessageProcessor() - - # Используем переданный callback или процессор по умолчанию - if message_callback: - self.message_callback = message_callback - else: - # Автоматически используем MessageProcessor - # Метод enqueue быстрый и не блокирует чтение CAN - self.message_callback = self.message_processor.enqueue - - self.bus_handlers: Dict[str, CANBusHandler] = {} - self.running = False - - self._init_buses() - - def _init_buses(self) -> None: - """ - Инициализация CAN шин из конфигурации. - - Примечание: Битрейт должен быть установлен на уровне системы через: - `ip link set canX type can bitrate X` - Значение в конфиге используется только для логирования и должно соответствовать реальному битрейту интерфейса. - """ - self.logger.info( - "Initializing CAN buses", - extra={ - "interfaces": self.config.interfaces, - "listen_only": self.config.listen_only, - "bitrate": self.config.bitrate, - "note": "Bitrate must match system interface settings (ip link set)" - } - ) - - for interface in self.config.interfaces: - try: - bus = self._create_bus(interface) - handler = CANBusHandler( - interface=interface, - bus=bus, - message_callback=self.message_callback, - logger=self.logger.getChild(f"bus.{interface}"), - filters=self.config.filters - ) - self.bus_handlers[interface] = handler - self.logger.info(f"Initialized bus: {interface}") - - except Exception as e: - self.logger.error( - f"Failed to initialize bus {interface}: {e}", - exc_info=True, - extra={"interface": interface} - ) - - def _create_bus(self, interface: str) -> can.Bus: - """ - Создание CAN шины для интерфейса. 
- - Args: - interface: Имя интерфейса (например, 'can0') - - Returns: - Экземпляр can.Bus - """ - bus_kwargs = { - "channel": interface, - "bustype": "socketcan", - "receive_own_messages": False, - } - - # Добавляем listen-only режим, если указан в конфигурации - if self.config.listen_only: - # Для SocketCAN listen-only режим устанавливается через параметр - # В некоторых версиях python-can это может быть через receive_own_messages=False - # и отдельным параметром, но для SocketCAN обычно достаточно receive_own_messages=False - pass - - try: - bus = can.interface.Bus(**bus_kwargs) - self.logger.debug( - f"Created bus for {interface}", - extra={"kwargs": bus_kwargs} - ) - return bus - except can.CanError as e: - self.logger.error( - f"CAN error creating bus for {interface}: {e}", - exc_info=True - ) - raise - except Exception as e: - self.logger.error( - f"Unexpected error creating bus for {interface}: {e}", - exc_info=True - ) - raise - - - def start(self) -> None: - """Запуск чтения со всех шин.""" - if self.running: - self.logger.warning("CANSniffer is already running") - return - - self.logger.info( - f"Starting CANSniffer with {len(self.bus_handlers)} buses", - extra={"interfaces": list(self.bus_handlers.keys())} - ) - - # Запускаем процессор сообщений первым - self.message_processor.start() - - self.running = True - - # Запускаем все обработчики параллельно - for handler in self.bus_handlers.values(): - handler.start() - - self.logger.info("CANSniffer started successfully") - - def stop(self) -> None: - """Остановка чтения со всех шин.""" - if not self.running: - return - - self.logger.info("Stopping CANSniffer...") - self.running = False - - # Останавливаем все обработчики - for handler in self.bus_handlers.values(): - handler.stop() - - # Останавливаем процессор сообщений - self.message_processor.shutdown() - - self.logger.info("CANSniffer stopped") - - def get_stats(self) -> Dict: - """Получение статистики по всем шинам и обработке сообщений.""" - stats = { - "running": self.running, - "buses": { - interface: handler.get_stats() - for interface, handler in self.bus_handlers.items() - } - } - - # Добавляем статистику процессора сообщений - processor_stats = self.message_processor.get_stats() - stats["message_processor"] = processor_stats - - return stats - - def __enter__(self): - """Поддержка context manager.""" - self.start() - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - """Поддержка context manager.""" - self.stop() - return False diff --git a/can_sniffer/src/storage/storage.py b/can_sniffer/src/storage/storage.py index f7dfd53..e7d1921 100644 --- a/can_sniffer/src/storage/storage.py +++ b/can_sniffer/src/storage/storage.py @@ -1,15 +1,17 @@ """ -Модуль для работы с локальным хранилищем SQLite. +SQLite Storage for OBD2 Data. -Предоставляет singleton класс для инициализации и работы с SQLite базой данных -для временного хранения CAN сообщений. +Provides singleton class for storing OBD2 readings and sessions +with efficient batch operations and automatic cleanup. 
""" import sqlite3 import threading +import time from pathlib import Path -from typing import Optional, Dict, Any +from typing import Optional, Dict, Any, List from contextlib import contextmanager +from dataclasses import dataclass from config import config from logger import get_logger @@ -17,298 +19,483 @@ from logger import get_logger logger = get_logger(__name__) +@dataclass +class SessionSummary: + """Summary of a driving session.""" + + id: int + started_at: float + ended_at: Optional[float] + duration_s: Optional[float] + total_distance_km: Optional[float] + avg_speed_kmh: Optional[float] + max_speed_kmh: Optional[float] + max_rpm: Optional[float] + fuel_consumed_l: Optional[float] + readings_count: int + + class Storage: - """Singleton класс для работы с SQLite базой данных.""" + """Singleton class for OBD2 data storage.""" _instance: Optional['Storage'] = None _lock = threading.Lock() - _write_lock = threading.Lock() # Мьютекс для потокобезопасной записи в SQLite - + _write_lock = threading.Lock() + def __new__(cls): - """Singleton паттерн для единого экземпляра хранилища.""" + """Singleton pattern.""" if cls._instance is None: with cls._lock: if cls._instance is None: cls._instance = super().__new__(cls) return cls._instance - + def __init__(self): - """Инициализация хранилища.""" - # Защита от race condition при инициализации singleton + """Initialize storage.""" with self._lock: - # Проверяем, что инициализация выполняется только один раз if hasattr(self, '_initialized') and self._initialized: return self.config = config.storage - self.logger = logger self.connection: Optional[sqlite3.Connection] = None self._initialized = False + self._current_session_id: Optional[int] = None - # Инициализируем базу данных self._init_database() - + def _init_database(self) -> None: - """Инициализация базы данных SQLite.""" + """Initialize SQLite database.""" try: - # Определяем путь к базе данных db_path = Path(self.config.database_path) db_path.parent.mkdir(parents=True, exist_ok=True) - - self.logger.info( + + logger.info( "Initializing SQLite database", extra={ "path": str(db_path), "wal_mode": self.config.wal_mode, - "sync_mode": self.config.sync_mode } ) - - # Создаем соединение + self.connection = sqlite3.connect( str(db_path), - check_same_thread=False, # Разрешаем использование из разных потоков - timeout=30.0 # Таймаут для блокировок + check_same_thread=False, + timeout=30.0 ) - - # Настраиваем режим синхронизации - sync_mode_map = { - "NORMAL": "NORMAL", - "FULL": "FULL", - "OFF": "OFF" - } - sync_mode = sync_mode_map.get(self.config.sync_mode.upper(), "NORMAL") + + # Configure for performance + sync_mode = self.config.sync_mode.upper() self.connection.execute(f"PRAGMA synchronous = {sync_mode}") - - # Включаем WAL режим, если указано + if self.config.wal_mode: self.connection.execute("PRAGMA journal_mode = WAL") - self.logger.info("WAL mode enabled") - - # Оптимизация для производительности - self.connection.execute("PRAGMA busy_timeout = 30000") # 30 секунд - self.connection.execute("PRAGMA cache_size = -64000") # 64MB кэш + + self.connection.execute("PRAGMA busy_timeout = 30000") + self.connection.execute("PRAGMA cache_size = -64000") self.connection.execute("PRAGMA temp_store = MEMORY") - - # Создаем таблицу для CAN сообщений + self._create_tables() - + self._initialized = True - self.logger.info("SQLite database initialized successfully") - + logger.info("SQLite database initialized successfully") + except Exception as e: - self.logger.error( - f"Failed to initialize SQLite 
database: {e}", - exc_info=True - ) + logger.error(f"Failed to initialize SQLite database: {e}", exc_info=True) raise - def _migrate_add_column(self, cursor, table: str, column: str, column_def: str) -> None: - """Добавление колонки в таблицу, если она не существует. - - Args: - cursor: Курсор базы данных - table: Имя таблицы - column: Имя колонки - column_def: Определение колонки (тип и ограничения) - """ - try: - cursor.execute(f"SELECT {column} FROM {table} LIMIT 1") - except sqlite3.OperationalError: - # Колонка не существует, добавляем - cursor.execute(f"ALTER TABLE {table} ADD COLUMN {column} {column_def}") - self.logger.info(f"Added column {column} to table {table}") - def _create_tables(self) -> None: - """Создание таблиц в базе данных.""" + """Create database tables for OBD2 data.""" if not self.connection: raise RuntimeError("Database connection not initialized") cursor = self.connection.cursor() - # Таблица для CAN сообщений (согласована с PostgreSQL схемой) + # OBD2 Readings table cursor.execute(""" - CREATE TABLE IF NOT EXISTS can_messages ( + CREATE TABLE IF NOT EXISTS obd2_readings ( id INTEGER PRIMARY KEY AUTOINCREMENT, + session_id INTEGER, + timestamp_ns INTEGER NOT NULL, timestamp REAL NOT NULL, - interface TEXT NOT NULL, - can_id INTEGER NOT NULL, - can_id_hex TEXT NOT NULL DEFAULT '', - is_extended INTEGER NOT NULL DEFAULT 0, - dlc INTEGER NOT NULL, - data BLOB NOT NULL, - data_hex TEXT NOT NULL DEFAULT '', - processed INTEGER DEFAULT 0, + pid INTEGER NOT NULL, + pid_hex TEXT NOT NULL, + pid_name TEXT, + raw_data BLOB, + decoded_value REAL, + unit TEXT, + is_valid INTEGER DEFAULT 1, + ecu_id INTEGER DEFAULT 0, + synced INTEGER DEFAULT 0, + created_at REAL DEFAULT (julianday('now')), + FOREIGN KEY (session_id) REFERENCES obd2_sessions(id) + ) + """) + + # Sessions table + cursor.execute(""" + CREATE TABLE IF NOT EXISTS obd2_sessions ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + started_at REAL NOT NULL, + ended_at REAL, + vin TEXT, + total_distance_km REAL, + avg_speed_kmh REAL, + max_speed_kmh REAL, + max_rpm REAL, + fuel_consumed_l REAL, + readings_count INTEGER DEFAULT 0, created_at REAL DEFAULT (julianday('now')) ) """) - # Добавляем новые колонки для существующих таблиц (миграция) - self._migrate_add_column(cursor, "can_messages", "can_id_hex", "TEXT NOT NULL DEFAULT ''") - self._migrate_add_column(cursor, "can_messages", "is_extended", "INTEGER NOT NULL DEFAULT 0") - self._migrate_add_column(cursor, "can_messages", "data_hex", "TEXT NOT NULL DEFAULT ''") - - # Индексы для быстрого поиска + # Aggregated readings (hourly summaries) cursor.execute(""" - CREATE INDEX IF NOT EXISTS idx_timestamp - ON can_messages(timestamp) + CREATE TABLE IF NOT EXISTS obd2_aggregated ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + hour_timestamp INTEGER NOT NULL, + pid INTEGER NOT NULL, + pid_name TEXT, + min_value REAL, + max_value REAL, + avg_value REAL, + count INTEGER, + unit TEXT, + synced INTEGER DEFAULT 0, + UNIQUE(hour_timestamp, pid) + ) """) - + + # Indexes for efficient queries cursor.execute(""" - CREATE INDEX IF NOT EXISTS idx_interface - ON can_messages(interface) + CREATE INDEX IF NOT EXISTS idx_readings_timestamp + ON obd2_readings(timestamp) """) - + cursor.execute(""" - CREATE INDEX IF NOT EXISTS idx_can_id - ON can_messages(can_id) + CREATE INDEX IF NOT EXISTS idx_readings_pid + ON obd2_readings(pid) """) - + cursor.execute(""" - CREATE INDEX IF NOT EXISTS idx_processed - ON can_messages(processed) + CREATE INDEX IF NOT EXISTS idx_readings_session + ON 
obd2_readings(session_id) """) - - # Комбинированный индекс для запросов по времени и интерфейсу + cursor.execute(""" - CREATE INDEX IF NOT EXISTS idx_timestamp_interface - ON can_messages(timestamp, interface) + CREATE INDEX IF NOT EXISTS idx_readings_synced + ON obd2_readings(synced) """) - + + cursor.execute(""" + CREATE INDEX IF NOT EXISTS idx_readings_timestamp_pid + ON obd2_readings(timestamp, pid) + """) + + cursor.execute(""" + CREATE INDEX IF NOT EXISTS idx_aggregated_hour + ON obd2_aggregated(hour_timestamp) + """) + self.connection.commit() - self.logger.debug("Database tables and indexes created") - + logger.debug("Database tables and indexes created") + @contextmanager def _get_cursor(self): - """Context manager для получения курсора с автоматическим commit.""" + """Context manager for cursor with auto-commit.""" if not self.connection: raise RuntimeError("Database connection not initialized") - + cursor = self.connection.cursor() try: yield cursor self.connection.commit() - except Exception as e: + except Exception: self.connection.rollback() raise finally: cursor.close() - - def save_message(self, interface: str, can_id: int, dlc: int, data: bytes, timestamp: float) -> Optional[int]: + + # ========================================================================= + # Session Management + # ========================================================================= + + def start_session(self, vin: str = "") -> int: """ - Сохранение CAN сообщения в базу данных. + Start a new driving session. Args: - interface: Имя интерфейса (например, 'can0') - can_id: CAN ID сообщения - dlc: Data Length Code - data: Данные сообщения (bytes) - timestamp: Временная метка сообщения + vin: Vehicle Identification Number (optional) Returns: - ID сохраненного сообщения или None в случае ошибки + Session ID """ - if not self.connection: - self.logger.error("Database connection not initialized") - return None - try: - # Вычисляем дополнительные поля для совместимости с PostgreSQL - can_id_hex = hex(can_id) - is_extended = 1 if can_id > 0x7FF else 0 - data_hex = data.hex().upper() if isinstance(data, bytes) else "" - with self._write_lock: with self._get_cursor() as cursor: cursor.execute(""" - INSERT INTO can_messages (timestamp, interface, can_id, can_id_hex, is_extended, dlc, data, data_hex) - VALUES (?, ?, ?, ?, ?, ?, ?, ?) - """, (timestamp, interface, can_id, can_id_hex, is_extended, dlc, data, data_hex)) + INSERT INTO obd2_sessions (started_at, vin) + VALUES (?, ?) + """, (time.time(), vin)) + + session_id = cursor.lastrowid + self._current_session_id = session_id + + logger.info(f"Started session {session_id}") + return session_id + + except Exception as e: + logger.error(f"Failed to start session: {e}", exc_info=True) + return -1 + + def end_session( + self, + session_id: Optional[int] = None, + total_distance_km: Optional[float] = None, + fuel_consumed_l: Optional[float] = None + ) -> bool: + """ + End a driving session. 
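A minimal sketch of the intended session lifecycle, assuming the module-level `get_storage()` accessor defined at the end of this file; the VIN and the totals passed to `end_session()` are placeholders:

```python
# Session lifecycle sketch: open a session, let the poller record readings,
# then close it with whatever totals the rest of the system has computed.
from storage import get_storage

storage = get_storage()
session_id = storage.start_session(vin="TESTVIN1234567890")  # placeholder VIN

# ... the poller saves readings while the session is active ...

storage.end_session(
    session_id=session_id,
    total_distance_km=12.4,   # placeholder odometer delta
    fuel_consumed_l=0.9,      # placeholder fuel estimate
)
```

`end_session()` computes the speed/RPM aggregates itself from the readings recorded under that session id.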
+ + Args: + session_id: Session ID (uses current if not specified) + total_distance_km: Total distance traveled + fuel_consumed_l: Total fuel consumed + + Returns: + True if successful + """ + if session_id is None: + session_id = self._current_session_id + + if session_id is None: + return False + + try: + with self._write_lock: + with self._get_cursor() as cursor: + # Calculate session stats + cursor.execute(""" + SELECT + COUNT(*) as count, + AVG(CASE WHEN pid = 13 THEN decoded_value END) as avg_speed, + MAX(CASE WHEN pid = 13 THEN decoded_value END) as max_speed, + MAX(CASE WHEN pid = 12 THEN decoded_value END) as max_rpm + FROM obd2_readings + WHERE session_id = ? AND is_valid = 1 + """, (session_id,)) + + row = cursor.fetchone() + readings_count, avg_speed, max_speed, max_rpm = row + + cursor.execute(""" + UPDATE obd2_sessions + SET ended_at = ?, + total_distance_km = ?, + avg_speed_kmh = ?, + max_speed_kmh = ?, + max_rpm = ?, + fuel_consumed_l = ?, + readings_count = ? + WHERE id = ? + """, ( + time.time(), + total_distance_km, + avg_speed, + max_speed, + max_rpm, + fuel_consumed_l, + readings_count, + session_id + )) + + if session_id == self._current_session_id: + self._current_session_id = None + + logger.info(f"Ended session {session_id}", extra={"readings": readings_count}) + return True + + except Exception as e: + logger.error(f"Failed to end session: {e}", exc_info=True) + return False + + @property + def current_session_id(self) -> Optional[int]: + """Get current session ID.""" + return self._current_session_id + + # ========================================================================= + # Reading Storage + # ========================================================================= + + def save_reading(self, reading: "OBD2Reading") -> Optional[int]: + """ + Save a single OBD2 reading. + + Args: + reading: OBD2Reading object + + Returns: + Reading ID or None on error + """ + from obd2.pids import OBD2Reading + + try: + with self._write_lock: + with self._get_cursor() as cursor: + cursor.execute(""" + INSERT INTO obd2_readings + (session_id, timestamp_ns, timestamp, pid, pid_hex, pid_name, + raw_data, decoded_value, unit, is_valid, ecu_id) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + """, ( + self._current_session_id, + reading.timestamp_ns, + reading.timestamp, + reading.pid, + reading.pid_hex, + reading.pid_name, + reading.raw_data, + reading.value, + reading.unit, + 1 if reading.is_valid else 0, + reading.ecu_id + )) return cursor.lastrowid except Exception as e: - self.logger.error( - f"Failed to save message to database: {e}", - exc_info=True, - extra={ - "interface": interface, - "can_id": hex(can_id) - } - ) + logger.error(f"Failed to save reading: {e}", exc_info=True) return None - - def save_messages_batch(self, messages: list) -> int: + + def save_readings_batch(self, readings: List["OBD2Reading"]) -> int: """ - Пакетное сохранение CAN сообщений. + Save multiple OBD2 readings in a batch. 
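A hedged sketch of handing decoded values to the batch writer; the `OBD2Reading` keyword arguments below simply mirror the attributes this method reads (`timestamp_ns`, `pid_hex`, `value`, `unit`, ...) and are an assumption about the actual dataclass in `obd2/pids.py`:

```python
# Batch-save sketch: one decoded engine-RPM reading (0x1AF8 / 4 = 1726 rpm).
import time

from obd2.pids import OBD2Reading
from storage import get_storage

now = time.time()
readings = [
    OBD2Reading(              # constructor fields assumed from the usage above
        timestamp_ns=int(now * 1e9),
        timestamp=now,
        pid=0x0C,
        pid_hex="0x0C",
        pid_name="ENGINE_RPM",
        raw_data=b"\x1a\xf8",
        value=1726.0,
        unit="rpm",
        is_valid=True,
        ecu_id=0,
    ),
]
saved = get_storage().save_readings_batch(readings)
```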
Args: - messages: Список кортежей (timestamp, interface, can_id, dlc, data) + readings: List of OBD2Reading objects Returns: - Количество успешно сохраненных сообщений + Number of saved readings """ - if not self.connection: - self.logger.error("Database connection not initialized") - return 0 - - if not messages: + if not readings: return 0 try: - # Преобразуем сообщения в расширенный формат с дополнительными полями - extended_messages = [] - for msg in messages: - timestamp, interface, can_id, dlc, data = msg - can_id_hex = hex(can_id) - is_extended = 1 if can_id > 0x7FF else 0 - data_hex = data.hex().upper() if isinstance(data, bytes) else "" - extended_messages.append(( - timestamp, interface, can_id, can_id_hex, is_extended, dlc, data, data_hex + batch = [] + for reading in readings: + batch.append(( + self._current_session_id, + reading.timestamp_ns, + reading.timestamp, + reading.pid, + reading.pid_hex, + reading.pid_name, + reading.raw_data, + reading.value, + reading.unit, + 1 if reading.is_valid else 0, + reading.ecu_id )) with self._write_lock: with self._get_cursor() as cursor: cursor.executemany(""" - INSERT INTO can_messages (timestamp, interface, can_id, can_id_hex, is_extended, dlc, data, data_hex) - VALUES (?, ?, ?, ?, ?, ?, ?, ?) - """, extended_messages) + INSERT INTO obd2_readings + (session_id, timestamp_ns, timestamp, pid, pid_hex, pid_name, + raw_data, decoded_value, unit, is_valid, ecu_id) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + """, batch) - saved_count = cursor.rowcount - self.logger.debug( - f"Saved {saved_count} messages in batch", - extra={"batch_size": len(messages)} - ) - return saved_count + return cursor.rowcount except Exception as e: - self.logger.error( - f"Failed to save messages batch: {e}", - exc_info=True, - extra={"batch_size": len(messages)} - ) + logger.error(f"Failed to save readings batch: {e}", exc_info=True) return 0 - - def get_unprocessed_messages(self, limit: int = 1000) -> list: + + def get_readings( + self, + pid: Optional[int] = None, + start_time: Optional[float] = None, + end_time: Optional[float] = None, + limit: int = 1000 + ) -> List[Dict[str, Any]]: """ - Получение необработанных сообщений для отправки в PostgreSQL. + Get readings with optional filters. Args: - limit: Максимальное количество сообщений + pid: Filter by PID + start_time: Start timestamp + end_time: End timestamp + limit: Maximum number of results Returns: - Список кортежей (id, timestamp, interface, can_id, can_id_hex, is_extended, dlc, data, data_hex) + List of reading dictionaries """ - if not self.connection: - self.logger.error("Database connection not initialized") + try: + conditions = ["is_valid = 1"] + params = [] + + if pid is not None: + conditions.append("pid = ?") + params.append(pid) + + if start_time is not None: + conditions.append("timestamp >= ?") + params.append(start_time) + + if end_time is not None: + conditions.append("timestamp <= ?") + params.append(end_time) + + params.append(limit) + + with self._get_cursor() as cursor: + cursor.execute(f""" + SELECT timestamp, pid, pid_name, decoded_value, unit + FROM obd2_readings + WHERE {' AND '.join(conditions)} + ORDER BY timestamp DESC + LIMIT ? 
+ """, params) + + return [ + { + "timestamp": row[0], + "pid": row[1], + "pid_name": row[2], + "value": row[3], + "unit": row[4] + } + for row in cursor.fetchall() + ] + + except Exception as e: + logger.error(f"Failed to get readings: {e}", exc_info=True) return [] + def get_latest_reading(self, pid: int) -> Optional[Dict[str, Any]]: + """Get the most recent reading for a PID.""" + readings = self.get_readings(pid=pid, limit=1) + return readings[0] if readings else None + + # ========================================================================= + # Sync Support + # ========================================================================= + + def get_unsynced_readings(self, limit: int = 100) -> List[tuple]: + """ + Get readings not yet synced to PostgreSQL. + + Returns: + List of reading tuples + """ try: with self._get_cursor() as cursor: cursor.execute(""" - SELECT id, timestamp, interface, can_id, can_id_hex, is_extended, dlc, data, data_hex - FROM can_messages - WHERE processed = 0 + SELECT id, timestamp_ns, timestamp, pid, pid_hex, pid_name, + raw_data, decoded_value, unit, is_valid, ecu_id + FROM obd2_readings + WHERE synced = 0 ORDER BY timestamp ASC LIMIT ? """, (limit,)) @@ -316,166 +503,222 @@ class Storage: return cursor.fetchall() except Exception as e: - self.logger.error( - f"Failed to get unprocessed messages: {e}", - exc_info=True - ) + logger.error(f"Failed to get unsynced readings: {e}", exc_info=True) return [] - - def mark_as_processed(self, message_ids: list) -> int: - """ - Отметить сообщения как обработанные. - Args: - message_ids: Список ID сообщений - - Returns: - Количество обновленных сообщений - """ - if not self.connection: - self.logger.error("Database connection not initialized") - return 0 - - if not message_ids: + def mark_readings_synced(self, reading_ids: List[int]) -> int: + """Mark readings as synced to PostgreSQL.""" + if not reading_ids: return 0 try: with self._write_lock: with self._get_cursor() as cursor: - placeholders = ','.join('?' * len(message_ids)) + placeholders = ','.join('?' * len(reading_ids)) cursor.execute(f""" - UPDATE can_messages - SET processed = 1 + UPDATE obd2_readings + SET synced = 1 WHERE id IN ({placeholders}) - """, message_ids) + """, reading_ids) return cursor.rowcount except Exception as e: - self.logger.error( - f"Failed to mark messages as processed: {e}", - exc_info=True - ) + logger.error(f"Failed to mark readings synced: {e}", exc_info=True) return 0 - - def get_stats(self) -> Dict[str, Any]: - """ - Получение статистики базы данных. 
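A sketch of one sync cycle built on these two helpers; `push_to_upstream()` is a hypothetical stand-in for whatever forwarder actually uploads the rows:

```python
# Sync-cycle sketch: read unsynced rows, ship them, then mark them synced.
from typing import List, Tuple

from storage import get_storage


def push_to_upstream(rows: List[Tuple]) -> bool:
    """Hypothetical uploader; pretend the upload always succeeds."""
    return True


def sync_once(limit: int = 100) -> int:
    storage = get_storage()
    rows = storage.get_unsynced_readings(limit=limit)
    if not rows:
        return 0
    if push_to_upstream(rows):
        # The row id is the first column of each tuple (see the SELECT above).
        return storage.mark_readings_synced([row[0] for row in rows])
    return 0
```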
- - Returns: - Словарь со статистикой - """ - if not self.connection: - return { - "initialized": False, - "total_messages": 0, - "unprocessed_messages": 0, - "processed_messages": 0 - } - - try: - with self._get_cursor() as cursor: - # Общее количество сообщений - cursor.execute("SELECT COUNT(*) FROM can_messages") - total = cursor.fetchone()[0] - - # Необработанные сообщения - cursor.execute("SELECT COUNT(*) FROM can_messages WHERE processed = 0") - unprocessed = cursor.fetchone()[0] - - # Обработанные сообщения - cursor.execute("SELECT COUNT(*) FROM can_messages WHERE processed = 1") - processed = cursor.fetchone()[0] - - return { - "initialized": True, - "total_messages": total, - "unprocessed_messages": unprocessed, - "processed_messages": processed, - "database_path": self.config.database_path - } - - except Exception as e: - self.logger.error( - f"Failed to get database stats: {e}", - exc_info=True - ) - return { - "initialized": True, - "error": str(e) - } - def cleanup_old_messages(self, days: Optional[int] = None) -> int: - """ - Удаление обработанных записей старше указанного количества дней. + # ========================================================================= + # Aggregation + # ========================================================================= - Удаляет только записи с processed=1 для сохранения необработанных данных. + def aggregate_hour(self, hour_timestamp: int) -> int: + """ + Aggregate readings for a specific hour. Args: - days: Количество дней хранения. Если None, берется из config.storage.retention_days + hour_timestamp: Hour timestamp (unix timestamp floored to hour) Returns: - Количество удаленных записей + Number of PIDs aggregated """ - if not self.connection: - self.logger.error("Database connection not initialized") + try: + next_hour = hour_timestamp + 3600 + + with self._write_lock: + with self._get_cursor() as cursor: + cursor.execute(""" + INSERT OR REPLACE INTO obd2_aggregated + (hour_timestamp, pid, pid_name, min_value, max_value, avg_value, count, unit) + SELECT + ?, + pid, + pid_name, + MIN(decoded_value), + MAX(decoded_value), + AVG(decoded_value), + COUNT(*), + unit + FROM obd2_readings + WHERE timestamp >= ? AND timestamp < ? AND is_valid = 1 + GROUP BY pid + """, (hour_timestamp, hour_timestamp, next_hour)) + + return cursor.rowcount + + except Exception as e: + logger.error(f"Failed to aggregate hour: {e}", exc_info=True) return 0 + # ========================================================================= + # Cleanup + # ========================================================================= + + def cleanup_old_readings(self, days: Optional[int] = None) -> int: + """ + Delete synced readings older than retention period. + + Args: + days: Retention days (uses config if not specified) + + Returns: + Number of deleted readings + """ if days is None: days = self.config.retention_days try: + cutoff = time.time() - (days * 86400) + with self._write_lock: with self._get_cursor() as cursor: - # julianday('now') - days дает дату N дней назад cursor.execute(""" - DELETE FROM can_messages - WHERE processed = 1 - AND created_at < julianday('now') - ? - """, (days,)) + DELETE FROM obd2_readings + WHERE synced = 1 AND timestamp < ? 
+ """, (cutoff,)) deleted = cursor.rowcount if deleted > 0: - self.logger.info( - f"Cleaned up {deleted} processed messages older than {days} days" - ) + logger.info(f"Cleaned up {deleted} old readings") return deleted except Exception as e: - self.logger.error( - f"Failed to cleanup old messages: {e}", - exc_info=True - ) + logger.error(f"Failed to cleanup old readings: {e}", exc_info=True) return 0 + def cleanup_old_aggregated(self, days: Optional[int] = None) -> int: + """Delete aggregated data older than retention period.""" + if days is None: + days = self.config.aggregation_retention_days + + try: + cutoff = int(time.time()) - (days * 86400) + + with self._write_lock: + with self._get_cursor() as cursor: + cursor.execute(""" + DELETE FROM obd2_aggregated + WHERE synced = 1 AND hour_timestamp < ? + """, (cutoff,)) + + return cursor.rowcount + + except Exception as e: + logger.error(f"Failed to cleanup old aggregated: {e}", exc_info=True) + return 0 + + # ========================================================================= + # Statistics + # ========================================================================= + + def get_stats(self) -> Dict[str, Any]: + """Get database statistics.""" + if not self.connection: + return {"initialized": False} + + try: + with self._get_cursor() as cursor: + cursor.execute("SELECT COUNT(*) FROM obd2_readings") + total_readings = cursor.fetchone()[0] + + cursor.execute("SELECT COUNT(*) FROM obd2_readings WHERE synced = 0") + unsynced = cursor.fetchone()[0] + + cursor.execute("SELECT COUNT(*) FROM obd2_sessions") + total_sessions = cursor.fetchone()[0] + + cursor.execute("SELECT COUNT(*) FROM obd2_aggregated") + total_aggregated = cursor.fetchone()[0] + + return { + "initialized": True, + "total_readings": total_readings, + "unsynced_readings": unsynced, + "total_sessions": total_sessions, + "total_aggregated": total_aggregated, + "current_session_id": self._current_session_id, + "database_path": self.config.database_path + } + + except Exception as e: + logger.error(f"Failed to get stats: {e}", exc_info=True) + return {"initialized": True, "error": str(e)} + + def get_session_summary(self, session_id: int) -> Optional[SessionSummary]: + """Get summary for a specific session.""" + try: + with self._get_cursor() as cursor: + cursor.execute(""" + SELECT id, started_at, ended_at, total_distance_km, + avg_speed_kmh, max_speed_kmh, max_rpm, + fuel_consumed_l, readings_count + FROM obd2_sessions + WHERE id = ? 
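A sketch of the periodic maintenance these methods enable, again assuming the `get_storage()` accessor from the end of this file:

```python
# Maintenance sketch: roll up the previous full hour, purge old synced rows,
# then log overall database statistics.
import time

from storage import get_storage

storage = get_storage()

# Floor "one hour ago" to the start of its hour (unix seconds).
prev_hour = (int(time.time()) - 3600) // 3600 * 3600
storage.aggregate_hour(prev_hour)

storage.cleanup_old_readings()      # retention_days comes from config.storage
storage.cleanup_old_aggregated()    # aggregation_retention_days from config

print(storage.get_stats())          # totals, unsynced backlog, current session
```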
+ """, (session_id,)) + + row = cursor.fetchone() + if not row: + return None + + duration = None + if row[1] and row[2]: + duration = row[2] - row[1] + + return SessionSummary( + id=row[0], + started_at=row[1], + ended_at=row[2], + duration_s=duration, + total_distance_km=row[3], + avg_speed_kmh=row[4], + max_speed_kmh=row[5], + max_rpm=row[6], + fuel_consumed_l=row[7], + readings_count=row[8] or 0 + ) + + except Exception as e: + logger.error(f"Failed to get session summary: {e}", exc_info=True) + return None + def close(self) -> None: - """Закрытие соединения с базой данных.""" + """Close database connection.""" if self.connection: try: self.connection.close() - self.logger.info("Database connection closed") + logger.info("Database connection closed") except Exception as e: - self.logger.error( - f"Error closing database connection: {e}", - exc_info=True - ) + logger.error(f"Error closing database: {e}", exc_info=True) finally: self.connection = None -# Глобальный экземпляр хранилища +# Global storage instance _storage_instance: Optional[Storage] = None def get_storage() -> Storage: - """ - Получение глобального экземпляра хранилища. - - Returns: - Экземпляр Storage - """ + """Get global storage instance.""" global _storage_instance if _storage_instance is None: _storage_instance = Storage() return _storage_instance - diff --git a/can_sniffer/src/vehicle/__init__.py b/can_sniffer/src/vehicle/__init__.py new file mode 100644 index 0000000..2a36253 --- /dev/null +++ b/can_sniffer/src/vehicle/__init__.py @@ -0,0 +1,14 @@ +""" +Vehicle State Management Module. + +Provides in-memory vehicle state tracking with pub/sub notifications. +""" + +from .state import VehicleState +from .state_manager import VehicleStateManager, StateChangeEvent + +__all__ = [ + "VehicleState", + "VehicleStateManager", + "StateChangeEvent", +] diff --git a/can_sniffer/src/vehicle/state.py b/can_sniffer/src/vehicle/state.py new file mode 100644 index 0000000..90079bd --- /dev/null +++ b/can_sniffer/src/vehicle/state.py @@ -0,0 +1,283 @@ +""" +Vehicle State Data Model. + +Represents the current state of vehicle parameters from OBD2. +""" + +from dataclasses import dataclass, field, fields +from typing import Optional, Dict, Any +import time + + +@dataclass +class VehicleState: + """ + Current vehicle state from OBD2 readings. + + All fields are optional and updated as responses arrive. + Timestamps track when each field was last updated. 
+
+    Attributes:
+        timestamp: Last update timestamp
+
+        # Engine
+        rpm: Engine speed (rpm)
+        engine_load: Calculated engine load (%)
+        coolant_temp: Engine coolant temperature (°C)
+        oil_temp: Engine oil temperature (°C)
+        intake_temp: Intake air temperature (°C)
+        timing_advance: Timing advance (°)
+        maf: Mass air flow rate (g/s)
+
+        # Speed & Movement
+        speed: Vehicle speed (km/h)
+        throttle_pos: Throttle position (%)
+        accelerator_pos: Accelerator pedal position (%)
+
+        # Fuel
+        fuel_level: Fuel tank level (%)
+        fuel_rate: Engine fuel rate (L/h)
+        fuel_pressure: Fuel pressure (kPa)
+
+        # Temperatures
+        ambient_temp: Ambient air temperature (°C)
+        catalyst_temp_b1s1: Catalyst temperature, bank 1 sensor 1 (°C)
+
+        # Diagnostics
+        runtime: Time since engine start (s)
+        distance_mil: Distance with MIL on (km)
+        dtc_count: Number of DTCs
+
+        # Vehicle Info
+        vin: Vehicle Identification Number
+        odometer: Odometer reading (km)
+
+        # Connection status
+        ecu_connected: Whether ECU is responding
+        last_response_time: Last successful response time
+    """
+
+    # Metadata
+    timestamp: float = field(default_factory=time.time)
+
+    # Engine
+    rpm: Optional[float] = None
+    engine_load: Optional[float] = None
+    coolant_temp: Optional[float] = None
+    oil_temp: Optional[float] = None
+    intake_temp: Optional[float] = None
+    timing_advance: Optional[float] = None
+    maf: Optional[float] = None
+    map_pressure: Optional[float] = None  # Intake manifold pressure
+
+    # Speed & Movement
+    speed: Optional[float] = None
+    throttle_pos: Optional[float] = None
+    accelerator_pos: Optional[float] = None
+    relative_throttle: Optional[float] = None
+
+    # Fuel
+    fuel_level: Optional[float] = None
+    fuel_rate: Optional[float] = None
+    fuel_pressure: Optional[float] = None
+    short_term_fuel_trim_1: Optional[float] = None
+    long_term_fuel_trim_1: Optional[float] = None
+    short_term_fuel_trim_2: Optional[float] = None
+    long_term_fuel_trim_2: Optional[float] = None
+    ethanol_percent: Optional[float] = None
+
+    # Temperatures
+    ambient_temp: Optional[float] = None
+    catalyst_temp_b1s1: Optional[float] = None
+    catalyst_temp_b1s2: Optional[float] = None
+
+    # Voltage
+    control_module_voltage: Optional[float] = None
+
+    # Oxygen Sensors
+    o2_voltage_b1s1: Optional[float] = None
+    o2_voltage_b1s2: Optional[float] = None
+    o2_voltage_b2s1: Optional[float] = None
+    o2_voltage_b2s2: Optional[float] = None
+
+    # Diagnostics
+    runtime: Optional[float] = None
+    distance_mil: Optional[float] = None
+    distance_since_clear: Optional[float] = None
+    warmups_since_clear: Optional[int] = None
+    time_since_clear: Optional[float] = None
+    dtc_count: int = 0
+
+    # Vehicle Info
+    vin: str = ""
+    odometer: Optional[float] = None
+    barometric_pressure: Optional[float] = None
+
+    # Connection status
+    ecu_connected: bool = False
+    last_response_time: float = 0.0
+
+    # Field update timestamps
+    _field_timestamps: Dict[str, float] = field(default_factory=dict)
+
+    def update_field(self, name: str, value: Any) -> bool:
+        """
+        Update a field value with timestamp tracking.
+
+        Args:
+            name: Field name
+            value: New value
+
+        Returns:
+            True if field was updated (value changed)
+        """
+        if not hasattr(self, name):
+            return False
+
+        old_value = getattr(self, name)
+        if old_value == value:
+            return False
+
+        setattr(self, name, value)
+        self._field_timestamps[name] = time.time()
+        self.timestamp = time.time()
+        return True
+
+    def get_field_age(self, name: str) -> Optional[float]:
+        """
+        Get age of a field value in seconds.
+
+        Args:
+            name: Field name
+
+        Returns:
+            Age in seconds or None if never updated
+        """
+        ts = self._field_timestamps.get(name)
+        if ts is None:
+            return None
+        return time.time() - ts
+
+    def is_field_stale(self, name: str, max_age_s: float = 5.0) -> bool:
+        """
+        Check if a field value is stale.
+
+        Args:
+            name: Field name
+            max_age_s: Maximum age in seconds
+
+        Returns:
+            True if field is stale or never updated
+        """
+        age = self.get_field_age(name)
+        if age is None:
+            return True
+        return age > max_age_s
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert to dictionary (excludes private fields)."""
+        result = {}
+        for f in fields(self):
+            if f.name.startswith("_"):
+                continue
+            value = getattr(self, f.name)
+            if value is not None:
+                result[f.name] = value
+        return result
+
+    def get_engine_summary(self) -> Dict[str, Any]:
+        """Get engine-related parameters."""
+        return {
+            "rpm": self.rpm,
+            "load": self.engine_load,
+            "coolant_temp": self.coolant_temp,
+            "oil_temp": self.oil_temp,
+            "intake_temp": self.intake_temp,
+            "maf": self.maf,
+            "throttle": self.throttle_pos,
+        }
+
+    def get_fuel_summary(self) -> Dict[str, Any]:
+        """Get fuel-related parameters."""
+        return {
+            "level": self.fuel_level,
+            "rate": self.fuel_rate,
+            "pressure": self.fuel_pressure,
+            "stft1": self.short_term_fuel_trim_1,
+            "ltft1": self.long_term_fuel_trim_1,
+        }
+
+    def get_movement_summary(self) -> Dict[str, Any]:
+        """Get movement-related parameters."""
+        return {
+            "speed": self.speed,
+            "throttle": self.throttle_pos,
+            "accelerator": self.accelerator_pos,
+            "odometer": self.odometer,
+        }
+
+    @property
+    def is_engine_running(self) -> bool:
+        """Check if engine appears to be running."""
+        if self.rpm is not None and self.rpm > 0:
+            return True
+        return False
+
+    @property
+    def is_moving(self) -> bool:
+        """Check if vehicle appears to be moving."""
+        if self.speed is not None and self.speed > 0:
+            return True
+        return False
+
+    def __repr__(self) -> str:
+        parts = []
+        if self.rpm is not None:
+            parts.append(f"rpm={self.rpm:.0f}")
+        if self.speed is not None:
+            parts.append(f"speed={self.speed:.0f}km/h")
+        if self.coolant_temp is not None:
+            parts.append(f"coolant={self.coolant_temp:.0f}°C")
+        if self.fuel_level is not None:
+            parts.append(f"fuel={self.fuel_level:.0f}%")
+
+        status = "connected" if self.ecu_connected else "disconnected"
+        return f"VehicleState({', '.join(parts)}, {status})"
+
+
+# Mapping from PID to VehicleState field name
+PID_TO_FIELD: Dict[int, str] = {
+    0x04: "engine_load",
+    0x05: "coolant_temp",
+    0x06: "short_term_fuel_trim_1",
+    0x07: "long_term_fuel_trim_1",
+    0x08: "short_term_fuel_trim_2",
+    0x09: "long_term_fuel_trim_2",
+    0x0A: "fuel_pressure",
+    0x0B: "map_pressure",
+    0x0C: "rpm",
+    0x0D: "speed",
+    0x0E: "timing_advance",
+    0x0F: "intake_temp",
+    0x10: "maf",
+    0x11: "throttle_pos",
+    0x14: "o2_voltage_b1s1",
+    0x15: "o2_voltage_b1s2",
+    0x18: "o2_voltage_b2s1",
+    0x19: "o2_voltage_b2s2",
+    0x1F: "runtime",
+    0x21: "distance_mil",
+    0x2F: "fuel_level",
+    0x31: "distance_since_clear",
+    0x33: "barometric_pressure",
+    0x3C: "catalyst_temp_b1s1",
+    0x3E: "catalyst_temp_b1s2",
+    0x42: "control_module_voltage",
+    0x45: "relative_throttle",
+    0x46: "ambient_temp",
+    0x49: "accelerator_pos",
+    0x52: "ethanol_percent",
+    0x5C: "oil_temp",
+    0x5E: "fuel_rate",
+    0xA6: "odometer",
+}
diff --git a/can_sniffer/src/vehicle/state_manager.py b/can_sniffer/src/vehicle/state_manager.py
new file mode 100644
index 0000000..62f3f32
--- /dev/null
+++ b/can_sniffer/src/vehicle/state_manager.py
@@ -0,0 +1,324 @@
+"""
+Vehicle State Manager.
+
+Manages the vehicle state singleton with pub/sub notifications
+for state changes.
+"""
+
+import threading
+import time
+from dataclasses import dataclass
+from typing import Optional, Callable, Dict, Any, List, TYPE_CHECKING
+from enum import Enum
+
+from logger import get_logger
+from .state import VehicleState, PID_TO_FIELD
+
+if TYPE_CHECKING:
+    # Type-checking-only import; importing at runtime would create a circular dependency
+    from obd2.pids import OBD2Reading
+
+logger = get_logger(__name__)
+
+
+@dataclass
+class StateChangeEvent:
+    """
+    Event fired when vehicle state changes.
+
+    Attributes:
+        field: Field name that changed
+        old_value: Previous value
+        new_value: New value
+        timestamp: When the change occurred
+        pid: Source PID (if known)
+    """
+
+    field: str
+    old_value: Any
+    new_value: Any
+    timestamp: float
+    pid: Optional[int] = None
+
+
+class SubscriptionType(Enum):
+    """Types of state subscriptions."""
+
+    ALL = "all"            # All state changes
+    FIELD = "field"        # Specific field changes
+    CATEGORY = "category"  # Category changes (engine, fuel, etc.)
+
+
+@dataclass
+class Subscription:
+    """A state change subscription."""
+
+    callback: Callable[[StateChangeEvent], None]
+    sub_type: SubscriptionType
+    filter_value: Optional[str] = None  # Field name or category
+
+
+class VehicleStateManager:
+    """
+    Manages vehicle state with pub/sub notifications.
+
+    Provides:
+    - Singleton state instance
+    - Thread-safe updates
+    - Subscription to state changes
+    - Session tracking
+
+    Example:
+        manager = VehicleStateManager()
+        manager.subscribe(on_change)  # All changes
+        manager.subscribe_field("rpm", on_rpm_change)
+
+        # Updates from OBD2 readings
+        manager.update_from_reading(reading)
+    """
+
+    _instance: Optional["VehicleStateManager"] = None
+    _lock = threading.Lock()
+
+    def __new__(cls) -> "VehicleStateManager":
+        """Singleton pattern."""
+        with cls._lock:
+            if cls._instance is None:
+                cls._instance = super().__new__(cls)
+                cls._instance._initialized = False
+        return cls._instance
+
+    def __init__(self):
+        if self._initialized:
+            return
+
+        self._state = VehicleState()
+        self._state_lock = threading.RLock()
+
+        self._subscriptions: List[Subscription] = []
+        self._sub_lock = threading.Lock()
+
+        # Session tracking
+        self._session_start: Optional[float] = None
+        self._session_start_odometer: Optional[float] = None
+
+        # Stats
+        self._updates_count = 0
+        self._last_update_time = 0.0
+
+        self._initialized = True
+        logger.debug("VehicleStateManager initialized")
+
+    @property
+    def state(self) -> VehicleState:
+        """Get the current vehicle state (shared live instance)."""
+        with self._state_lock:
+            return self._state
+
+    def get_state(self) -> VehicleState:
+        """Get current vehicle state."""
+        return self.state
+
+    def update_from_reading(self, reading: "OBD2Reading") -> bool:
+        """
+        Update state from an OBD2 reading.
+
+        Args:
+            reading: Decoded OBD2 reading
+
+        Returns:
+            True if state was updated
+        """
+        if not reading.is_valid or reading.value is None:
+            return False
+
+        field_name = PID_TO_FIELD.get(reading.pid)
+        if field_name is None:
+            return False
+
+        return self.update_field(field_name, reading.value, reading.pid)
+
+    def update_field(
+        self,
+        field: str,
+        value: Any,
+        pid: Optional[int] = None
+    ) -> bool:
+        """
+        Update a specific state field.
+
+        Args:
+            field: Field name to update
+            value: New value
+            pid: Source PID (optional)
+
+        Returns:
+            True if field was updated (value changed)
+        """
+        with self._state_lock:
+            old_value = getattr(self._state, field, None)
+
+            if not self._state.update_field(field, value):
+                return False
+
+            # Update connection status
+            self._state.ecu_connected = True
+            self._state.last_response_time = time.time()
+
+            self._updates_count += 1
+            self._last_update_time = time.time()
+
+        # Fire event outside lock
+        event = StateChangeEvent(
+            field=field,
+            old_value=old_value,
+            new_value=value,
+            timestamp=time.time(),
+            pid=pid
+        )
+        self._notify_subscribers(event)
+
+        return True
+
+    def subscribe(
+        self,
+        callback: Callable[[StateChangeEvent], None]
+    ) -> None:
+        """
+        Subscribe to all state changes.
+
+        Args:
+            callback: Function called with StateChangeEvent
+        """
+        with self._sub_lock:
+            self._subscriptions.append(Subscription(
+                callback=callback,
+                sub_type=SubscriptionType.ALL
+            ))
+
+    def subscribe_field(
+        self,
+        field: str,
+        callback: Callable[[StateChangeEvent], None]
+    ) -> None:
+        """
+        Subscribe to changes of a specific field.
+
+        Args:
+            field: Field name to watch
+            callback: Function called with StateChangeEvent
+        """
+        with self._sub_lock:
+            self._subscriptions.append(Subscription(
+                callback=callback,
+                sub_type=SubscriptionType.FIELD,
+                filter_value=field
+            ))
+
+    def subscribe_fields(
+        self,
+        fields: List[str],
+        callback: Callable[[StateChangeEvent], None]
+    ) -> None:
+        """Subscribe to changes of multiple fields."""
+        for field in fields:
+            self.subscribe_field(field, callback)
+
+    def unsubscribe(
+        self,
+        callback: Callable[[StateChangeEvent], None]
+    ) -> None:
+        """Remove a subscription by callback."""
+        with self._sub_lock:
+            self._subscriptions = [
+                s for s in self._subscriptions
+                if s.callback != callback
+            ]
+
+    def clear_subscriptions(self) -> None:
+        """Remove all subscriptions."""
+        with self._sub_lock:
+            self._subscriptions.clear()
+
+    def _notify_subscribers(self, event: StateChangeEvent) -> None:
+        """Notify relevant subscribers of a state change."""
+        with self._sub_lock:
+            subs = list(self._subscriptions)
+
+        for sub in subs:
+            should_notify = False
+
+            if sub.sub_type == SubscriptionType.ALL:
+                should_notify = True
+            elif sub.sub_type == SubscriptionType.FIELD:
+                should_notify = (sub.filter_value == event.field)
+
+            if should_notify:
+                try:
+                    sub.callback(event)
+                except Exception as e:
+                    logger.error(f"Subscription callback error: {e}")
+
+    def start_session(self) -> None:
+        """Start a new driving session."""
+        with self._state_lock:
+            self._session_start = time.time()
+            self._session_start_odometer = self._state.odometer
+        logger.info("Session started")
+
+    def end_session(self) -> Dict[str, Any]:
+        """
+        End the current session and return summary.
+
+        Returns:
+            Session summary dictionary
+        """
+        with self._state_lock:
+            if self._session_start is None:
+                return {}
+
+            duration = time.time() - self._session_start
+            distance = None
+
+            if (self._session_start_odometer is not None and
+                    self._state.odometer is not None):
+                distance = self._state.odometer - self._session_start_odometer
+
+            summary = {
+                "duration_s": duration,
+                "distance_km": distance,
+                "updates_count": self._updates_count,
+            }
+
+            self._session_start = None
+            self._session_start_odometer = None
+
+        logger.info("Session ended", extra=summary)
+        return summary
+
+    def get_stats(self) -> Dict[str, Any]:
+        """Get state manager statistics."""
+        with self._state_lock:
+            return {
+                "updates_count": self._updates_count,
+                "last_update_time": self._last_update_time,
+                "ecu_connected": self._state.ecu_connected,
+                "session_active": self._session_start is not None,
+                "subscriptions_count": len(self._subscriptions),
+            }
+
+    def mark_disconnected(self) -> None:
+        """Mark ECU as disconnected."""
+        with self._state_lock:
+            self._state.ecu_connected = False
+
+    def reset(self) -> None:
+        """Reset state to defaults."""
+        with self._state_lock:
+            self._state = VehicleState()
+            self._updates_count = 0
+        logger.info("Vehicle state reset")
+
+
+# Convenience function for getting the singleton
+def get_state_manager() -> VehicleStateManager:
+    """Get the global VehicleStateManager instance."""
+    return VehicleStateManager()
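
A minimal usage sketch of the pub/sub surface added above in `src/vehicle`. It assumes `src/` is on `PYTHONPATH` (consistent with the absolute `from logger import get_logger` imports used in this patch); the PID numbers and values are illustrative, and in the real pipeline the OBD2 poller feeds the manager through `update_from_reading()` rather than calling `update_field()` directly.

```python
from vehicle.state_manager import StateChangeEvent, get_state_manager


def on_speed_change(event: StateChangeEvent) -> None:
    # Called synchronously from update_field(); keep callbacks fast.
    print(f"{event.field}: {event.old_value} -> {event.new_value} (PID {event.pid:#04x})")


manager = get_state_manager()
manager.subscribe_field("speed", on_speed_change)

# Push already-decoded values by field name (illustrative numbers).
manager.update_field("speed", 63.0, pid=0x0D)
manager.update_field("rpm", 2150.0, pid=0x0C)

print(manager.get_state())                   # VehicleState(rpm=2150, speed=63km/h, connected)
print(manager.get_stats()["updates_count"])  # 2 in a fresh process
```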
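
The same `PID_TO_FIELD` lookup that `update_from_reading()` performs can be driven by hand against a bare `VehicleState`, which also shows the per-field staleness tracking; the decoded coolant value below (92 °C for PID 0x05) is an assumed example, not taken from a real ECU.

```python
from vehicle.state import PID_TO_FIELD, VehicleState

state = VehicleState()

# A decoded Mode 01 response: PID 0x05 (coolant temperature), already converted to °C.
pid, value = 0x05, 92.0
field_name = PID_TO_FIELD.get(pid)  # -> "coolant_temp"
if field_name is not None:
    state.update_field(field_name, value)

print(state.coolant_temp)                    # 92.0
print(state.is_field_stale("coolant_temp"))  # False right after the update
print(state.is_field_stale("rpm"))           # True: never updated
```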